| patch | y | oldf | idx | id | msg | proj | lang |
|---|---|---|---|---|---|---|---|
| string (17–31.2k chars) | int64 (1–1) | string (0–2.21M chars) | int64 (1–1) | int64 (4.29k–68.4k) | string (8–843 chars) | string (212 classes) | string (9 classes) |
@@ -28,3 +28,6 @@ var ErrIncomplete = errors.New("incomplete config")
// ErrNotReady is the error when a broker is not ready.
var ErrNotReady = errors.New("not ready")
+
+// ErrOverflow is the error when a broker ingress is sending too many requests to PubSub.
+var ErrOverflow = errors.New("bundler reached buffered byte limit")
| 1 |
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ingress
import "errors"
// ErrNotFound is the error when a broker doesn't exist in the configmap.
// This can happen if the clients specifies invalid broker in the path, or the configmap volume hasn't been updated.
var ErrNotFound = errors.New("not found")
// ErrIncomplete is the error when a broker entry exists in the configmap but its decouple queue is nil or empty.
// This should never happen unless there is a bug in the controller.
var ErrIncomplete = errors.New("incomplete config")
// ErrNotReady is the error when a broker is not ready.
var ErrNotReady = errors.New("not ready")
| 1 | 18,054 | Can you import `bundler.ErrOverflow` rather than redefine it here? | google-knative-gcp | go |
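A minimal sketch of the reviewer's suggestion, assuming the sentinel comes from `google.golang.org/api/support/bundler` (whose message string the patch copies verbatim); aliasing it keeps `errors.Is` comparisons against the bundler's own value intact:

```go
package ingress

import "google.golang.org/api/support/bundler"

// ErrOverflow is the error when a broker ingress is sending too many requests to PubSub.
// Aliasing the bundler package's sentinel avoids duplicating its message string.
var ErrOverflow = bundler.ErrOverflow
```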
@@ -305,7 +305,7 @@ public abstract class FlatteningConfig {
}
ResourceNameTreatment resourceNameTreatment = ResourceNameTreatment.NONE;
- String resourceNameType = protoParser.getResourceType(parameterField.getProtoField());
+ String resourceNameType = protoParser.getResourceReference(parameterField.getProtoField());
if (!Strings.isNullOrEmpty(resourceNameType)) {
resourceNameTreatment = ResourceNameTreatment.STATIC_TYPES;
}
| 1 |
/* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.MethodSignature;
import com.google.api.codegen.FlatteningGroupProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.configgen.transformer.DiscoveryMethodTransformer;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Oneof;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
/** FlatteningConfig represents a specific flattening configuration for a method. */
@AutoValue
public abstract class FlatteningConfig {
// Maps the name of the parameter in this flattening to its FieldConfig.
public abstract ImmutableMap<String, FieldConfig> getFlattenedFieldConfigs();
@Nullable
public abstract String getFlatteningName();
/**
* Returns a map from a string (representing the list of fields in a flattening) to the
* flattening config created from a method in the GAPIC config.
*/
private static Map<String, FlatteningConfig> createFlatteningsFromGapicConfig(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
MethodModel methodModel) {
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
for (FlatteningGroupProto flatteningGroup : methodConfigProto.getFlattening().getGroupsList()) {
FlatteningConfig groupConfig =
FlatteningConfig.createFlatteningFromConfigProto(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
flatteningGroup,
methodModel);
if (groupConfig != null) {
flatteningConfigs.put(flatteningConfigToString(groupConfig), groupConfig);
}
}
if (diagCollector.hasErrors()) {
return null;
}
return flatteningConfigs;
}
static ImmutableList<FlatteningConfig> createFlatteningConfigs(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
DiscoveryMethodModel methodModel) {
Map<String, FlatteningConfig> flatteningConfigMap =
createFlatteningsFromGapicConfig(
diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
if (flatteningConfigMap == null) {
return null;
}
return ImmutableList.copyOf(flatteningConfigMap.values());
}
@VisibleForTesting
@Nullable
static ImmutableList<FlatteningConfig> createFlatteningConfigs(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
ProtoMethodModel methodModel,
ProtoParser protoParser) {
Map<String, FlatteningConfig> flatteningConfigsFromGapicConfig =
createFlatteningsFromGapicConfig(
diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
if (flatteningConfigsFromGapicConfig == null) {
return null;
}
// Get flattenings from protofile annotations
Map<String, FlatteningConfig> flatteningConfigsFromProtoFile =
createFlatteningConfigsFromProtoFile(
diagCollector, messageConfigs, resourceNameConfigs, methodModel, protoParser);
if (flatteningConfigsFromProtoFile == null) {
return null;
}
// Enforce unique flattening configs, in case proto annotations overlap with configProto
// flattening.
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
flatteningConfigs.putAll(flatteningConfigsFromGapicConfig);
// Let flattenings from proto annotations override flattenings from GAPIC config.
flatteningConfigs.putAll(flatteningConfigsFromProtoFile);
return ImmutableList.copyOf(flatteningConfigs.values());
}
/**
* Returns a map from a string (representing the list of fields in a flattening) to the
* flattening config created from a method in the proto file.
*/
@Nullable
private static Map<String, FlatteningConfig> createFlatteningConfigsFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ProtoMethodModel methodModel,
ProtoParser protoParser) {
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
// Get flattenings from protofile annotations; let these override flattenings from GAPIC config.
List<MethodSignature> methodSignatures =
protoParser.getMethodSignatures(methodModel.getProtoMethod());
for (MethodSignature signature : methodSignatures) {
if (signature.getFieldsCount() == 0) {
break;
}
FlatteningConfig groupConfig =
FlatteningConfig.createFlatteningFromProtoFile(
diagCollector,
messageConfigs,
resourceNameConfigs,
signature,
methodModel,
protoParser);
if (groupConfig != null) {
flatteningConfigs.put(flatteningConfigToString(groupConfig), groupConfig);
}
}
if (diagCollector.hasErrors()) {
return null;
}
return flatteningConfigs;
}
/**
* Creates an instance of FlatteningConfig based on a FlatteningGroupProto, linking it up with the
* provided method.
*/
@Nullable
private static FlatteningConfig createFlatteningFromConfigProto(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
FlatteningGroupProto flatteningGroup,
MethodModel method) {
boolean missing = false;
ImmutableMap.Builder<String, FieldConfig> flattenedFieldConfigBuilder = ImmutableMap.builder();
Set<String> oneofNames = new HashSet<>();
List<String> flattenedParams = Lists.newArrayList(flatteningGroup.getParametersList());
if (method.hasExtraFieldMask()) {
flattenedParams.add(DiscoveryMethodTransformer.FIELDMASK_STRING);
}
for (String parameter : flattenedParams) {
FieldModel parameterField = method.getInputField(parameter);
if (parameterField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Field missing for flattening: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
parameter));
return null;
}
Oneof oneof = parameterField.getOneof();
if (oneof != null) {
String oneofName = oneof.getName();
if (oneofNames.contains(oneofName)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Value from oneof already specifed for flattening:%n"
+ "method = %s, message type = %s, oneof = %s",
method.getFullName(),
method.getInputFullName(),
oneofName));
return null;
}
oneofNames.add(oneofName);
}
ResourceNameTreatment defaultResourceNameTreatment =
methodConfigProto.getResourceNameTreatment();
if (!parameterField.mayBeInResourceName()) {
defaultResourceNameTreatment = ResourceNameTreatment.NONE;
}
if (defaultResourceNameTreatment == null
|| defaultResourceNameTreatment.equals(ResourceNameTreatment.UNSET_TREATMENT)) {
defaultResourceNameTreatment = ResourceNameTreatment.VALIDATE;
}
FieldConfig fieldConfig =
FieldConfig.createFieldConfig(
diagCollector,
messageConfigs,
methodConfigProto.getFieldNamePatternsMap(),
resourceNameConfigs,
parameterField,
flatteningGroup.getParameterResourceNameTreatmentMap().get(parameter),
defaultResourceNameTreatment);
if (fieldConfig == null) {
missing = true;
} else {
flattenedFieldConfigBuilder.put(parameter, fieldConfig);
}
}
if (missing) {
return null;
}
return new AutoValue_FlatteningConfig(
flattenedFieldConfigBuilder.build(), flatteningGroup.getFlatteningGroupName());
}
/**
* Creates an instance of FlatteningConfig based on a MethodSignature from the proto file, linking
* it up with the provided method.
*/
@Nullable
private static FlatteningConfig createFlatteningFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodSignature methodSignature,
ProtoMethodModel method,
ProtoParser protoParser) {
// TODO(andrealin): combine this method with createFlatteningFromConfigProto.
ImmutableMap.Builder<String, FieldConfig> flattenedFieldConfigBuilder = ImmutableMap.builder();
Set<String> oneofNames = new HashSet<>();
List<String> flattenedParams = Lists.newArrayList(methodSignature.getFieldsList());
for (String parameter : flattenedParams) {
ProtoField parameterField = method.getInputField(parameter);
if (parameterField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Field missing for flattening: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
parameter));
return null;
}
Oneof oneof = parameterField.getOneof();
if (oneof != null) {
String oneofName = oneof.getName();
if (oneofNames.contains(oneofName)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Value from oneof already specifed for flattening:%n"
+ "method = %s, message type = %s, oneof = %s",
method.getFullName(),
method.getInputFullName(),
oneofName));
return null;
}
oneofNames.add(oneofName);
}
ResourceNameTreatment resourceNameTreatment = ResourceNameTreatment.NONE;
String resourceNameType = protoParser.getResourceType(parameterField.getProtoField());
if (!Strings.isNullOrEmpty(resourceNameType)) {
resourceNameTreatment = ResourceNameTreatment.STATIC_TYPES;
}
FieldConfig fieldConfig =
FieldConfig.createMessageFieldConfig(
messageConfigs, resourceNameConfigs, parameterField, resourceNameTreatment);
flattenedFieldConfigBuilder.put(parameter, fieldConfig);
}
return new AutoValue_FlatteningConfig(flattenedFieldConfigBuilder.build(), null);
}
public Iterable<FieldModel> getFlattenedFields() {
return FieldConfig.toFieldTypeIterable(getFlattenedFieldConfigs().values());
}
public FlatteningConfig withResourceNamesInSamplesOnly() {
ImmutableMap<String, FieldConfig> newFlattenedFieldConfigs =
getFlattenedFieldConfigs()
.entrySet()
.stream()
.collect(
ImmutableMap.toImmutableMap(
Map.Entry::getKey, e -> e.getValue().withResourceNameInSampleOnly()));
return new AutoValue_FlatteningConfig(newFlattenedFieldConfigs, getFlatteningName());
}
public static boolean hasAnyRepeatedResourceNameParameter(FlatteningConfig flatteningGroup) {
// Used in Java to prevent generating a flattened method with List<ResourceName> as a parameter
// because that has the same type erasure as the version of the flattened method with
// List<String> as a parameter.
// TODO(gapic-generator issue #2137) Only use raw String type for repeated params
// not for singular params in the same flattened method.
return flatteningGroup
.getFlattenedFieldConfigs()
.values()
.stream()
.anyMatch(
(FieldConfig fieldConfig) ->
fieldConfig.getField().isRepeated() && fieldConfig.useResourceNameType());
}
/** Returns a string representing the ordered fields in a flattening config. */
private static String flatteningConfigToString(FlatteningConfig flatteningConfig) {
Iterable<FieldModel> paramList = flatteningConfig.getFlattenedFields();
StringBuilder paramsAsString = new StringBuilder();
paramList.forEach(p -> paramsAsString.append(p.getSimpleName()).append(", "));
return paramsAsString.toString();
}
}
| 1 | 27,535 | This is just a renaming of the function getResourceType() to getResourceReference() | googleapis-gapic-generator | java |
@@ -106,7 +106,7 @@ module Beaker
ip = ''
if File.file?(@vagrant_file) #we should have a vagrant file available to us for reading
f = File.read(@vagrant_file)
- m = /#{hostname}.*?ip:\s*('|")\s*([^'"]+)('|")/m.match(f)
+ m = /'#{hostname}'.*?ip:\s*('|")\s*([^'"]+)('|")/m.match(f)
if m
ip = m[2]
@logger.debug("Determined existing vagrant box #{hostname} ip to be: #{ip} ") | 1 | require 'open3'
module Beaker
class Vagrant < Beaker::Hypervisor
# Return a random mac address
#
# @return [String] a random mac address
def randmac
"080027" + (1..3).map{"%0.2X"%rand(256)}.join
end
def rand_chunk
(2 + rand(252)).to_s #don't want a 0, 1, or a 255
end
def randip
"10.255.#{rand_chunk}.#{rand_chunk}"
end
def make_vfile hosts, options = {}
#HACK HACK HACK - add checks here to ensure that we have box + box_url
#generate the VagrantFile
v_file = "Vagrant.configure(\"2\") do |c|\n"
v_file << " c.ssh.forward_agent = true\n" if options[:forward_ssh_agent] == true
v_file << " c.ssh.insert_key = false\n"
hosts.each do |host|
host['ip'] ||= randip #use the existing ip, otherwise default to a random ip
v_file << " c.vm.define '#{host.name}' do |v|\n"
v_file << " v.vm.hostname = '#{host.name}'\n"
v_file << " v.vm.box = '#{host['box']}'\n"
v_file << " v.vm.box_url = '#{host['box_url']}'\n" unless host['box_url'].nil?
v_file << " v.vm.box_version = '#{host['box_version']}'\n" unless host['box_version'].nil?
v_file << " v.vm.box_check_update = '#{host['box_check_update'] ||= 'true'}'\n"
v_file << " v.vm.synced_folder '.', '/vagrant', disabled: true\n" if host['synced_folder'] == 'disabled'
v_file << " v.vm.network :private_network, ip: \"#{host['ip'].to_s}\", :netmask => \"#{host['netmask'] ||= "255.255.0.0"}\", :mac => \"#{randmac}\"\n"
if /windows/i.match(host['platform'])
v_file << " v.vm.network :forwarded_port, guest: 3389, host: 3389, id: 'rdp', auto_correct: true\n"
v_file << " v.vm.network :forwarded_port, guest: 5985, host: 5985, id: 'winrm', auto_correct: true\n"
v_file << " v.vm.guest = :windows\n"
end
if /osx/i.match(host['platform'])
v_file << " v.vm.network 'private_network', ip: '10.0.1.10'\n"
v_file << " v.vm.synced_folder '.', '/vagrant', :nfs => true\n"
end
if /freebsd/i.match(host['platform'])
v_file << " v.ssh.shell = 'sh'\n"
v_file << " v.vm.guest = :freebsd\n"
# FreeBSD NFS has a character restriction of 88 characters
# So you can enable it but if your module has a long name it probably won't work...
# So to keep things simple let's rsync by default!
#
# Further reading if interested:
# http://www.secnetix.de/olli/FreeBSD/mnamelen.hawk
# https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=167105
#
if host['vagrant_freebsd_nfs'].nil?
v_file << " v.vm.synced_folder '.', '/vagrant', type: 'rsync'\n"
else
v_file << " v.vm.synced_folder '.', '/vagrant', :nfs => true\n"
end
end
v_file << self.class.provider_vfile_section(host, options)
v_file << " end\n"
@logger.debug "created Vagrantfile for VagrantHost #{host.name}"
end
v_file << "end\n"
File.open(@vagrant_file, 'w') do |f|
f.write(v_file)
end
end
def self.provider_vfile_section host, options
# Backwards compatibility; default to virtualbox
Beaker::VagrantVirtualbox.provider_vfile_section(host, options)
end
def set_ssh_config host, user
f = Tempfile.new("#{host.name}")
ssh_config = Dir.chdir(@vagrant_path) do
stdin, stdout, stderr, wait_thr = Open3.popen3(@vagrant_env, 'vagrant', 'ssh-config', host.name)
if not wait_thr.value.success?
raise "Failed to 'vagrant ssh-config' for #{host.name}"
end
stdout.read
end
#replace hostname with ip
ssh_config = ssh_config.gsub(/Host #{host.name}/, "Host #{host['ip']}") if host['ip']
#set the user
ssh_config = ssh_config.gsub(/User vagrant/, "User #{user}")
f.write(ssh_config)
f.rewind
host['ssh'] = {:config => f.path()}
host['user'] = user
@temp_files << f
end
def get_ip_from_vagrant_file(hostname)
ip = ''
if File.file?(@vagrant_file) #we should have a vagrant file available to us for reading
f = File.read(@vagrant_file)
m = /#{hostname}.*?ip:\s*('|")\s*([^'"]+)('|")/m.match(f)
if m
ip = m[2]
@logger.debug("Determined existing vagrant box #{hostname} ip to be: #{ip} ")
else
raise("Unable to determine ip for vagrant box #{hostname}")
end
else
raise("No vagrant file found (should be located at #{@vagrant_file})")
end
ip
end
def initialize(vagrant_hosts, options)
require 'tempfile'
@options = options
@logger = options[:logger]
@temp_files = []
@hosts = vagrant_hosts
@vagrant_path = File.expand_path(File.join(File.basename(__FILE__), '..', '.vagrant', 'beaker_vagrant_files', File.basename(options[:hosts_file])))
FileUtils.mkdir_p(@vagrant_path)
@vagrant_file = File.expand_path(File.join(@vagrant_path, "Vagrantfile"))
@vagrant_env = { "RUBYLIB" => "" }
end
def provision(provider = nil)
if !@options[:provision] and !File.file?(@vagrant_file)
raise "Beaker is configured with provision = false but no vagrant file was found at #{@vagrant_file}. You need to enable provision"
end
if @options[:provision]
#setting up new vagrant hosts
#make sure that any old boxes are dead dead dead
vagrant_cmd("destroy --force") if File.file?(@vagrant_file)
make_vfile @hosts, @options
vagrant_cmd("up#{" --provider #{provider}" if provider}")
else #set host ip of already up boxes
@hosts.each do |host|
host[:ip] = get_ip_from_vagrant_file(host.name)
end
end
@logger.debug "configure vagrant boxes (set ssh-config, switch to root user, hack etc/hosts)"
@hosts.each do |host|
default_user = host['user']
set_ssh_config host, 'vagrant'
#copy vagrant's keys to roots home dir, to allow for login as root
copy_ssh_to_root host, @options
#ensure that root login is enabled for this host
enable_root_login host, @options
#shut down connection, will reconnect on next exec
host.close
set_ssh_config host, default_user
end
hack_etc_hosts @hosts, @options
end
def cleanup
@logger.debug "removing temporory ssh-config files per-vagrant box"
@temp_files.each do |f|
f.close()
end
@logger.notify "Destroying vagrant boxes"
vagrant_cmd("destroy --force")
FileUtils.rm_rf(@vagrant_path)
end
def vagrant_cmd(args)
Dir.chdir(@vagrant_path) do
exit_status = 1
Open3.popen3(@vagrant_env, "vagrant #{args}") {|stdin, stdout, stderr, wait_thr|
while line = stdout.gets
@logger.info(line)
end
if not wait_thr.value.success?
raise "Failed to exec 'vagrant #{args}'. Error was #{stderr.read}"
end
exit_status = wait_thr.value.exitstatus
}
if exit_status != 0
raise "Failed to execute vagrant_cmd ( #{args} )"
end
end
end
end
end
| 1 | 11,738 | Seems like this might need to allow for `"`s to be used here? | voxpupuli-beaker | rb |
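A sketch of one way to read the reviewer's question, assuming the generated Vagrantfile might wrap the hostname in either quote style; a backreference keeps the opening and closing quotes paired:

```ruby
# Match the hostname wrapped in either ' or ", then capture the ip value.
m = /('|")#{Regexp.escape(hostname)}\1.*?ip:\s*('|")\s*([^'"]+)\2/m.match(f)
ip = m[3] if m
```

Note that the extra capture group shifts the ip from `m[2]` to `m[3]`.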
@@ -295,8 +295,8 @@ CombatSpell::~CombatSpell()
bool CombatSpell::loadScriptCombat()
{
- combat = g_luaEnvironment.getCombatObject(g_luaEnvironment.lastCombatId);
- return combat != nullptr;
+ Combat_ptr combatPtr = g_luaEnvironment.getCombatObject(g_luaEnvironment.lastCombatId);
+ return combatPtr != nullptr;
}
bool CombatSpell::castSpell(Creature* creature)
| 1 |
/**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "combat.h"
#include "configmanager.h"
#include "game.h"
#include "monster.h"
#include "pugicast.h"
#include "spells.h"
extern Game g_game;
extern Spells* g_spells;
extern Monsters g_monsters;
extern Vocations g_vocations;
extern ConfigManager g_config;
extern LuaEnvironment g_luaEnvironment;
Spells::Spells()
{
scriptInterface.initState();
}
Spells::~Spells()
{
clear(false);
}
TalkActionResult_t Spells::playerSaySpell(Player* player, std::string& words)
{
std::string str_words = words;
//strip trailing spaces
trimString(str_words);
InstantSpell* instantSpell = getInstantSpell(str_words);
if (!instantSpell) {
return TALKACTION_CONTINUE;
}
std::string param;
if (instantSpell->getHasParam()) {
size_t spellLen = instantSpell->getWords().length();
size_t paramLen = str_words.length() - spellLen;
std::string paramText = str_words.substr(spellLen, paramLen);
if (!paramText.empty() && paramText.front() == ' ') {
size_t loc1 = paramText.find('"', 1);
if (loc1 != std::string::npos) {
size_t loc2 = paramText.find('"', loc1 + 1);
if (loc2 == std::string::npos) {
loc2 = paramText.length();
} else if (paramText.find_last_not_of(' ') != loc2) {
return TALKACTION_CONTINUE;
}
param = paramText.substr(loc1 + 1, loc2 - loc1 - 1);
} else {
trimString(paramText);
loc1 = paramText.find(' ', 0);
if (loc1 == std::string::npos) {
param = paramText;
} else {
return TALKACTION_CONTINUE;
}
}
}
}
if (instantSpell->playerCastInstant(player, param)) {
words = instantSpell->getWords();
if (instantSpell->getHasParam() && !param.empty()) {
words += " \"" + param + "\"";
}
return TALKACTION_BREAK;
}
return TALKACTION_FAILED;
}
void Spells::clearMaps(bool fromLua)
{
for (auto instant = instants.begin(); instant != instants.end(); ) {
if (fromLua == instant->second.fromLua) {
instant = instants.erase(instant);
} else {
++instant;
}
}
for (auto rune = runes.begin(); rune != runes.end(); ) {
if (fromLua == rune->second.fromLua) {
rune = runes.erase(rune);
} else {
++rune;
}
}
}
void Spells::clear(bool fromLua)
{
clearMaps(fromLua);
reInitState(fromLua);
}
LuaScriptInterface& Spells::getScriptInterface()
{
return scriptInterface;
}
std::string Spells::getScriptBaseName() const
{
return "spells";
}
Event_ptr Spells::getEvent(const std::string& nodeName)
{
if (strcasecmp(nodeName.c_str(), "rune") == 0) {
return Event_ptr(new RuneSpell(&scriptInterface));
} else if (strcasecmp(nodeName.c_str(), "instant") == 0) {
return Event_ptr(new InstantSpell(&scriptInterface));
}
return nullptr;
}
bool Spells::registerEvent(Event_ptr event, const pugi::xml_node&)
{
InstantSpell* instant = dynamic_cast<InstantSpell*>(event.get());
if (instant) {
auto result = instants.emplace(instant->getWords(), std::move(*instant));
if (!result.second) {
std::cout << "[Warning - Spells::registerEvent] Duplicate registered instant spell with words: " << instant->getWords() << std::endl;
}
return result.second;
}
RuneSpell* rune = dynamic_cast<RuneSpell*>(event.get());
if (rune) {
auto result = runes.emplace(rune->getRuneItemId(), std::move(*rune));
if (!result.second) {
std::cout << "[Warning - Spells::registerEvent] Duplicate registered rune with id: " << rune->getRuneItemId() << std::endl;
}
return result.second;
}
return false;
}
bool Spells::registerInstantLuaEvent(InstantSpell* event)
{
InstantSpell_ptr instant { event };
if (instant) {
std::string words = instant->getWords();
auto result = instants.emplace(instant->getWords(), std::move(*instant));
if (!result.second) {
std::cout << "[Warning - Spells::registerInstantLuaEvent] Duplicate registered instant spell with words: " << words << std::endl;
}
return result.second;
}
return false;
}
bool Spells::registerRuneLuaEvent(RuneSpell* event)
{
RuneSpell_ptr rune { event };
if (rune) {
uint16_t id = rune->getRuneItemId();
auto result = runes.emplace(rune->getRuneItemId(), std::move(*rune));
if (!result.second) {
std::cout << "[Warning - Spells::registerRuneLuaEvent] Duplicate registered rune with id: " << id << std::endl;
}
return result.second;
}
return false;
}
Spell* Spells::getSpellByName(const std::string& name)
{
Spell* spell = getRuneSpellByName(name);
if (!spell) {
spell = getInstantSpellByName(name);
}
return spell;
}
RuneSpell* Spells::getRuneSpell(uint32_t id)
{
auto it = runes.find(id);
if (it == runes.end()) {
for (auto& rune : runes) {
if (rune.second.getId() == id) {
return &rune.second;
}
}
return nullptr;
}
return &it->second;
}
RuneSpell* Spells::getRuneSpellByName(const std::string& name)
{
for (auto& it : runes) {
if (strcasecmp(it.second.getName().c_str(), name.c_str()) == 0) {
return &it.second;
}
}
return nullptr;
}
InstantSpell* Spells::getInstantSpell(const std::string& words)
{
InstantSpell* result = nullptr;
for (auto& it : instants) {
const std::string& instantSpellWords = it.second.getWords();
size_t spellLen = instantSpellWords.length();
if (strncasecmp(instantSpellWords.c_str(), words.c_str(), spellLen) == 0) {
if (!result || spellLen > result->getWords().length()) {
result = &it.second;
if (words.length() == spellLen) {
break;
}
}
}
}
if (result) {
const std::string& resultWords = result->getWords();
if (words.length() > resultWords.length()) {
if (!result->getHasParam()) {
return nullptr;
}
size_t spellLen = resultWords.length();
size_t paramLen = words.length() - spellLen;
if (paramLen < 2 || words[spellLen] != ' ') {
return nullptr;
}
}
return result;
}
return nullptr;
}
InstantSpell* Spells::getInstantSpellByName(const std::string& name)
{
for (auto& it : instants) {
if (strcasecmp(it.second.getName().c_str(), name.c_str()) == 0) {
return &it.second;
}
}
return nullptr;
}
Position Spells::getCasterPosition(Creature* creature, Direction dir)
{
return getNextPosition(dir, creature->getPosition());
}
CombatSpell::CombatSpell(Combat* combat, bool needTarget, bool needDirection) :
Event(&g_spells->getScriptInterface()),
combat(combat),
needDirection(needDirection),
needTarget(needTarget)
{}
CombatSpell::~CombatSpell()
{
if (!scripted) {
delete combat;
}
}
bool CombatSpell::loadScriptCombat()
{
combat = g_luaEnvironment.getCombatObject(g_luaEnvironment.lastCombatId);
return combat != nullptr;
}
bool CombatSpell::castSpell(Creature* creature)
{
if (scripted) {
LuaVariant var;
var.type = VARIANT_POSITION;
if (needDirection) {
var.pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
var.pos = creature->getPosition();
}
return executeCastSpell(creature, var);
}
Position pos;
if (needDirection) {
pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
pos = creature->getPosition();
}
combat->doCombat(creature, pos);
return true;
}
bool CombatSpell::castSpell(Creature* creature, Creature* target)
{
if (scripted) {
LuaVariant var;
if (combat->hasArea()) {
var.type = VARIANT_POSITION;
if (needTarget) {
var.pos = target->getPosition();
} else if (needDirection) {
var.pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
var.pos = creature->getPosition();
}
} else {
var.type = VARIANT_NUMBER;
var.number = target->getID();
}
return executeCastSpell(creature, var);
}
if (combat->hasArea()) {
if (needTarget) {
combat->doCombat(creature, target->getPosition());
} else {
return castSpell(creature);
}
} else {
combat->doCombat(creature, target);
}
return true;
}
bool CombatSpell::executeCastSpell(Creature* creature, const LuaVariant& var)
{
//onCastSpell(creature, var)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - CombatSpell::executeCastSpell] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushVariant(L, var);
return scriptInterface->callFunction(2);
}
bool Spell::configureSpell(const pugi::xml_node& node)
{
pugi::xml_attribute nameAttribute = node.attribute("name");
if (!nameAttribute) {
std::cout << "[Error - Spell::configureSpell] Spell without name" << std::endl;
return false;
}
name = nameAttribute.as_string();
static const char* reservedList[] = {
"melee",
"physical",
"poison",
"fire",
"energy",
"drown",
"lifedrain",
"manadrain",
"healing",
"speed",
"outfit",
"invisible",
"drunk",
"firefield",
"poisonfield",
"energyfield",
"firecondition",
"poisoncondition",
"energycondition",
"drowncondition",
"freezecondition",
"cursecondition",
"dazzlecondition"
};
//static size_t size = sizeof(reservedList) / sizeof(const char*);
//for (size_t i = 0; i < size; ++i) {
for (const char* reserved : reservedList) {
if (strcasecmp(reserved, name.c_str()) == 0) {
std::cout << "[Error - Spell::configureSpell] Spell is using a reserved name: " << reserved << std::endl;
return false;
}
}
pugi::xml_attribute attr;
if ((attr = node.attribute("spellid"))) {
spellId = pugi::cast<uint16_t>(attr.value());
}
if ((attr = node.attribute("group"))) {
std::string tmpStr = asLowerCaseString(attr.as_string());
if (tmpStr == "none" || tmpStr == "0") {
group = SPELLGROUP_NONE;
} else if (tmpStr == "attack" || tmpStr == "1") {
group = SPELLGROUP_ATTACK;
} else if (tmpStr == "healing" || tmpStr == "2") {
group = SPELLGROUP_HEALING;
} else if (tmpStr == "support" || tmpStr == "3") {
group = SPELLGROUP_SUPPORT;
} else if (tmpStr == "special" || tmpStr == "4") {
group = SPELLGROUP_SPECIAL;
} else {
std::cout << "[Warning - Spell::configureSpell] Unknown group: " << attr.as_string() << std::endl;
}
}
if ((attr = node.attribute("groupcooldown"))) {
groupCooldown = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("secondarygroup"))) {
std::string tmpStr = asLowerCaseString(attr.as_string());
if (tmpStr == "none" || tmpStr == "0") {
secondaryGroup = SPELLGROUP_NONE;
} else if (tmpStr == "attack" || tmpStr == "1") {
secondaryGroup = SPELLGROUP_ATTACK;
} else if (tmpStr == "healing" || tmpStr == "2") {
secondaryGroup = SPELLGROUP_HEALING;
} else if (tmpStr == "support" || tmpStr == "3") {
secondaryGroup = SPELLGROUP_SUPPORT;
} else if (tmpStr == "special" || tmpStr == "4") {
secondaryGroup = SPELLGROUP_SPECIAL;
} else {
std::cout << "[Warning - Spell::configureSpell] Unknown secondarygroup: " << attr.as_string() << std::endl;
}
}
if ((attr = node.attribute("secondarygroupcooldown"))) {
secondaryGroupCooldown = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("level")) || (attr = node.attribute("lvl"))) {
level = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("magiclevel")) || (attr = node.attribute("maglv"))) {
magLevel = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("mana"))) {
mana = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("manapercent"))) {
manaPercent = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("soul"))) {
soul = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("range"))) {
range = pugi::cast<int32_t>(attr.value());
}
if ((attr = node.attribute("cooldown")) || (attr = node.attribute("exhaustion"))) {
cooldown = pugi::cast<uint32_t>(attr.value());
}
if ((attr = node.attribute("premium")) || (attr = node.attribute("prem"))) {
premium = attr.as_bool();
}
if ((attr = node.attribute("enabled"))) {
enabled = attr.as_bool();
}
if ((attr = node.attribute("needtarget"))) {
needTarget = attr.as_bool();
}
if ((attr = node.attribute("needweapon"))) {
needWeapon = attr.as_bool();
}
if ((attr = node.attribute("selftarget"))) {
selfTarget = attr.as_bool();
}
if ((attr = node.attribute("needlearn"))) {
learnable = attr.as_bool();
}
if ((attr = node.attribute("blocking"))) {
blockingSolid = attr.as_bool();
blockingCreature = blockingSolid;
}
if ((attr = node.attribute("blocktype"))) {
std::string tmpStrValue = asLowerCaseString(attr.as_string());
if (tmpStrValue == "all") {
blockingSolid = true;
blockingCreature = true;
} else if (tmpStrValue == "solid") {
blockingSolid = true;
} else if (tmpStrValue == "creature") {
blockingCreature = true;
} else {
std::cout << "[Warning - Spell::configureSpell] Blocktype \"" << attr.as_string() << "\" does not exist." << std::endl;
}
}
if ((attr = node.attribute("pzlock"))) {
pzLock = booleanString(attr.as_string());
}
if ((attr = node.attribute("aggressive"))) {
aggressive = booleanString(attr.as_string());
}
if (group == SPELLGROUP_NONE) {
group = (aggressive ? SPELLGROUP_ATTACK : SPELLGROUP_HEALING);
}
for (auto vocationNode : node.children()) {
if (!(attr = vocationNode.attribute("name"))) {
continue;
}
int32_t vocationId = g_vocations.getVocationId(attr.as_string());
if (vocationId != -1) {
attr = vocationNode.attribute("showInDescription");
vocSpellMap[vocationId] = !attr || attr.as_bool();
} else {
std::cout << "[Warning - Spell::configureSpell] Wrong vocation name: " << attr.as_string() << std::endl;
}
}
return true;
}
bool Spell::playerSpellCheck(Player* player) const
{
if (player->hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
if (!enabled) {
return false;
}
if ((aggressive || pzLock) && (range < 1 || (range > 0 && !player->getAttackedCreature())) && player->getSkull() == SKULL_BLACK) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return false;
}
if ((aggressive || pzLock) && player->hasCondition(CONDITION_PACIFIED)) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if ((aggressive || pzLock) && !player->hasFlag(PlayerFlag_IgnoreProtectionZone) && player->getZone() == ZONE_PROTECTION) {
player->sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE);
return false;
}
if (player->hasCondition(CONDITION_SPELLGROUPCOOLDOWN, group) || player->hasCondition(CONDITION_SPELLCOOLDOWN, spellId) || (secondaryGroup != SPELLGROUP_NONE && player->hasCondition(CONDITION_SPELLGROUPCOOLDOWN, secondaryGroup))) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
if (isInstant()) {
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
}
return false;
}
if (player->getLevel() < level) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHLEVEL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (player->getMagicLevel() < magLevel) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHMAGICLEVEL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (player->getMana() < getManaCost(player) && !player->hasFlag(PlayerFlag_HasInfiniteMana)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHMANA);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (player->getSoul() < soul && !player->hasFlag(PlayerFlag_HasInfiniteSoul)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHSOUL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (isInstant() && isLearnable()) {
if (!player->hasLearnedInstantSpell(getName())) {
player->sendCancelMessage(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
} else if (!vocSpellMap.empty() && vocSpellMap.find(player->getVocationId()) == vocSpellMap.end()) {
player->sendCancelMessage(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (needWeapon) {
switch (player->getWeaponType()) {
case WEAPON_SWORD:
case WEAPON_CLUB:
case WEAPON_AXE:
break;
default: {
player->sendCancelMessage(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
}
}
if (isPremium() && !player->isPremium()) {
player->sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
return true;
}
bool Spell::playerInstantSpellCheck(Player* player, const Position& toPos)
{
if (toPos.x == 0xFFFF) {
return true;
}
const Position& playerPos = player->getPosition();
if (playerPos.z > toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGOUPSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
} else if (playerPos.z < toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGODOWNSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Tile* tile = g_game.map.getTile(toPos);
if (!tile) {
tile = new StaticTile(toPos.x, toPos.y, toPos.z);
g_game.map.setTile(toPos, tile);
}
ReturnValue ret = Combat::canDoCombat(player, tile, aggressive);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (blockingCreature && tile->getBottomVisibleCreature(player) != nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (blockingSolid && tile->hasFlag(TILESTATE_BLOCKSOLID)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
return true;
}
bool Spell::playerRuneSpellCheck(Player* player, const Position& toPos)
{
if (!playerSpellCheck(player)) {
return false;
}
if (toPos.x == 0xFFFF) {
return true;
}
const Position& playerPos = player->getPosition();
if (playerPos.z > toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGOUPSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
} else if (playerPos.z < toPos.z) {
player->sendCancelMessage(RETURNVALUE_FIRSTGODOWNSTAIRS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
Tile* tile = g_game.map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (range != -1 && !g_game.canThrowObjectTo(playerPos, toPos, true, range, range)) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
ReturnValue ret = Combat::canDoCombat(player, tile, aggressive);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
const Creature* topVisibleCreature = tile->getBottomVisibleCreature(player);
if (blockingCreature && topVisibleCreature) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
} else if (blockingSolid && tile->hasFlag(TILESTATE_BLOCKSOLID)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (needTarget && !topVisibleCreature) {
player->sendCancelMessage(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (aggressive && needTarget && topVisibleCreature && player->hasSecureMode()) {
const Player* targetPlayer = topVisibleCreature->getPlayer();
if (targetPlayer && targetPlayer != player && player->getSkullClient(targetPlayer) == SKULL_NONE && !Combat::isInPvpZone(player, targetPlayer)) {
player->sendCancelMessage(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
}
return true;
}
void Spell::postCastSpell(Player* player, bool finishedCast /*= true*/, bool payCost /*= true*/) const
{
if (finishedCast) {
if (!player->hasFlag(PlayerFlag_HasNoExhaustion)) {
if (cooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLCOOLDOWN, cooldown, 0, false, spellId);
player->addCondition(condition);
}
if (groupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, groupCooldown, 0, false, group);
player->addCondition(condition);
}
if (secondaryGroupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, secondaryGroupCooldown, 0, false, secondaryGroup);
player->addCondition(condition);
}
}
if (aggressive) {
player->addInFightTicks();
}
}
if (payCost) {
Spell::postCastSpell(player, getManaCost(player), getSoulCost());
}
}
void Spell::postCastSpell(Player* player, uint32_t manaCost, uint32_t soulCost)
{
if (manaCost > 0) {
player->addManaSpent(manaCost);
player->changeMana(-static_cast<int32_t>(manaCost));
}
if (!player->hasFlag(PlayerFlag_HasInfiniteSoul)) {
if (soulCost > 0) {
player->changeSoul(-static_cast<int32_t>(soulCost));
}
}
}
uint32_t Spell::getManaCost(const Player* player) const
{
if (mana != 0) {
return mana;
}
if (manaPercent != 0) {
uint32_t maxMana = player->getMaxMana();
uint32_t manaCost = (maxMana * manaPercent) / 100;
return manaCost;
}
return 0;
}
std::string InstantSpell::getScriptEventName() const
{
return "onCastSpell";
}
bool InstantSpell::configureEvent(const pugi::xml_node& node)
{
if (!Spell::configureSpell(node)) {
return false;
}
if (!TalkAction::configureEvent(node)) {
return false;
}
spellType = SPELL_INSTANT;
pugi::xml_attribute attr;
if ((attr = node.attribute("params"))) {
hasParam = attr.as_bool();
}
if ((attr = node.attribute("playernameparam"))) {
hasPlayerNameParam = attr.as_bool();
}
if ((attr = node.attribute("direction"))) {
needDirection = attr.as_bool();
} else if ((attr = node.attribute("casterTargetOrDirection"))) {
casterTargetOrDirection = attr.as_bool();
}
if ((attr = node.attribute("blockwalls"))) {
checkLineOfSight = attr.as_bool();
}
return true;
}
bool InstantSpell::playerCastInstant(Player* player, std::string& param)
{
if (!playerSpellCheck(player)) {
return false;
}
LuaVariant var;
if (selfTarget) {
var.type = VARIANT_NUMBER;
var.number = player->getID();
} else if (needTarget || casterTargetOrDirection) {
Creature* target = nullptr;
bool useDirection = false;
if (hasParam) {
Player* playerTarget = nullptr;
ReturnValue ret = g_game.getPlayerByNameWildcard(param, playerTarget);
if (playerTarget && playerTarget->isAccessPlayer() && !player->isAccessPlayer()) {
playerTarget = nullptr;
}
target = playerTarget;
if (!target || target->getHealth() <= 0) {
if (!casterTargetOrDirection) {
if (cooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLCOOLDOWN, cooldown, 0, false, spellId);
player->addCondition(condition);
}
if (groupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, groupCooldown, 0, false, group);
player->addCondition(condition);
}
if (secondaryGroupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, secondaryGroupCooldown, 0, false, secondaryGroup);
player->addCondition(condition);
}
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
useDirection = true;
}
if (playerTarget) {
param = playerTarget->getName();
}
} else {
target = player->getAttackedCreature();
if (!target || target->getHealth() <= 0) {
if (!casterTargetOrDirection) {
player->sendCancelMessage(RETURNVALUE_YOUCANONLYUSEITONCREATURES);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
useDirection = true;
}
}
if (!useDirection) {
if (!canThrowSpell(player, target)) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
var.type = VARIANT_NUMBER;
var.number = target->getID();
} else {
var.type = VARIANT_POSITION;
var.pos = Spells::getCasterPosition(player, player->getDirection());
if (!playerInstantSpellCheck(player, var.pos)) {
return false;
}
}
} else if (hasParam) {
var.type = VARIANT_STRING;
if (getHasPlayerNameParam()) {
Player* playerTarget = nullptr;
ReturnValue ret = g_game.getPlayerByNameWildcard(param, playerTarget);
if (ret != RETURNVALUE_NOERROR) {
if (cooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLCOOLDOWN, cooldown, 0, false, spellId);
player->addCondition(condition);
}
if (groupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, groupCooldown, 0, false, group);
player->addCondition(condition);
}
if (secondaryGroupCooldown > 0) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_SPELLGROUPCOOLDOWN, secondaryGroupCooldown, 0, false, secondaryGroup);
player->addCondition(condition);
}
player->sendCancelMessage(ret);
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
return false;
}
if (playerTarget && (!playerTarget->isAccessPlayer() || player->isAccessPlayer())) {
param = playerTarget->getName();
}
}
var.text = param;
} else {
var.type = VARIANT_POSITION;
if (needDirection) {
var.pos = Spells::getCasterPosition(player, player->getDirection());
} else {
var.pos = player->getPosition();
}
if (!playerInstantSpellCheck(player, var.pos)) {
return false;
}
}
bool result = internalCastSpell(player, var);
if (result) {
postCastSpell(player);
}
return result;
}
bool InstantSpell::canThrowSpell(const Creature* creature, const Creature* target) const
{
const Position& fromPos = creature->getPosition();
const Position& toPos = target->getPosition();
if (fromPos.z != toPos.z ||
(range == -1 && !g_game.canThrowObjectTo(fromPos, toPos, checkLineOfSight, 7, 5)) ||
(range != -1 && !g_game.canThrowObjectTo(fromPos, toPos, checkLineOfSight, range, range))) {
return false;
}
return true;
}
bool InstantSpell::castSpell(Creature* creature)
{
LuaVariant var;
if (casterTargetOrDirection) {
Creature* target = creature->getAttackedCreature();
if (target && target->getHealth() > 0) {
if (!canThrowSpell(creature, target)) {
return false;
}
var.type = VARIANT_NUMBER;
var.number = target->getID();
return internalCastSpell(creature, var);
}
return false;
} else if (needDirection) {
var.type = VARIANT_POSITION;
var.pos = Spells::getCasterPosition(creature, creature->getDirection());
} else {
var.type = VARIANT_POSITION;
var.pos = creature->getPosition();
}
return internalCastSpell(creature, var);
}
bool InstantSpell::castSpell(Creature* creature, Creature* target)
{
if (needTarget) {
LuaVariant var;
var.type = VARIANT_NUMBER;
var.number = target->getID();
return internalCastSpell(creature, var);
} else {
return castSpell(creature);
}
}
bool InstantSpell::internalCastSpell(Creature* creature, const LuaVariant& var)
{
return executeCastSpell(creature, var);
}
bool InstantSpell::executeCastSpell(Creature* creature, const LuaVariant& var)
{
//onCastSpell(creature, var)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - InstantSpell::executeCastSpell] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushVariant(L, var);
return scriptInterface->callFunction(2);
}
bool InstantSpell::canCast(const Player* player) const
{
if (player->hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
if (isLearnable()) {
if (player->hasLearnedInstantSpell(getName())) {
return true;
}
} else {
if (vocSpellMap.empty() || vocSpellMap.find(player->getVocationId()) != vocSpellMap.end()) {
return true;
}
}
return false;
}
std::string RuneSpell::getScriptEventName() const
{
return "onCastSpell";
}
bool RuneSpell::configureEvent(const pugi::xml_node& node)
{
if (!Spell::configureSpell(node)) {
return false;
}
if (!Action::configureEvent(node)) {
return false;
}
spellType = SPELL_RUNE;
pugi::xml_attribute attr;
if (!(attr = node.attribute("id"))) {
std::cout << "[Error - RuneSpell::configureSpell] Rune spell without id." << std::endl;
return false;
}
runeId = pugi::cast<uint16_t>(attr.value());
if ((attr = node.attribute("charges"))) {
charges = pugi::cast<uint32_t>(attr.value());
} else {
charges = 0;
}
hasCharges = (charges > 0);
if (magLevel != 0 || level != 0) {
//Change information in the ItemType to get accurate description
ItemType& iType = Item::items.getItemType(runeId);
iType.runeMagLevel = magLevel;
iType.runeLevel = level;
iType.charges = charges;
}
return true;
}
ReturnValue RuneSpell::canExecuteAction(const Player* player, const Position& toPos)
{
if (player->hasFlag(PlayerFlag_CannotUseSpells)) {
return RETURNVALUE_CANNOTUSETHISOBJECT;
}
ReturnValue ret = Action::canExecuteAction(player, toPos);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (toPos.x == 0xFFFF) {
if (needTarget) {
return RETURNVALUE_CANONLYUSETHISRUNEONCREATURES;
} else if (!selfTarget) {
return RETURNVALUE_NOTENOUGHROOM;
}
}
return RETURNVALUE_NOERROR;
}
bool RuneSpell::executeUse(Player* player, Item* item, const Position&, Thing* target, const Position& toPosition, bool isHotkey)
{
if (!playerRuneSpellCheck(player, toPosition)) {
return false;
}
if (!scripted) {
return false;
}
LuaVariant var;
if (needTarget) {
var.type = VARIANT_NUMBER;
if (target == nullptr) {
Tile* toTile = g_game.map.getTile(toPosition);
if (toTile) {
const Creature* visibleCreature = toTile->getBottomVisibleCreature(player);
if (visibleCreature) {
var.number = visibleCreature->getID();
}
}
} else {
var.number = target->getCreature()->getID();
}
} else {
var.type = VARIANT_POSITION;
var.pos = toPosition;
}
if (!internalCastSpell(player, var, isHotkey)) {
return false;
}
postCastSpell(player);
target = g_game.getCreatureByID(var.number);
if (getPzLock() && target) {
player->onAttackedCreature(target->getCreature());
}
if (hasCharges && item && g_config.getBoolean(ConfigManager::REMOVE_RUNE_CHARGES)) {
int32_t newCount = std::max<int32_t>(0, item->getItemCount() - 1);
g_game.transformItem(item, item->getID(), newCount);
}
return true;
}
bool RuneSpell::castSpell(Creature* creature)
{
LuaVariant var;
var.type = VARIANT_NUMBER;
var.number = creature->getID();
return internalCastSpell(creature, var, false);
}
bool RuneSpell::castSpell(Creature* creature, Creature* target)
{
LuaVariant var;
var.type = VARIANT_NUMBER;
var.number = target->getID();
return internalCastSpell(creature, var, false);
}
bool RuneSpell::internalCastSpell(Creature* creature, const LuaVariant& var, bool isHotkey)
{
bool result;
if (scripted) {
result = executeCastSpell(creature, var, isHotkey);
} else {
result = false;
}
return result;
}
bool RuneSpell::executeCastSpell(Creature* creature, const LuaVariant& var, bool isHotkey)
{
//onCastSpell(creature, var, isHotkey)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - RuneSpell::executeCastSpell] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushVariant(L, var);
LuaScriptInterface::pushBoolean(L, isHotkey);
return scriptInterface->callFunction(3);
}
| 1 | 19,019 | Here you are assigning it to a local variable; in the original code it was assigned to combat (the CombatSpell member). Not sure if CombatSpell needs to use a shared_ptr, since currently it does not and it works well. | otland-forgottenserver | cpp
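A sketch of keeping the fetched object on the member instead, assuming the `combat` field is migrated to `Combat_ptr` as well; with only a local, the shared_ptr would be released as soon as the function returns:

```cpp
bool CombatSpell::loadScriptCombat()
{
	// Keep the shared_ptr alive on the CombatSpell member, not a temporary local.
	combat = g_luaEnvironment.getCombatObject(g_luaEnvironment.lastCombatId);
	return combat != nullptr;
}
```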
@@ -1,7 +1,5 @@
<section id="terms">
<dl>
- <dt><p>How often is this video tutorial offered?</p></dt>
- <dd><p>This online video tutorial starts as soon as you register.</p></dd>
<dt><p>What if I'm not happy?</p></dt>
<dd><p>If you’re not happy, just let us know within 30 days and we’ll refund your money. It’s as simple as that.</p></dd>
</dl> | 1 | <section id="terms">
<dl>
<dt><p>How often is this video tutorial offered?</p></dt>
<dd><p>This online video tutorial starts as soon as you register.</p></dd>
<dt><p>What if I'm not happy?</p></dt>
<dd><p>If you’re not happy, just let us know within 30 days and we’ll refund your money. It’s as simple as that.</p></dd>
</dl>
</section>
| 1 | 13,284 | Should we also remove this? This applies to subscriptions in general, but it seems sort of weird in the context of products now. | thoughtbot-upcase | rb |
@@ -27,7 +27,6 @@ from dagster.core.errors import DagsterExecutionInterruptedError, DagsterInvaria
from dagster.seven import IS_WINDOWS, multiprocessing
from dagster.seven.abc import Mapping
-from .alert import make_email_on_pipeline_failure_sensor, make_email_on_run_failure_sensor
from .merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
| 1 | import contextlib
import contextvars
import datetime
import errno
import functools
import inspect
import os
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from collections import OrderedDict, defaultdict, namedtuple
from datetime import timezone
from enum import Enum
from typing import TYPE_CHECKING, Callable, ContextManager, Generator, Generic, Iterator
from typing import Mapping as TypingMapping
from typing import Optional, Type, TypeVar, Union, cast
from warnings import warn
import _thread as thread
import yaml
from dagster import check, seven
from dagster.core.errors import DagsterExecutionInterruptedError, DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, multiprocessing
from dagster.seven.abc import Mapping
from .alert import make_email_on_pipeline_failure_sensor, make_email_on_run_failure_sensor
from .merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
if TYPE_CHECKING:
from dagster.core.events import DagsterEvent
EPOCH = datetime.datetime.utcfromtimestamp(0)
PICKLE_PROTOCOL = 4
DEFAULT_WORKSPACE_YAML_FILENAME = "workspace.yaml"
def file_relative_path(dunderfile: str, relative_path: str) -> str:
"""Get a path relative to the currently executing Python file.
This function is useful when one needs to load a file that is relative to the position of
the current file. (Such as when you encode a configuration file path in a source file and want
it runnable from any current working directory.)
Args:
dunderfile (str): Should always be ``__file__``.
relative_path (str): Path to get relative to the currently executing file.
**Examples**:
.. code-block:: python
file_relative_path(__file__, 'path/relative/to/file')
"""
check.str_param(dunderfile, "dunderfile")
check.str_param(relative_path, "relative_path")
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path: str) -> str:
"""
Useful for testing with local files. Use a path relative to where the
test resides and this function will return the absolute path
of that file. Otherwise it will be relative to script that
ran the test
Note: this is function is very, very expensive (on the order of 1
millisecond per invocation) so this should only be used in performance
insensitive contexts. Prefer file_relative_path for anything with
performance constraints.
"""
# from http://bit.ly/2snyC6s
check.str_param(file_path, "file_path")
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, "string")
string = re.sub(r"^[\-_\.]", "", str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r"[\-_\.\s]([a-z])", lambda matched: str(matched.group(1)).upper(), string[1:]
)
def ensure_single_item(ddict):
check.dict_param(ddict, "ddict")
check.param_invariant(len(ddict) == 1, "ddict", "Expected dict with single item")
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
""" "Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
"""
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# TODO: Make frozendict generic for type annotations
# https://github.com/dagster-io/dagster/issues/3641
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__ # type: ignore[assignment]
popitem = __readonly__
clear = __readonly__
update = __readonly__ # type: ignore[assignment]
setdefault = __readonly__
del __readonly__
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# Like frozendict, implement __reduce__ and __setstate__ to handle pickling.
# Otherwise, __setstate__ will be called to restore the frozenlist, causing
# a RuntimeError because frozenlist is not mutable.
def __reduce__(self):
return (frozenlist, (), list(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__ # type: ignore[assignment]
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__ # type: ignore[assignment]
def __hash__(self):
return hash(tuple(self))
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output([sys.executable, path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
from dagster.core.test_utils import instance_for_test
with instance_for_test():
cli_cmd = [
sys.executable,
"-m",
"dagster",
"pipeline",
"execute",
"-f",
path,
"-a",
pipeline_fn_name,
]
if env_file:
cli_cmd.append("-c")
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe) # pylint: disable=print-call
raise cpe
def safe_tempfile_path_unmanaged() -> str:
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
return Path(path).as_posix()
@contextlib.contextmanager
def safe_tempfile_path() -> Iterator[str]:
try:
path = safe_tempfile_path_unmanaged()
yield path
finally:
if os.path.exists(path):
os.unlink(path)
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, "a"):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
send_interrupt()
def send_interrupt():
if IS_WINDOWS:
# This will raise a KeyboardInterrupt in python land - meaning this wont be able to
# interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(termination_event, "termination_event", ttype=type(multiprocessing.Event()))
int_thread = threading.Thread(
target=_kill_on_event, args=(termination_event,), name="kill-on-event"
)
int_thread.daemon = True
int_thread.start()
T = TypeVar("T")
# Executes the next() function within an instance of the supplied context manager class
# (leaving the context before yielding each result)
def iterate_with_context(
context_fn: Callable[[], ContextManager], iterator: Iterator[T]
) -> Iterator[T]:
while True:
# Allow interrupts during user code so that we can terminate slow/hanging steps
with context_fn():
try:
next_output = next(iterator)
except StopIteration:
return
yield next_output
def datetime_as_float(dt):
check.inst_param(dt, "dt", datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, "self", key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, "new_tags", key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
GeneratedContext = TypeVar("GeneratedContext")
class EventGenerationManager(Generic[GeneratedContext]):
"""Utility class that wraps an event generator function, that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
"""
def __init__(
self,
generator: Generator[Union["DagsterEvent", GeneratedContext], None, None],
object_cls: Type[GeneratedContext],
require_object: Optional[bool] = True,
):
self.generator = check.generator(generator)
self.object_cls: Type[GeneratedContext] = check.type_param(object_cls, "object_cls")
self.require_object = check.bool_param(require_object, "require_object")
self.object: Optional[GeneratedContext] = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self) -> Iterator["DagsterEvent"]:
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
"self.object",
self.object_cls,
"generator never yielded object of type {}".format(self.object_cls.__name__),
)
def get_object(self) -> GeneratedContext:
if not self.did_setup:
check.failed("Called `get_object` before `generate_setup_events`")
return cast(GeneratedContext, self.object)
def generate_teardown_events(self) -> Iterator["DagsterEvent"]:
self.did_teardown = True
if self.object:
yield from self.generator
def utc_datetime_from_timestamp(timestamp):
tz = timezone.utc
return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def utc_datetime_from_naive(dt):
tz = timezone.utc
return dt.replace(tzinfo=tz)
def is_enum_value(value):
return False if value is None else issubclass(value.__class__, Enum)
def git_repository_root():
return subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip()
def segfault():
"""Reliable cross-Python version segfault.
https://bugs.python.org/issue1215#msg143236
"""
import ctypes
ctypes.string_at(0)
def find_free_port():
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
@contextlib.contextmanager
def alter_sys_path(to_add, to_remove):
to_restore = [path for path in sys.path]
# remove paths
for path in to_remove:
if path in sys.path:
sys.path.remove(path)
# add paths
for path in to_add:
sys.path.insert(0, path)
try:
yield
finally:
sys.path = to_restore
@contextlib.contextmanager
def restore_sys_modules():
sys_modules = {k: v for k, v in sys.modules.items()}
try:
yield
finally:
to_delete = set(sys.modules) - set(sys_modules)
for key in to_delete:
del sys.modules[key]
def process_is_alive(pid):
if IS_WINDOWS:
import psutil # pylint: disable=import-error
return psutil.pid_exists(pid=pid)
else:
try:
subprocess.check_output(["ps", str(pid)])
except subprocess.CalledProcessError as exc:
assert exc.returncode == 1
return False
return True
def compose(*args):
"""
Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).
"""
# reduce using functional composition over all the arguments, with the identity function as
# initializer
return functools.reduce(lambda f, g: lambda x: f(g(x)), args, lambda x: x)
def dict_without_keys(ddict, *keys):
return {key: value for key, value in ddict.items() if key not in set(keys)}
class Counter:
def __init__(self):
self._lock = threading.Lock()
self._counts = OrderedDict()
super(Counter, self).__init__()
def increment(self, key: str):
with self._lock:
self._counts[key] = self._counts.get(key, 0) + 1
def counts(self) -> TypingMapping[str, int]:
with self._lock:
copy = {k: v for k, v in self._counts.items()}
return copy
traced_counter = contextvars.ContextVar("traced_counts", default=Counter())
def traced(func=None):
"""
A decorator that keeps track of how many times a function is called.
"""
def inner(*args, **kwargs):
counter = traced_counter.get()
if counter and isinstance(counter, Counter):
counter.increment(func.__qualname__)
return func(*args, **kwargs)
return inner
| 1 | 18,150 | I think we should keep these, o.w. our examples will be wrong (where we're importing from `dagster.utils`). Also, we might break folks who've imported following our docs. | dagster-io-dagster | py |
@@ -24,7 +24,7 @@ namespace OpenTelemetry.Trace
/// <summary>
/// Build TracerProvider with Resource, Sampler, Processors and Instrumentation.
/// </summary>
- internal class TracerProviderBuilderSdk : TracerProviderBuilder
+ public class TracerProviderBuilderSdk : TracerProviderBuilder
{
private readonly List<InstrumentationFactory> instrumentationFactories = new List<InstrumentationFactory>();
private readonly List<BaseProcessor<Activity>> processors = new List<BaseProcessor<Activity>>(); | 1 | // <copyright file="TracerProviderBuilderSdk.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Trace
{
/// <summary>
/// Build TracerProvider with Resource, Sampler, Processors and Instrumentation.
/// </summary>
internal class TracerProviderBuilderSdk : TracerProviderBuilder
{
private readonly List<InstrumentationFactory> instrumentationFactories = new List<InstrumentationFactory>();
private readonly List<BaseProcessor<Activity>> processors = new List<BaseProcessor<Activity>>();
private readonly List<string> sources = new List<string>();
private readonly Dictionary<string, bool> legacyActivityOperationNames = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
private ResourceBuilder resourceBuilder = ResourceBuilder.CreateDefault();
private Sampler sampler = new ParentBasedSampler(new AlwaysOnSampler());
internal TracerProviderBuilderSdk()
{
}
/// <summary>
/// Adds an instrumentation to the provider.
/// </summary>
/// <typeparam name="TInstrumentation">Type of instrumentation class.</typeparam>
/// <param name="instrumentationFactory">Function that builds instrumentation.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
public override TracerProviderBuilder AddInstrumentation<TInstrumentation>(
Func<TInstrumentation> instrumentationFactory)
where TInstrumentation : class
{
if (instrumentationFactory == null)
{
throw new ArgumentNullException(nameof(instrumentationFactory));
}
this.instrumentationFactories.Add(
new InstrumentationFactory(
typeof(TInstrumentation).Name,
"semver:" + typeof(TInstrumentation).Assembly.GetName().Version,
instrumentationFactory));
return this;
}
/// <summary>
/// Adds given activitysource names to the list of subscribed sources.
/// </summary>
/// <param name="names">Activity source names.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
public override TracerProviderBuilder AddSource(params string[] names)
{
if (names == null)
{
throw new ArgumentNullException(nameof(names));
}
foreach (var name in names)
{
if (string.IsNullOrWhiteSpace(name))
{
throw new ArgumentException($"{nameof(names)} contains null or whitespace string.");
}
// TODO: We need to fix the listening model.
// Today it ignores version.
this.sources.Add(name);
}
return this;
}
/// <summary>
/// Sets whether the status of <see cref="System.Diagnostics.Activity"/>
/// should be set to <c>Status.Error</c> when it ended abnormally due to an unhandled exception.
/// </summary>
/// <param name="enabled">Enabled or not.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal TracerProviderBuilder SetErrorStatusOnException(bool enabled)
{
ExceptionProcessor existingExceptionProcessor = null;
if (this.processors.Count > 0)
{
existingExceptionProcessor = this.processors[0] as ExceptionProcessor;
}
if (enabled)
{
if (existingExceptionProcessor == null)
{
try
{
this.processors.Insert(0, new ExceptionProcessor());
}
catch (Exception ex)
{
throw new NotSupportedException("SetErrorStatusOnException is not supported on this platform.", ex);
}
}
}
else
{
if (existingExceptionProcessor != null)
{
this.processors.RemoveAt(0);
existingExceptionProcessor.Dispose();
}
}
return this;
}
/// <summary>
/// Sets sampler.
/// </summary>
/// <param name="sampler">Sampler instance.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal TracerProviderBuilder SetSampler(Sampler sampler)
{
this.sampler = sampler ?? throw new ArgumentNullException(nameof(sampler));
return this;
}
/// <summary>
/// Sets the <see cref="ResourceBuilder"/> from which the Resource associated with
/// this provider is built from. Overwrites currently set ResourceBuilder.
/// </summary>
/// <param name="resourceBuilder"><see cref="ResourceBuilder"/> from which Resource will be built.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal TracerProviderBuilder SetResourceBuilder(ResourceBuilder resourceBuilder)
{
this.resourceBuilder = resourceBuilder ?? throw new ArgumentNullException(nameof(resourceBuilder));
return this;
}
/// <summary>
/// Adds processor to the provider.
/// </summary>
/// <param name="processor">Activity processor to add.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal TracerProviderBuilder AddProcessor(BaseProcessor<Activity> processor)
{
if (processor == null)
{
throw new ArgumentNullException(nameof(processor));
}
this.processors.Add(processor);
return this;
}
/// <summary>
/// Adds a listener for <see cref="Activity"/> objects created with the given operation name to the <see cref="TracerProviderBuilder"/>.
/// </summary>
/// <remarks>
/// This is provided to capture legacy <see cref="Activity"/> objects created without using the <see cref="ActivitySource"/> API.
/// </remarks>
/// <param name="operationName">Operation name of the <see cref="Activity"/> objects to capture.</param>
/// <returns>Returns <see cref="TracerProviderBuilder"/> for chaining.</returns>
internal TracerProviderBuilder AddLegacySource(string operationName)
{
if (string.IsNullOrWhiteSpace(operationName))
{
throw new ArgumentException($"{nameof(operationName)} contains null or whitespace string.");
}
this.legacyActivityOperationNames[operationName] = true;
return this;
}
internal TracerProvider Build()
{
return new TracerProviderSdk(
this.resourceBuilder.Build(),
this.sources,
this.instrumentationFactories,
this.sampler,
this.processors,
this.legacyActivityOperationNames);
}
internal readonly struct InstrumentationFactory
{
public readonly string Name;
public readonly string Version;
public readonly Func<object> Factory;
internal InstrumentationFactory(string name, string version, Func<object> factory)
{
this.Name = name;
this.Version = version;
this.Factory = factory;
}
}
}
}
| 1 | 19,647 | This is good and something that I think opens a lot of possibilities. I think what would be even better is to change up `TracerProviderSdk` to take factories for the processors just like it already does for the instrumentations. Should probably use use a factory for the sampler. If we made those 2 changes then we could actually split this all up into the config and the build phase and we could use the factory methods in the hosting version to call into the IServiceProvider to get those instances. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -48,9 +48,14 @@ func (t *Table) Normalize() {
if len(t.Data) > 0 {
t.KeyValues[j] = t.Data[0][idx]
}
- v := values.New(t.KeyValues[j])
- if v.Type() == semantic.Invalid {
- panic(fmt.Errorf("invalid value: %s", t.KeyValues[j]))
+ var v values.Value
+ if t.KeyValues[j] == nil {
+ v = values.NewNull(flux.SemanticType(t.ColMeta[idx].Type))
+ } else {
+ v = values.New(t.KeyValues[j])
+ if v.Type() == semantic.Invalid {
+ panic(fmt.Errorf("invalid value: %s", t.KeyValues[j]))
+ }
}
vs[j] = v
} | 1 | package executetest
import (
"fmt"
"github.com/apache/arrow/go/arrow/array"
"github.com/influxdata/flux"
"github.com/influxdata/flux/arrow"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
// Table is an implementation of execute.Table
// It is designed to make it easy to statically declare the data within the table.
// Not all fields need to be set. See comments on each field.
// Use Normalize to ensure that all fields are set before equality comparisons.
type Table struct {
// GroupKey of the table. Does not need to be set explicitly.
GroupKey flux.GroupKey
// KeyCols is a list of column that are part of the group key.
// The column type is deduced from the ColMeta slice.
KeyCols []string
// KeyValues is a list of values for the group key columns.
// Only needs to be set when no data is present on the table.
KeyValues []interface{}
// ColMeta is a list of columns of the table.
ColMeta []flux.ColMeta
// Data is a list of rows, i.e. Data[row][col]
// Each row must be a list with length equal to len(ColMeta)
Data [][]interface{}
}
// Normalize ensures all fields of the table are set correctly.
func (t *Table) Normalize() {
if t.GroupKey == nil {
cols := make([]flux.ColMeta, len(t.KeyCols))
vs := make([]values.Value, len(t.KeyCols))
if len(t.KeyValues) != len(t.KeyCols) {
t.KeyValues = make([]interface{}, len(t.KeyCols))
}
for j, label := range t.KeyCols {
idx := execute.ColIdx(label, t.ColMeta)
if idx < 0 {
panic(fmt.Errorf("table invalid: missing group column %q", label))
}
cols[j] = t.ColMeta[idx]
if len(t.Data) > 0 {
t.KeyValues[j] = t.Data[0][idx]
}
v := values.New(t.KeyValues[j])
if v.Type() == semantic.Invalid {
panic(fmt.Errorf("invalid value: %s", t.KeyValues[j]))
}
vs[j] = v
}
t.GroupKey = execute.NewGroupKey(cols, vs)
}
}
func (t *Table) Empty() bool {
return len(t.Data) == 0
}
func (t *Table) RefCount(n int) {}
func (t *Table) Cols() []flux.ColMeta {
return t.ColMeta
}
func (t *Table) Key() flux.GroupKey {
t.Normalize()
return t.GroupKey
}
func (t *Table) Do(f func(flux.ColReader) error) error {
for _, r := range t.Data {
if err := f(ColReader{
key: t.Key(),
cols: t.ColMeta,
row: r,
}); err != nil {
return err
}
}
return nil
}
// RowWiseArrowTable is a flux Table implementation that
// calls f once for each row in its DoArrow method.
type RowWiseArrowTable struct {
*Table
}
// DoArrow calls f once for each row in the table
func (t *RowWiseArrowTable) DoArrow(f func(flux.ArrowColReader) error) error {
cols := make([]array.Interface, len(t.ColMeta))
for j, col := range t.ColMeta {
switch col.Type {
case flux.TBool:
b := arrow.NewBoolBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(bool))
} else {
b.AppendNull()
}
}
cols[j] = b.NewBooleanArray()
b.Release()
case flux.TFloat:
b := arrow.NewFloatBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(float64))
} else {
b.AppendNull()
}
}
cols[j] = b.NewFloat64Array()
b.Release()
case flux.TInt:
b := arrow.NewIntBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(int64))
} else {
b.AppendNull()
}
}
cols[j] = b.NewInt64Array()
b.Release()
case flux.TString:
b := arrow.NewStringBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.AppendString(v.(string))
} else {
b.AppendNull()
}
}
cols[j] = b.NewBinaryArray()
b.Release()
case flux.TTime:
b := arrow.NewIntBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(int64(v.(values.Time)))
} else {
b.AppendNull()
}
}
cols[j] = b.NewInt64Array()
b.Release()
case flux.TUInt:
b := arrow.NewUintBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(uint64))
} else {
b.AppendNull()
}
}
cols[j] = b.NewUint64Array()
b.Release()
}
}
release := func(cols []array.Interface) {
for _, arr := range cols {
arr.Release()
}
}
defer release(cols)
l := cols[0].Len()
for i := 0; i < l; i++ {
row := make([]array.Interface, len(t.ColMeta))
for j, col := range t.ColMeta {
switch col.Type {
case flux.TBool:
row[j] = arrow.BoolSlice(cols[j].(*array.Boolean), i, i+1)
case flux.TFloat:
row[j] = arrow.FloatSlice(cols[j].(*array.Float64), i, i+1)
case flux.TInt:
row[j] = arrow.IntSlice(cols[j].(*array.Int64), i, i+1)
case flux.TString:
row[j] = arrow.StringSlice(cols[j].(*array.Binary), i, i+1)
case flux.TTime:
row[j] = arrow.IntSlice(cols[j].(*array.Int64), i, i+1)
case flux.TUInt:
row[j] = arrow.UintSlice(cols[j].(*array.Uint64), i, i+1)
}
}
if err := f(&ArrowColReader{
key: t.Key(),
meta: t.ColMeta,
cols: row,
}); err != nil {
return err
}
release(row)
}
return nil
}
func (t *Table) DoArrow(f func(flux.ArrowColReader) error) error {
cols := make([]array.Interface, len(t.ColMeta))
for j, col := range t.ColMeta {
switch col.Type {
case flux.TBool:
b := arrow.NewBoolBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(bool))
} else {
b.AppendNull()
}
}
cols[j] = b.NewBooleanArray()
b.Release()
case flux.TFloat:
b := arrow.NewFloatBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(float64))
} else {
b.AppendNull()
}
}
cols[j] = b.NewFloat64Array()
b.Release()
case flux.TInt:
b := arrow.NewIntBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(int64))
} else {
b.AppendNull()
}
}
cols[j] = b.NewInt64Array()
b.Release()
case flux.TString:
b := arrow.NewStringBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.AppendString(v.(string))
} else {
b.AppendNull()
}
}
cols[j] = b.NewBinaryArray()
b.Release()
case flux.TTime:
b := arrow.NewIntBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(int64(v.(values.Time)))
} else {
b.AppendNull()
}
}
cols[j] = b.NewInt64Array()
b.Release()
case flux.TUInt:
b := arrow.NewUintBuilder(nil)
for i := range t.Data {
if v := t.Data[i][j]; v != nil {
b.Append(v.(uint64))
} else {
b.AppendNull()
}
}
cols[j] = b.NewUint64Array()
b.Release()
}
}
cr := &ArrowColReader{
key: t.Key(),
meta: t.ColMeta,
cols: cols,
}
return f(cr)
}
func (t *Table) Statistics() flux.Statistics { return flux.Statistics{} }
type ColReader struct {
key flux.GroupKey
cols []flux.ColMeta
row []interface{}
}
func (cr ColReader) Cols() []flux.ColMeta {
return cr.cols
}
func (cr ColReader) Key() flux.GroupKey {
return cr.key
}
func (cr ColReader) Len() int {
return 1
}
func (cr ColReader) Bools(j int) []bool {
return []bool{cr.row[j].(bool)}
}
func (cr ColReader) Ints(j int) []int64 {
return []int64{cr.row[j].(int64)}
}
func (cr ColReader) UInts(j int) []uint64 {
return []uint64{cr.row[j].(uint64)}
}
func (cr ColReader) Floats(j int) []float64 {
return []float64{cr.row[j].(float64)}
}
func (cr ColReader) Strings(j int) []string {
return []string{cr.row[j].(string)}
}
func (cr ColReader) Times(j int) []execute.Time {
return []execute.Time{cr.row[j].(execute.Time)}
}
type ArrowColReader struct {
key flux.GroupKey
meta []flux.ColMeta
cols []array.Interface
}
func (cr *ArrowColReader) Key() flux.GroupKey {
return cr.key
}
func (cr *ArrowColReader) Cols() []flux.ColMeta {
return cr.meta
}
func (cr *ArrowColReader) Len() int {
if len(cr.cols) == 0 {
return 0
}
return cr.cols[0].Len()
}
func (cr *ArrowColReader) Bools(j int) *array.Boolean {
return cr.cols[j].(*array.Boolean)
}
func (cr *ArrowColReader) Ints(j int) *array.Int64 {
return cr.cols[j].(*array.Int64)
}
func (cr *ArrowColReader) UInts(j int) *array.Uint64 {
return cr.cols[j].(*array.Uint64)
}
func (cr *ArrowColReader) Floats(j int) *array.Float64 {
return cr.cols[j].(*array.Float64)
}
func (cr *ArrowColReader) Strings(j int) *array.Binary {
return cr.cols[j].(*array.Binary)
}
func (cr *ArrowColReader) Times(j int) *array.Int64 {
return cr.cols[j].(*array.Int64)
}
func TablesFromCache(c execute.DataCache) (tables []*Table, err error) {
c.ForEach(func(key flux.GroupKey) {
if err != nil {
return
}
var tbl flux.Table
tbl, err = c.Table(key)
if err != nil {
return
}
var cb *Table
cb, err = ConvertTable(tbl)
if err != nil {
return
}
tables = append(tables, cb)
c.ExpireTable(key)
})
return tables, nil
}
func ConvertTable(tbl flux.Table) (*Table, error) {
key := tbl.Key()
blk := &Table{
GroupKey: key,
ColMeta: tbl.Cols(),
}
keyCols := key.Cols()
if len(keyCols) > 0 {
blk.KeyCols = make([]string, len(keyCols))
blk.KeyValues = make([]interface{}, len(keyCols))
for j, c := range keyCols {
blk.KeyCols[j] = c.Label
var v interface{}
switch c.Type {
case flux.TBool:
v = key.ValueBool(j)
case flux.TUInt:
v = key.ValueUInt(j)
case flux.TInt:
v = key.ValueInt(j)
case flux.TFloat:
v = key.ValueFloat(j)
case flux.TString:
v = key.ValueString(j)
case flux.TTime:
v = key.ValueTime(j)
default:
return nil, fmt.Errorf("unsupported column type %v", c.Type)
}
blk.KeyValues[j] = v
}
}
err := tbl.DoArrow(func(cr flux.ArrowColReader) error {
l := cr.Len()
for i := 0; i < l; i++ {
row := make([]interface{}, len(blk.ColMeta))
for j, c := range blk.ColMeta {
switch c.Type {
case flux.TBool:
if col := cr.Bools(j); col.IsValid(i) {
row[j] = col.Value(i)
}
case flux.TInt:
if col := cr.Ints(j); col.IsValid(i) {
row[j] = col.Value(i)
}
case flux.TUInt:
if col := cr.UInts(j); col.IsValid(i) {
row[j] = col.Value(i)
}
case flux.TFloat:
if col := cr.Floats(j); col.IsValid(i) {
row[j] = col.Value(i)
}
case flux.TString:
if col := cr.Strings(j); col.IsValid(i) {
row[j] = col.ValueString(i)
}
case flux.TTime:
if col := cr.Times(j); col.IsValid(i) {
row[j] = values.Time(col.Value(i))
}
default:
panic(fmt.Errorf("unknown column type %s", c.Type))
}
}
blk.Data = append(blk.Data, row)
}
return nil
})
if err != nil {
return nil, err
}
return blk, nil
}
type SortedTables []*Table
func (b SortedTables) Len() int {
return len(b)
}
func (b SortedTables) Less(i int, j int) bool {
return b[i].Key().Less(b[j].Key())
}
func (b SortedTables) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
// NormalizeTables ensures that each table is normalized
func NormalizeTables(bs []*Table) {
for _, b := range bs {
b.Key()
}
}
func MustCopyTable(tbl flux.Table) flux.Table {
cpy, _ := execute.CopyTable(tbl, UnlimitedAllocator)
return cpy
}
| 1 | 9,475 | Is there a way to make it so `values.New(nil)` works instead of adding a new function? | influxdata-flux | go |
@@ -85,7 +85,11 @@ class ApiMediaType extends AbstractType
*/
public function getParent()
{
- return 'sonata_media_api_form_doctrine_media';
+ // NEXT_MAJOR: Return 'Sonata\MediaBundle\Form\Type\ApiDoctrineMediaType'
+ // (when requirement of Symfony is >= 2.8)
+ return method_exists('Symfony\Component\Form\AbstractType', 'getBlockPrefix')
+ ? 'Sonata\MediaBundle\Form\Type\ApiDoctrineMediaType'
+ : 'sonata_media_api_form_doctrine_media';
}
/** | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Form\Type;
use Sonata\MediaBundle\Form\DataTransformer\ProviderDataTransformer;
use Sonata\MediaBundle\Provider\Pool;
use Symfony\Component\Form\AbstractType;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\OptionsResolver\OptionsResolver;
use Symfony\Component\OptionsResolver\OptionsResolverInterface;
/**
* Class ApiMediaType.
*
*
* @author Hugo Briand <[email protected]>
*/
class ApiMediaType extends AbstractType
{
/**
* @var Pool
*/
protected $mediaPool;
/**
* @var string
*/
protected $class;
/**
* @param Pool $mediaPool
* @param string $class
*/
public function __construct(Pool $mediaPool, $class)
{
$this->mediaPool = $mediaPool;
$this->class = $class;
}
/**
* {@inheritdoc}
*/
public function buildForm(FormBuilderInterface $builder, array $options)
{
$builder->addModelTransformer(new ProviderDataTransformer($this->mediaPool, $this->class, array(
'empty_on_new' => false,
)), true);
$provider = $this->mediaPool->getProvider($options['provider_name']);
$provider->buildMediaType($builder);
}
/**
* {@inheritdoc}
*
* @deprecated Remove it when bumping requirements to Symfony >=2.7
*/
public function setDefaultOptions(OptionsResolverInterface $resolver)
{
$this->configureOptions($resolver);
}
/**
* {@inheritdoc}
*/
public function configureOptions(OptionsResolver $resolver)
{
$resolver->setDefaults(array(
'provider_name' => 'sonata.media.provider.image',
'context' => 'api',
));
}
/**
* {@inheritdoc}
*/
public function getParent()
{
return 'sonata_media_api_form_doctrine_media';
}
/**
* {@inheritdoc}
*/
public function getBlockPrefix()
{
return 'sonata_media_api_form_media';
}
/**
* {@inheritdoc}
*/
public function getName()
{
return $this->getBlockPrefix();
}
}
| 1 | 8,217 | > when requirement of Symfony **will be** >= 2.8 Same for others. | sonata-project-SonataMediaBundle | php |
@@ -104,7 +104,8 @@ PROJECT_IAM_ROLES_SERVER = [
'roles/storage.objectViewer',
'roles/storage.objectCreator',
'roles/cloudsql.client',
- 'roles/logging.logWriter'
+ 'roles/logging.logWriter',
+ 'roles/iam.serviceAccountTokenCreator'
]
PROJECT_IAM_ROLES_CLIENT = [ | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used for the setup of Forseti."""
import os
from enum import Enum
class FirewallRuleAction(Enum):
"""Firewall rule action object."""
ALLOW = 'ALLOW'
DENY = 'DENY'
class FirewallRuleDirection(Enum):
"""Firewall rule direction object."""
INGRESS = 'INGRESS'
EGRESS = 'EGRESS'
class DeploymentStatus(Enum):
"""Deployment status."""
RUNNING = 'RUNNING'
DONE = 'DONE'
MAXIMUM_LOADING_TIME_IN_SECONDS = 600
DEFAULT_BUCKET_FMT_V1 = 'gs://{}-data-{}'
DEFAULT_BUCKET_FMT_V2 = 'gs://forseti-{}-{}'
REGEX_MATCH_FORSETI_V1_INSTANCE_NAME = r'^forseti-security-\d+-vm$'
FORSETI_V1_RULE_FILES = [
'bigquery_rules.yaml',
'blacklist_rules.yaml',
'bucket_rules.yaml',
'cloudsql_rules.yaml',
'firewall_rules.yaml',
'forwarding_rules.yaml',
'group_rules.yaml',
'iam_rules.yaml',
'iap_rules.yaml',
'instance_network_interface_rules.yaml',
'ke_rules.yaml',
'gke_rules.yaml']
GCLOUD_MIN_VERSION = (180, 0, 0)
GCLOUD_VERSION_REGEX = r'Google Cloud SDK (.*)'
GCLOUD_ALPHA_REGEX = r'alpha.*'
SERVICE_ACCT_NAME_FMT = 'forseti-{}-{}-{}'
SERVICE_ACCT_EMAIL_FMT = '{}@{}.iam.gserviceaccount.com'
INPUT_DEPLOYMENT_TEMPLATE_FILENAME = {
'server': 'deploy-forseti-server.yaml.in',
'client': 'deploy-forseti-client.yaml.in'
}
INPUT_CONFIGURATION_TEMPLATE_FILENAME = {
'server': 'forseti_conf_server.yaml.in',
'client': 'forseti_conf_client.yaml.in'
}
NOTIFICATION_SENDER_EMAIL = '[email protected]'
RESOURCE_TYPE_ARGS_MAP = {
'organizations': ['organizations'],
'folders': ['alpha', 'resource-manager', 'folders'],
'projects': ['projects'],
'forseti_project': ['projects'],
'service_accounts': ['iam', 'service-accounts']
}
# Roles
GCP_READ_IAM_ROLES = [
'roles/browser',
'roles/compute.networkViewer',
'roles/iam.securityReviewer',
'roles/appengine.appViewer',
'roles/bigquery.dataViewer',
'roles/servicemanagement.quotaViewer',
'roles/serviceusage.serviceUsageConsumer',
'roles/cloudsql.viewer'
]
GCP_WRITE_IAM_ROLES = [
'roles/compute.securityAdmin'
]
PROJECT_IAM_ROLES_SERVER = [
'roles/storage.objectViewer',
'roles/storage.objectCreator',
'roles/cloudsql.client',
'roles/logging.logWriter'
]
PROJECT_IAM_ROLES_CLIENT = [
'roles/storage.objectViewer',
'roles/logging.logWriter'
]
SVC_ACCT_ROLES = [
'roles/iam.serviceAccountKeyAdmin'
]
# Required APIs
REQUIRED_APIS = [
{'name': 'Admin SDK',
'service': 'admin.googleapis.com'},
{'name': 'AppEngine Admin',
'service': 'appengine.googleapis.com'},
{'name': 'BigQuery',
'service': 'bigquery-json.googleapis.com'},
{'name': 'Cloud Billing',
'service': 'cloudbilling.googleapis.com'},
{'name': 'Cloud Resource Manager',
'service': 'cloudresourcemanager.googleapis.com'},
{'name': 'Cloud SQL',
'service': 'sql-component.googleapis.com'},
{'name': 'Cloud SQL Admin',
'service': 'sqladmin.googleapis.com'},
{'name': 'Compute Engine',
'service': 'compute.googleapis.com'},
{'name': 'Deployment Manager',
'service': 'deploymentmanager.googleapis.com'},
{'name': 'IAM',
'service': 'iam.googleapis.com'}
]
# Org Resource Types
RESOURCE_TYPES = ['organization', 'folder', 'project']
# Paths
ROOT_DIR_PATH = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__)))))
RULES_DIR_PATH = os.path.abspath(
os.path.join(
ROOT_DIR_PATH, 'rules'))
FORSETI_SRC_PATH = os.path.join(
ROOT_DIR_PATH, 'google', 'cloud', 'forseti')
FORSETI_CONF_PATH = ('{bucket_name}/configs/'
'forseti_conf_{installation_type}.yaml')
DEPLOYMENT_TEMPLATE_OUTPUT_PATH = '{}/deployment_templates/'
VERSIONFILE_REGEX = r'__version__ = \'(.*)\''
# Message templates
MESSAGE_GSUITE_DATA_COLLECTION = (
'To complete setup for G Suite Groups data collection, '
'follow the steps here:\n\n '
'https://forsetisecurity.org/docs/howto'
'/configure/gsuite-group-collection.html\n')
MESSAGE_SKIP_EMAIL = (
'If you would like to enable email notifications via '
'SendGrid, please refer to:\n\n'
' '
'http://forsetisecurity.org/docs/howto/configure/'
'email-notification.html\n\n')
MESSAGE_HAS_ROLE_SCRIPT = (
'Some roles could not be assigned to {} where you want '
'to grant Forseti access. A script `grant_forseti_roles.sh` '
'has been generated with the necessary commands to assign '
'those roles. Please run this script to assign the Forseti '
'roles so that Forseti will work properly.\n\n')
MESSAGE_ENABLE_GSUITE_GROUP_INSTRUCTIONS = (
'IMPORTANT NOTE\n'
'Your Forseti Security Installation will not work until '
'you enable GSuite data collection:\n'
'https://forsetisecurity.org/docs/howto/configure/gsuite'
'-group-collection.html\n')
MESSAGE_FORSETI_CONFIGURATION_INSTRUCTIONS = (
'For instructions on how to change your roles or configuration files:\n'
'http://forsetisecurity.org/docs/howto/deploy/gcp-deployment.html#move'
'-configuration-to-gcs')
MESSAGE_FORSETI_SENDGRID_INSTRUCTIONS = (
'If you would like to enable email notifications via SendGrid,'
' please refer to:\n'
'http://forsetisecurity.org/docs/howto/configure/email-notification.html\n'
)
MESSAGE_ASK_GSUITE_SUPERADMIN_EMAIL = (
'To read G Suite Groups and Users data, '
'please provide a G Suite super admin email address. '
'This step is NOT optional.')
MESSAGE_ASK_SENDGRID_API_KEY = (
'Forseti can send email notifications through SendGrid '
'API Key')
MESSAGE_FORSETI_CONFIGURATION_ACCESS_LEVEL = (
'Forseti can be configured to access an '
'organization, folder, or project.')
MESSAGE_NO_CLOUD_SHELL = (
'Forseti highly recommends running this setup within '
'Cloud Shell. If you would like to run the setup '
'outside Cloud Shell, please be sure to do the '
'following:\n\n'
'1) Create a project.\n'
'2) Enable billing for the project.\n'
'3) Install gcloud and authenticate your account using '
'"gcloud auth login".\n'
'4) Set your project using '
'"gcloud config project set <PROJECT_ID>".\n'
'5) Run this setup again, with the --no-cloudshell flag, '
'i.e.\n\n\tpython setup/installer.py --no-cloudshell\n')
MESSAGE_FORSETI_CONFIGURATION_GENERATED = (
'Forseti configuration file(s) has been generated.\n\n'
'{forseti_config_file_paths}\n\n')
MESSAGE_FORSETI_CONFIGURATION_GENERATED_DRY_RUN = (
'A Forseti configuration file has been generated. '
'After you create your deployment, copy this file to '
'the bucket created in the deployment:\n\n'
' gsutil cp {} {}/configs/forseti_conf_server.yaml\n\n')
MESSAGE_DEPLOYMENT_HAD_ISSUES = (
'Your deployment had some issues. Please review the error '
'messages. If you need help, please either file an issue '
'on our Github Issues or email '
'[email protected].\n')
MESSAGE_FORSETI_BRANCH_DEPLOYED = (
'Forseti (branch/version: {}) has been deployed to GCP.\n\n')
MESSAGE_DEPLOYMENT_TEMPLATE_LOCATION = (
'Your generated Deployment Manager template(s) can be '
'found here:\n\n{deployment_template_gcs_paths}\n\n')
MESSAGE_VIEW_DEPLOYMENT_DETAILS = (
'You can view the details of your deployment in the '
'Cloud Console:\n\n '
'https://console.cloud.google.com/deployments/details/'
'{}?project={}&organizationId={}\n\n')
MESSAGE_GCLOUD_VERSION_MISMATCH = (
'You need the following gcloud setup:\n\n'
'gcloud version >= {}\n'
'gcloud alpha components\n\n'
'To install gcloud alpha components: '
'gcloud components install alpha\n\n'
'To update gcloud: gcloud components update\n')
MESSAGE_CREATE_ROLE_SCRIPT = (
'One or more roles could not be assigned. Writing a '
'script with the commands to assign those roles. Please '
'give this script to someone (like an admin) who can '
'assign these roles for you. If you do not assign these '
'roles, Forseti may not work properly!')
MESSAGE_BILLING_NOT_ENABLED = (
'\nIt seems that billing is not enabled for your project. '
'You can check whether billing has been enabled in the '
'Cloud Platform Console:\n\n'
' https://console.cloud.google.com/billing/linkedaccount?'
'project={}&organizationId={}\n\n'
'Once you have enabled billing, re-run this setup.\n')
MESSAGE_NO_ORGANIZATION = (
'You need to have an organization set up to use Forseti. '
'Refer to the following documentation for more information.\n\n'
'https://cloud.google.com/resource-manager/docs/'
'creating-managing-organization')
MESSAGE_RUN_FREQUENCY = (
'Forseti will run once every 12 hours, you can configure the run '
'frequency in the server deployment template field "run-frequency" '
'and update the deployment using the deployment manager.')
# Questions templates
QUESTION_ENABLE_WRITE_ACCESS = (
'Enable write access for Forseti? '
'This allows Forseti to make changes to policies '
'(e.g. for Enforcer) (y/n): ')
QUESTION_GSUITE_SUPERADMIN_EMAIL = (
'Email: ')
QUESTION_SENDGRID_API_KEY = (
'What is your SendGrid API key? '
'(press [enter] to skip): ')
QUESTION_NOTIFICATION_RECIPIENT_EMAIL = (
'At what email address do you want to receive '
'notifications? (press [enter] to skip): ')
QUESTION_FORSETI_CONFIGURATION_ACCESS_LEVEL = (
'At what level do you want to enable Forseti '
'read (and optionally write) access?: ')
QUESTION_ACCESS_TO_GRANT_ROLES = (
'Do you have access to grant Forseti IAM '
'roles on the target {}? (y/n): ')
QUESTION_CHOOSE_FOLDER = (
'To find the folder, go to Cloud Console:\n\n'
'\thttps://console.cloud.google.com/'
'cloud-resource-manager?organizationId={}\n\n'
'Enter the folder id where you want '
'Forseti to crawl for data: ')
QUESTION_SHOULD_MIGRATE_FROM_V1 = (
'Forseti v1 detected, would you like to migrate the '
'existing configurations to v2? (y/n): '
)
| 1 | 30,112 | I'd like to see if this can just be set on the service account instead of the project. Using the SVC_ACCT_ROLES (which should otherwise be deleted as I don't think anything else is using it.) | forseti-security-forseti-security | py |
@@ -7,6 +7,7 @@ import android.content.Intent;
import android.content.SharedPreferences;
import android.content.res.Configuration;
import android.database.DataSetObserver;
+import android.graphics.Color;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler; | 1 | package de.danoeh.antennapod.activity;
import android.annotation.TargetApi;
import android.app.ProgressDialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.res.Configuration;
import android.database.DataSetObserver;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.support.design.widget.Snackbar;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBarDrawerToggle;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.util.TypedValue;
import android.view.ContextMenu;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ListView;
import com.bumptech.glide.Glide;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.Validate;
import java.util.List;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.adapter.NavListAdapter;
import de.danoeh.antennapod.core.asynctask.FeedRemover;
import de.danoeh.antennapod.core.dialog.ConfirmationDialog;
import de.danoeh.antennapod.core.event.MessageEvent;
import de.danoeh.antennapod.core.event.ProgressEvent;
import de.danoeh.antennapod.core.event.QueueEvent;
import de.danoeh.antennapod.core.feed.EventDistributor;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.preferences.PlaybackPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.playback.PlaybackService;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.util.FeedItemUtil;
import de.danoeh.antennapod.core.util.Flavors;
import de.danoeh.antennapod.core.util.StorageUtils;
import de.danoeh.antennapod.dialog.RatingDialog;
import de.danoeh.antennapod.dialog.RenameFeedDialog;
import de.danoeh.antennapod.fragment.AddFeedFragment;
import de.danoeh.antennapod.fragment.DownloadsFragment;
import de.danoeh.antennapod.fragment.EpisodesFragment;
import de.danoeh.antennapod.fragment.ExternalPlayerFragment;
import de.danoeh.antennapod.fragment.ItemlistFragment;
import de.danoeh.antennapod.fragment.PlaybackHistoryFragment;
import de.danoeh.antennapod.fragment.QueueFragment;
import de.danoeh.antennapod.fragment.SubscriptionFragment;
import de.danoeh.antennapod.menuhandler.NavDrawerActivity;
import de.greenrobot.event.EventBus;
import rx.Observable;
import rx.Subscription;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
/**
* The activity that is shown when the user launches the app.
*/
public class MainActivity extends CastEnabledActivity implements NavDrawerActivity {
private static final String TAG = "MainActivity";
private static final int EVENTS = EventDistributor.FEED_LIST_UPDATE
| EventDistributor.UNREAD_ITEMS_UPDATE;
public static final String PREF_NAME = "MainActivityPrefs";
public static final String PREF_IS_FIRST_LAUNCH = "prefMainActivityIsFirstLaunch";
private static final String PREF_LAST_FRAGMENT_TAG = "prefMainActivityLastFragmentTag";
public static final String EXTRA_NAV_TYPE = "nav_type";
public static final String EXTRA_NAV_INDEX = "nav_index";
public static final String EXTRA_FRAGMENT_TAG = "fragment_tag";
public static final String EXTRA_FRAGMENT_ARGS = "fragment_args";
public static final String EXTRA_FEED_ID = "fragment_feed_id";
private static final String SAVE_BACKSTACK_COUNT = "backstackCount";
private static final String SAVE_TITLE = "title";
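    // Fragments that can be selected as top-level destinations from the nav drawer.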
public static final String[] NAV_DRAWER_TAGS = {
QueueFragment.TAG,
EpisodesFragment.TAG,
SubscriptionFragment.TAG,
DownloadsFragment.TAG,
PlaybackHistoryFragment.TAG,
AddFeedFragment.TAG,
NavListAdapter.SUBSCRIPTION_LIST_TAG
};
private Toolbar toolbar;
private ExternalPlayerFragment externalPlayerFragment;
private DrawerLayout drawerLayout;
private View navDrawer;
private ListView navList;
private NavListAdapter navAdapter;
private int mPosition = -1;
private ActionBarDrawerToggle drawerToggle;
private CharSequence currentTitle;
private ProgressDialog pd;
private Subscription subscription;
@Override
public void onCreate(Bundle savedInstanceState) {
setTheme(UserPreferences.getNoTitleTheme());
super.onCreate(savedInstanceState);
StorageUtils.checkStorageAvailability(this);
setContentView(R.layout.main);
toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
findViewById(R.id.shadow).setVisibility(View.GONE);
int elevation = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 4,
getResources().getDisplayMetrics());
getSupportActionBar().setElevation(elevation);
}
currentTitle = getTitle();
drawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
navList = (ListView) findViewById(R.id.nav_list);
navDrawer = findViewById(R.id.nav_layout);
drawerToggle = new ActionBarDrawerToggle(this, drawerLayout, R.string.drawer_open, R.string.drawer_close);
if (savedInstanceState != null) {
int backstackCount = savedInstanceState.getInt(SAVE_BACKSTACK_COUNT, 0);
drawerToggle.setDrawerIndicatorEnabled(backstackCount == 0);
}
        drawerLayout.addDrawerListener(drawerToggle);
final FragmentManager fm = getSupportFragmentManager();
fm.addOnBackStackChangedListener(() -> drawerToggle.setDrawerIndicatorEnabled(fm.getBackStackEntryCount() == 0));
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
getSupportActionBar().setHomeButtonEnabled(true);
navAdapter = new NavListAdapter(itemAccess, this);
navList.setAdapter(navAdapter);
navList.setOnItemClickListener(navListClickListener);
navList.setOnItemLongClickListener(newListLongClickListener);
registerForContextMenu(navList);
navAdapter.registerDataSetObserver(new DataSetObserver() {
@Override
public void onChanged() {
selectedNavListIndex = getSelectedNavListIndex();
}
});
findViewById(R.id.nav_settings).setOnClickListener(v -> {
drawerLayout.closeDrawer(navDrawer);
startActivity(new Intent(MainActivity.this, PreferenceActivity.class));
});
FragmentTransaction transaction = fm.beginTransaction();
Fragment mainFragment = fm.findFragmentByTag("main");
if (mainFragment != null) {
transaction.replace(R.id.main_view, mainFragment);
} else {
String lastFragment = getLastNavFragment();
if (ArrayUtils.contains(NAV_DRAWER_TAGS, lastFragment)) {
loadFragment(lastFragment, null);
} else {
try {
                    loadFeedFragmentById(Long.parseLong(lastFragment), null);
} catch (NumberFormatException e) {
// it's not a number, this happens if we removed
// a label from the NAV_DRAWER_TAGS
// give them a nice default...
loadFragment(QueueFragment.TAG, null);
}
}
}
externalPlayerFragment = new ExternalPlayerFragment();
transaction.replace(R.id.playerFragment, externalPlayerFragment, ExternalPlayerFragment.TAG);
transaction.commit();
checkFirstLaunch();
}
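    /**
     * Remembers the tag of the currently shown fragment so it can be restored
     * on the next launch. Passing {@code null} clears the stored tag.
     */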
private void saveLastNavFragment(String tag) {
Log.d(TAG, "saveLastNavFragment(tag: " + tag +")");
SharedPreferences prefs = getSharedPreferences(PREF_NAME, MODE_PRIVATE);
SharedPreferences.Editor edit = prefs.edit();
if(tag != null) {
edit.putString(PREF_LAST_FRAGMENT_TAG, tag);
} else {
edit.remove(PREF_LAST_FRAGMENT_TAG);
}
edit.apply();
}
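    /**
     * Returns the tag of the fragment that was shown last, defaulting to the queue.
     */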
private String getLastNavFragment() {
SharedPreferences prefs = getSharedPreferences(PREF_NAME, MODE_PRIVATE);
String lastFragment = prefs.getString(PREF_LAST_FRAGMENT_TAG, QueueFragment.TAG);
Log.d(TAG, "getLastNavFragment() -> " + lastFragment);
return lastFragment;
}
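    /**
     * On the very first launch, opens the nav drawer after a short delay and
     * applies defaults that should only affect fresh installs.
     */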
private void checkFirstLaunch() {
SharedPreferences prefs = getSharedPreferences(PREF_NAME, MODE_PRIVATE);
if (prefs.getBoolean(PREF_IS_FIRST_LAUNCH, true)) {
new Handler().postDelayed(() -> drawerLayout.openDrawer(navDrawer), 1500);
// for backward compatibility, we only change defaults for fresh installs
UserPreferences.setUpdateInterval(12);
SharedPreferences.Editor edit = prefs.edit();
edit.putBoolean(PREF_IS_FIRST_LAUNCH, false);
            edit.apply();
}
}
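    /**
     * Shows a multi-choice dialog that lets the user hide or show individual
     * nav drawer entries.
     */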
private void showDrawerPreferencesDialog() {
final List<String> hiddenDrawerItems = UserPreferences.getHiddenDrawerItems();
String[] navLabels = new String[NAV_DRAWER_TAGS.length];
final boolean[] checked = new boolean[NAV_DRAWER_TAGS.length];
for (int i = 0; i < NAV_DRAWER_TAGS.length; i++) {
String tag = NAV_DRAWER_TAGS[i];
navLabels[i] = navAdapter.getLabel(tag);
if (!hiddenDrawerItems.contains(tag)) {
checked[i] = true;
}
}
AlertDialog.Builder builder = new AlertDialog.Builder(MainActivity.this);
builder.setTitle(R.string.drawer_preferences);
builder.setMultiChoiceItems(navLabels, checked, (dialog, which, isChecked) -> {
if (isChecked) {
hiddenDrawerItems.remove(NAV_DRAWER_TAGS[which]);
} else {
hiddenDrawerItems.add(NAV_DRAWER_TAGS[which]);
}
});
builder.setPositiveButton(R.string.confirm_label, (dialog, which) -> UserPreferences.setHiddenDrawerItems(hiddenDrawerItems));
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
}
public boolean isDrawerOpen() {
return drawerLayout != null && navDrawer != null && drawerLayout.isDrawerOpen(navDrawer);
}
public List<Feed> getFeeds() {
return (navDrawerData != null) ? navDrawerData.feeds : null;
}
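    /**
     * Loads either a built-in fragment or a feed fragment, depending on whether
     * the given nav list index falls into the static section or the
     * subscription section of the drawer.
     */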
private void loadFragment(int index, Bundle args) {
Log.d(TAG, "loadFragment(index: " + index + ", args: " + args + ")");
if (index < navAdapter.getSubscriptionOffset()) {
String tag = navAdapter.getTags().get(index);
loadFragment(tag, args);
} else {
int pos = index - navAdapter.getSubscriptionOffset();
loadFeedFragmentByPosition(pos, args);
}
}
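    /**
     * Replaces the main view with the built-in fragment identified by the given
     * tag, falling back to the queue for unknown tags.
     */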
public void loadFragment(String tag, Bundle args) {
Log.d(TAG, "loadFragment(tag: " + tag + ", args: " + args + ")");
Fragment fragment = null;
switch (tag) {
case QueueFragment.TAG:
fragment = new QueueFragment();
break;
case EpisodesFragment.TAG:
fragment = new EpisodesFragment();
break;
case DownloadsFragment.TAG:
fragment = new DownloadsFragment();
break;
case PlaybackHistoryFragment.TAG:
fragment = new PlaybackHistoryFragment();
break;
case AddFeedFragment.TAG:
fragment = new AddFeedFragment();
break;
case SubscriptionFragment.TAG:
                fragment = new SubscriptionFragment();
break;
default:
// default to the queue
tag = QueueFragment.TAG;
fragment = new QueueFragment();
args = null;
break;
}
currentTitle = navAdapter.getLabel(tag);
getSupportActionBar().setTitle(currentTitle);
saveLastNavFragment(tag);
if (args != null) {
fragment.setArguments(args);
}
loadFragment(fragment);
}
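    /**
     * Loads the feed at the given position relative to the start of the
     * subscription section of the nav drawer.
     */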
private void loadFeedFragmentByPosition(int relPos, Bundle args) {
if(relPos < 0) {
return;
}
Feed feed = itemAccess.getItem(relPos);
loadFeedFragmentById(feed.getId(), args);
}
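    /**
     * Replaces the main view with the item list of the feed with the given ID
     * and remembers it as the last shown fragment.
     */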
public void loadFeedFragmentById(long feedId, Bundle args) {
Fragment fragment = ItemlistFragment.newInstance(feedId);
if(args != null) {
fragment.setArguments(args);
}
saveLastNavFragment(String.valueOf(feedId));
currentTitle = "";
getSupportActionBar().setTitle(currentTitle);
loadFragment(fragment);
}
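    /**
     * Clears the back stack and shows the given fragment as the new main view.
     */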
private void loadFragment(Fragment fragment) {
FragmentManager fragmentManager = getSupportFragmentManager();
// clear back stack
for (int i = 0; i < fragmentManager.getBackStackEntryCount(); i++) {
fragmentManager.popBackStack();
}
FragmentTransaction t = fragmentManager.beginTransaction();
t.replace(R.id.main_view, fragment, "main");
fragmentManager.popBackStack();
// TODO: we have to allow state loss here
// since this function can get called from an AsyncTask which
// could be finishing after our app has already committed state
// and is about to get shutdown. What we *should* do is
// not commit anything in an AsyncTask, but that's a bigger
// change than we want now.
t.commitAllowingStateLoss();
if (navAdapter != null) {
navAdapter.notifyDataSetChanged();
}
}
public void loadChildFragment(Fragment fragment) {
Validate.notNull(fragment);
FragmentManager fm = getSupportFragmentManager();
fm.beginTransaction()
.replace(R.id.main_view, fragment, "main")
.addToBackStack(null)
.commit();
}
public void dismissChildFragment() {
getSupportFragmentManager().popBackStack();
}
private int getSelectedNavListIndex() {
String currentFragment = getLastNavFragment();
if(currentFragment == null) {
// should not happen, but better safe than sorry
return -1;
}
int tagIndex = navAdapter.getTags().indexOf(currentFragment);
if(tagIndex >= 0) {
return tagIndex;
} else if(ArrayUtils.contains(NAV_DRAWER_TAGS, currentFragment)) {
// the fragment was just hidden
return -1;
} else { // last fragment was not a list, but a feed
long feedId = Long.parseLong(currentFragment);
if (navDrawerData != null) {
List<Feed> feeds = navDrawerData.feeds;
for (int i = 0; i < feeds.size(); i++) {
if (feeds.get(i).getId() == feedId) {
return i + navAdapter.getSubscriptionOffset();
}
}
}
return -1;
}
}
private final AdapterView.OnItemClickListener navListClickListener = new AdapterView.OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
int viewType = parent.getAdapter().getItemViewType(position);
if (viewType != NavListAdapter.VIEW_TYPE_SECTION_DIVIDER && position != selectedNavListIndex) {
loadFragment(position, null);
}
drawerLayout.closeDrawer(navDrawer);
}
};
private final AdapterView.OnItemLongClickListener newListLongClickListener = new AdapterView.OnItemLongClickListener() {
@Override
public boolean onItemLongClick(AdapterView<?> parent, View view, int position, long id) {
if(position < navAdapter.getTags().size()) {
showDrawerPreferencesDialog();
return true;
} else {
mPosition = position;
return false;
}
}
};
@Override
protected void onPostCreate(Bundle savedInstanceState) {
super.onPostCreate(savedInstanceState);
drawerToggle.syncState();
if (savedInstanceState != null) {
currentTitle = savedInstanceState.getString(SAVE_TITLE);
if (!drawerLayout.isDrawerOpen(navDrawer)) {
getSupportActionBar().setTitle(currentTitle);
}
selectedNavListIndex = getSelectedNavListIndex();
}
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
super.onConfigurationChanged(newConfig);
drawerToggle.onConfigurationChanged(newConfig);
}
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putString(SAVE_TITLE, getSupportActionBar().getTitle().toString());
outState.putInt(SAVE_BACKSTACK_COUNT, getSupportFragmentManager().getBackStackEntryCount());
}
@Override
public void onStart() {
super.onStart();
EventDistributor.getInstance().register(contentUpdate);
EventBus.getDefault().register(this);
RatingDialog.init(this);
}
@Override
protected void onPause() {
super.onPause();
}
@Override
protected void onResume() {
super.onResume();
StorageUtils.checkStorageAvailability(this);
DBTasks.checkShouldRefreshFeeds(getApplicationContext());
Intent intent = getIntent();
if (intent.hasExtra(EXTRA_FEED_ID) ||
(navDrawerData != null && intent.hasExtra(EXTRA_NAV_TYPE) &&
(intent.hasExtra(EXTRA_NAV_INDEX) || intent.hasExtra(EXTRA_FRAGMENT_TAG)))) {
handleNavIntent();
}
loadData();
RatingDialog.check();
}
@Override
protected void onStop() {
super.onStop();
EventDistributor.getInstance().unregister(contentUpdate);
EventBus.getDefault().unregister(this);
if(subscription != null) {
subscription.unsubscribe();
}
if(pd != null) {
pd.dismiss();
}
}
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
@Override
public void onTrimMemory(int level) {
super.onTrimMemory(level);
Glide.get(this).trimMemory(level);
}
@Override
public void onLowMemory() {
super.onLowMemory();
Glide.get(this).clearMemory();
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
boolean retVal = super.onCreateOptionsMenu(menu);
if (Flavors.FLAVOR == Flavors.PLAY) {
switch (getLastNavFragment()) {
case QueueFragment.TAG:
case EpisodesFragment.TAG:
requestCastButton(MenuItem.SHOW_AS_ACTION_IF_ROOM);
return retVal;
case DownloadsFragment.TAG:
case PlaybackHistoryFragment.TAG:
case AddFeedFragment.TAG:
case SubscriptionFragment.TAG:
return retVal;
default:
requestCastButton(MenuItem.SHOW_AS_ACTION_NEVER);
return retVal;
}
} else {
return retVal;
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (drawerToggle.onOptionsItemSelected(item)) {
return true;
} else if (item.getItemId() == android.R.id.home) {
if (getSupportFragmentManager().getBackStackEntryCount() > 0) {
dismissChildFragment();
}
return true;
} else {
return super.onOptionsItemSelected(item);
}
}
@Override
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) {
super.onCreateContextMenu(menu, v, menuInfo);
if(v.getId() != R.id.nav_list) {
return;
}
AdapterView.AdapterContextMenuInfo adapterInfo = (AdapterView.AdapterContextMenuInfo) menuInfo;
int position = adapterInfo.position;
if(position < navAdapter.getSubscriptionOffset()) {
return;
}
MenuInflater inflater = getMenuInflater();
inflater.inflate(R.menu.nav_feed_context, menu);
Feed feed = navDrawerData.feeds.get(position - navAdapter.getSubscriptionOffset());
menu.setHeaderTitle(feed.getTitle());
// episodes are not loaded, so we cannot check if the podcast has new or unplayed ones!
}
@Override
public boolean onContextItemSelected(MenuItem item) {
final int position = mPosition;
mPosition = -1; // reset
if(position < 0) {
return false;
}
Feed feed = navDrawerData.feeds.get(position - navAdapter.getSubscriptionOffset());
switch(item.getItemId()) {
case R.id.mark_all_seen_item:
DBWriter.markFeedSeen(feed.getId());
return true;
case R.id.mark_all_read_item:
DBWriter.markFeedRead(feed.getId());
return true;
case R.id.rename_item:
new RenameFeedDialog(this, feed).show();
return true;
case R.id.remove_item:
final FeedRemover remover = new FeedRemover(this, feed) {
@Override
protected void onPostExecute(Void result) {
super.onPostExecute(result);
if(getSelectedNavListIndex() == position) {
loadFragment(EpisodesFragment.TAG, null);
}
}
};
ConfirmationDialog conDialog = new ConfirmationDialog(this,
R.string.remove_feed_label,
getString(R.string.feed_delete_confirmation_msg, feed.getTitle())) {
@Override
public void onConfirmButtonPressed(
DialogInterface dialog) {
dialog.dismiss();
long mediaId = PlaybackPreferences.getCurrentlyPlayingFeedMediaId();
if (mediaId > 0 &&
FeedItemUtil.indexOfItemWithMediaId(feed.getItems(), mediaId) >= 0) {
Log.d(TAG, "Currently playing episode is about to be deleted, skipping");
remover.skipOnCompletion = true;
int playerStatus = PlaybackPreferences.getCurrentPlayerStatus();
if(playerStatus == PlaybackPreferences.PLAYER_STATUS_PLAYING) {
sendBroadcast(new Intent(
PlaybackService.ACTION_PAUSE_PLAY_CURRENT_EPISODE));
}
}
remover.executeAsync();
}
};
conDialog.createNewDialog().show();
return true;
default:
return super.onContextItemSelected(item);
}
}
@Override
public void onBackPressed() {
if(isDrawerOpen()) {
drawerLayout.closeDrawer(navDrawer);
} else {
super.onBackPressed();
}
}
private DBReader.NavDrawerData navDrawerData;
private int selectedNavListIndex = 0;
private final NavListAdapter.ItemAccess itemAccess = new NavListAdapter.ItemAccess() {
@Override
public int getCount() {
if (navDrawerData != null) {
return navDrawerData.feeds.size();
} else {
return 0;
}
}
@Override
public Feed getItem(int position) {
if (navDrawerData != null && 0 <= position && position < navDrawerData.feeds.size()) {
return navDrawerData.feeds.get(position);
} else {
return null;
}
}
@Override
public int getSelectedItemIndex() {
return selectedNavListIndex;
}
@Override
public int getQueueSize() {
return (navDrawerData != null) ? navDrawerData.queueSize : 0;
}
@Override
public int getNumberOfNewItems() {
return (navDrawerData != null) ? navDrawerData.numNewItems : 0;
}
@Override
public int getNumberOfDownloadedItems() {
return (navDrawerData != null) ? navDrawerData.numDownloadedItems : 0;
}
@Override
public int getReclaimableItems() {
return (navDrawerData != null) ? navDrawerData.reclaimableSpace : 0;
}
@Override
public int getFeedCounter(long feedId) {
return navDrawerData != null ? navDrawerData.feedCounters.get(feedId) : 0;
}
@Override
public int getFeedCounterSum() {
if(navDrawerData == null) {
return 0;
}
int sum = 0;
for(int counter : navDrawerData.feedCounters.values()) {
sum += counter;
}
return sum;
}
};
private void loadData() {
subscription = Observable.fromCallable(DBReader::getNavDrawerData)
.subscribeOn(Schedulers.newThread())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(result -> {
boolean handleIntent = (navDrawerData == null);
navDrawerData = result;
navAdapter.notifyDataSetChanged();
if (handleIntent) {
handleNavIntent();
}
}, error -> Log.e(TAG, Log.getStackTraceString(error)));
}
public void onEvent(QueueEvent event) {
Log.d(TAG, "onEvent(" + event + ")");
// we are only interested in the number of queue items, not download status or position
if(event.action == QueueEvent.Action.DELETED_MEDIA ||
event.action == QueueEvent.Action.SORTED ||
event.action == QueueEvent.Action.MOVED) {
return;
}
loadData();
}
public void onEventMainThread(ProgressEvent event) {
Log.d(TAG, "onEvent(" + event + ")");
switch(event.action) {
case START:
pd = new ProgressDialog(this);
pd.setMessage(event.message);
pd.setIndeterminate(true);
pd.setCancelable(false);
pd.show();
break;
case END:
if(pd != null) {
pd.dismiss();
}
break;
}
}
public void onEventMainThread(MessageEvent event) {
Log.d(TAG, "onEvent(" + event + ")");
View parentLayout = findViewById(R.id.drawer_layout);
Snackbar snackbar = Snackbar.make(parentLayout, event.message, Snackbar.LENGTH_SHORT);
if(event.action != null) {
snackbar.setAction(getString(R.string.undo), v -> event.action.run());
}
snackbar.show();
}
private final EventDistributor.EventListener contentUpdate = new EventDistributor.EventListener() {
@Override
public void update(EventDistributor eventDistributor, Integer arg) {
if ((EVENTS & arg) != 0) {
Log.d(TAG, "Received contentUpdate Intent.");
loadData();
}
}
};
private void handleNavIntent() {
Log.d(TAG, "handleNavIntent()");
Intent intent = getIntent();
if (intent.hasExtra(EXTRA_FEED_ID) ||
(intent.hasExtra(EXTRA_NAV_TYPE) &&
(intent.hasExtra(EXTRA_NAV_INDEX) || intent.hasExtra(EXTRA_FRAGMENT_TAG)))) {
int index = intent.getIntExtra(EXTRA_NAV_INDEX, -1);
String tag = intent.getStringExtra(EXTRA_FRAGMENT_TAG);
Bundle args = intent.getBundleExtra(EXTRA_FRAGMENT_ARGS);
long feedId = intent.getLongExtra(EXTRA_FEED_ID, 0);
if (index >= 0) {
loadFragment(index, args);
} else if (tag != null) {
loadFragment(tag, args);
} else if(feedId > 0) {
loadFeedFragmentById(feedId, args);
}
}
setIntent(new Intent(MainActivity.this, MainActivity.class)); // to avoid handling the intent twice when the configuration changes
}
@Override
protected void onNewIntent(Intent intent) {
super.onNewIntent(intent);
setIntent(intent);
}
}
| 1 | 13,684 | Please remove the unused imports :) | AntennaPod-AntennaPod | java |
@@ -82,11 +82,11 @@ ActiveRecord::Schema.define(version: 20140723160957) do
end
create_table "comments", force: true do |t|
+ t.text "comment_text"
t.datetime "created_at"
t.datetime "updated_at"
- t.string "commentable_type"
t.integer "commentable_id"
- t.text "comment_text"
+ t.string "commentable_type"
end
create_table "properties", force: true do |t| | 1 | # encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20140723160957) do
create_table "api_tokens", force: true do |t|
t.string "access_token"
t.integer "user_id"
t.integer "cart_id"
t.datetime "expires_at"
t.datetime "created_at"
t.datetime "updated_at"
t.datetime "used_at"
end
create_table "approval_groups", force: true do |t|
t.string "name"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "cart_id"
end
create_table "approval_groups_users", id: false, force: true do |t|
t.integer "approval_group_id"
t.integer "user_id"
end
create_table "approvals", force: true do |t|
t.integer "cart_id"
t.integer "user_id"
t.string "status"
t.datetime "created_at"
t.datetime "updated_at"
t.string "role"
end
create_table "approver_comments", force: true do |t|
t.datetime "created_at"
t.datetime "updated_at"
t.integer "user_id"
t.integer "comment_id"
end
create_table "cart_item_traits", force: true do |t|
t.text "name"
t.text "value"
t.integer "cart_item_id"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "cart_items", force: true do |t|
t.string "vendor"
t.text "description"
t.string "url"
t.text "notes"
t.integer "quantity"
t.text "details"
t.string "part_number"
t.float "price"
t.integer "cart_id"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "carts", force: true do |t|
t.string "name"
t.string "status"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "external_id"
end
create_table "comments", force: true do |t|
t.datetime "created_at"
t.datetime "updated_at"
t.string "commentable_type"
t.integer "commentable_id"
t.text "comment_text"
end
create_table "properties", force: true do |t|
t.text "property"
t.text "value"
t.integer "hasproperties_id"
t.string "hasproperties_type"
end
create_table "user_roles", force: true do |t|
t.integer "approval_group_id"
t.integer "user_id"
t.string "role"
end
create_table "users", force: true do |t|
t.string "email_address"
t.string "first_name"
t.string "last_name"
t.datetime "created_at"
t.datetime "updated_at"
end
end
| 1 | 11,857 | Were these local edits? There doesn't appear to be a change here. If so, let's remove this from the commit. | 18F-C2 | rb |
@@ -3,8 +3,13 @@ package server
import (
"crypto/ecdsa"
"crypto/x509"
+ "github.com/spiffe/spire/pkg/common/profiling"
"net"
+ "net/http"
+ _ "net/http/pprof"
"net/url"
+ "runtime"
+ "strconv"
"sync"
"syscall"
| 1 | package server
import (
"crypto/ecdsa"
"crypto/x509"
"net"
"net/url"
"sync"
"syscall"
"github.com/sirupsen/logrus"
common "github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/server/ca"
"github.com/spiffe/spire/pkg/server/catalog"
"github.com/spiffe/spire/pkg/server/endpoints"
tomb "gopkg.in/tomb.v2"
)
type Config struct {
// Configurations for server plugins
PluginConfigs common.PluginConfigMap
Log logrus.FieldLogger
// Address of SPIRE server
BindAddress *net.TCPAddr
// Address of the HTTP SPIRE server
BindHTTPAddress *net.TCPAddr
// Trust domain
TrustDomain url.URL
// Umask value to use
Umask int
}
type Server struct {
Catalog catalog.Catalog
Config *Config
caManager ca.Manager
endpoints endpoints.Server
privateKey *ecdsa.PrivateKey
svid *x509.Certificate
m *sync.RWMutex
t *tomb.Tomb
}
// Run the server
// This method initializes the server, including its plugins,
// and then blocks until it's shut down or an error is encountered.
func (s *Server) Run() error {
if s.t == nil {
s.t = new(tomb.Tomb)
}
if s.m == nil {
s.m = new(sync.RWMutex)
}
s.t.Go(s.run)
return s.t.Wait()
}
func (s *Server) run() error {
s.prepareUmask()
err := s.initPlugins()
if err != nil {
return err
}
err = s.startCAManager()
if err != nil {
s.Catalog.Stop()
return err
}
s.t.Go(s.caManager.Wait)
s.t.Go(s.startEndpoints)
<-s.t.Dying()
if s.t.Err() != nil {
s.Config.Log.Errorf("fatal: %v", s.t.Err())
}
s.shutdown()
return nil
}
func (s *Server) Shutdown() {
s.t.Kill(nil)
}
func (s *Server) shutdown() {
if s.endpoints != nil {
s.endpoints.Shutdown()
}
if s.caManager != nil {
s.caManager.Shutdown()
}
if s.Catalog != nil {
s.Catalog.Stop()
}
return
}
func (s *Server) prepareUmask() {
s.Config.Log.Debug("Setting umask to ", s.Config.Umask)
syscall.Umask(s.Config.Umask)
}
func (s *Server) initPlugins() error {
config := &catalog.Config{
PluginConfigs: s.Config.PluginConfigs,
Log: s.Config.Log.WithField("subsystem_name", "catalog"),
}
s.Catalog = catalog.New(config)
err := s.Catalog.Run()
if err != nil {
return err
}
s.Config.Log.Info("plugins started")
return nil
}
func (s *Server) startCAManager() error {
s.m.Lock()
defer s.m.Unlock()
config := &ca.Config{
Catalog: s.Catalog,
TrustDomain: s.Config.TrustDomain,
Log: s.Config.Log.WithField("subsystem_name", "ca_manager"),
}
s.caManager = ca.New(config)
return s.caManager.Start()
}
func (s *Server) startEndpoints() error {
s.m.Lock()
c := &endpoints.Config{
GRPCAddr: s.Config.BindAddress,
HTTPAddr: s.Config.BindHTTPAddress,
TrustDomain: s.Config.TrustDomain,
Catalog: s.Catalog,
Log: s.Config.Log.WithField("subsystem_name", "endpoints"),
}
s.endpoints = endpoints.New(c)
s.m.Unlock()
s.t.Go(s.endpoints.ListenAndServe)
<-s.t.Dying()
s.endpoints.Shutdown()
return nil
}
| 1 | 9,160 | nit: this should be further down w/ the rest of the github imports | spiffe-spire | go |
@@ -2,7 +2,7 @@ Hi <%= @user.first_name %>,
This is a reminder that your Learn <%= @subscription.plan_name %> subscription
will be automatically renewed on <%= l @subscription.next_payment_on %> for the
-amount of <%= number_to_currency(@subscription.next_payment_amount) %>.
+amount of <%= number_to_currency(@subscription.next_payment_amount / 100) %>.
If you need to make any changes, you can do so by accessing your account page at
<%= my_account_url %> | 1 | Hi <%= @user.first_name %>,
This is a reminder that your Learn <%= @subscription.plan_name %> subscription
will be automatically renewed on <%= l @subscription.next_payment_on %> for the
amount of <%= number_to_currency(@subscription.next_payment_amount) %>.
If you need to make any changes, you can do so by accessing your account page at
<%= my_account_url %>
Thank you,
thoughtbot
| 1 | 10,073 | I don't know if this is something we should address now, but we have a cents_to_dollars private method in both `SubscriptionCoupon` and `Invoice`. | thoughtbot-upcase | rb |
@@ -1619,7 +1619,9 @@ void ihipPrintKernelLaunch(const char* kernelName, const grid_launch_parm* lp,
// Allows runtime to track some information about the stream.
hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, dim3 block, grid_launch_parm* lp,
const char* kernelNameStr, bool lockAcquired) {
- stream = ihipSyncAndResolveStream(stream, lockAcquired);
+ if (stream == nullptr || stream != stream->getCtx()->_defaultStream){
+ stream = ihipSyncAndResolveStream(stream, lockAcquired);
+ }
lp->grid_dim.x = grid.x;
lp->grid_dim.y = grid.y;
lp->grid_dim.z = grid.z; | 1 | /*
Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/**
* @file hip_hcc.cpp
*
* Contains definitions for functions that are large enough that we don't want to inline them
* everywhere. This file is compiled and linked into apps running HIP / HCC path.
*/
#include <assert.h>
#include <exception>
#include <stdint.h>
#include <iostream>
#include <sstream>
#include <list>
#include <sys/types.h>
#include <unistd.h>
#include <deque>
#include <vector>
#include <algorithm>
#include <atomic>
#include <mutex>
#include <unordered_set>
#include <hc.hpp>
#include <hc_am.hpp>
#include "hsa/hsa_ext_amd.h"
#include "hsa/hsa_ext_image.h"
#include "hip/hip_runtime.h"
#include "hip_hcc_internal.h"
#include "hip/hip_ext.h"
#include "trace_helper.h"
#include "env.h"
// TODO - create a stream-based debug interface as an additional option for tprintf
#define DB_PEER_CTX 0
//=================================================================================================
// Global variables:
//=================================================================================================
const int release = 1;
const char* API_COLOR = KGRN;
const char* API_COLOR_END = KNRM;
int HIP_LAUNCH_BLOCKING = 0;
std::string HIP_LAUNCH_BLOCKING_KERNELS;
std::vector<std::string> g_hipLaunchBlockingKernels;
int HIP_API_BLOCKING = 0;
int HIP_PRINT_ENV = 0;
int HIP_TRACE_API = 0;
std::string HIP_TRACE_API_COLOR("green");
// TODO - DB_START/STOP need more testing.
std::string HIP_DB_START_API;
std::string HIP_DB_STOP_API;
int HIP_DB = 0;
int HIP_VISIBLE_DEVICES = 0;
int HIP_WAIT_MODE = 0;
int HIP_FORCE_P2P_HOST = 0;
int HIP_FAIL_SOC = 0;
int HIP_DENY_PEER_ACCESS = 0;
int HIP_HIDDEN_FREE_MEM = 256;
// Force async copies to actually use the synchronous copy interface.
int HIP_FORCE_SYNC_COPY = 0;
// TODO - set these to 0 and 1
int HIP_EVENT_SYS_RELEASE = 0;
int HIP_HOST_COHERENT = 1;
int HIP_SYNC_HOST_ALLOC = 1;
int HIP_SYNC_FREE = 0;
int HIP_INIT_ALLOC = -1;
int HIP_SYNC_STREAM_WAIT = 0;
int HIP_FORCE_NULL_STREAM = 0;
int HIP_DUMP_CODE_OBJECT = 0;
#if (__hcc_workweek__ >= 17300)
// Make sure we have required bug fix in HCC
// Perform resolution on the GPU:
// Chicken bit to sync on host to implement null stream.
// If 0, null stream synchronization is performed on the GPU
int HIP_SYNC_NULL_STREAM = 0;
#else
int HIP_SYNC_NULL_STREAM = 1;
#endif
// HIP needs to change some behavior based on HCC_OPT_FLUSH :
#if (__hcc_workweek__ >= 17296)
int HCC_OPT_FLUSH = 1;
#else
#warning "HIP disabled HCC_OPT_FLUSH since HCC version does not yet support"
int HCC_OPT_FLUSH = 0;
#endif
// Array of pointers to devices.
ihipDevice_t** g_deviceArray;
bool g_visible_device = false;
unsigned g_deviceCnt;
std::vector<int> g_hip_visible_devices;
hsa_agent_t g_cpu_agent;
hsa_agent_t* g_allAgents; // CPU agents + all the visible GPU agents.
unsigned g_numLogicalThreads;
bool g_initDeviceFound = false;
std::atomic<int> g_lastShortTid(1);
// Indexed by short-tid:
//
std::vector<ProfTrigger> g_dbStartTriggers;
std::vector<ProfTrigger> g_dbStopTriggers;
//=================================================================================================
// Top-level "free" functions:
//=================================================================================================
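// Record an entry in the API trace: builds "tid.apiSeqNum apiStr" into *fullStr,
// checks the HIP_DB_START_API / HIP_DB_STOP_API profiling triggers, optionally prints
// the trace line to stderr, and returns the start timestamp in ticks.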
uint64_t recordApiTrace(TlsData *tls, std::string* fullStr, const std::string& apiStr) {
auto apiSeqNum = tls->tidInfo.apiSeqNum();
auto tid = tls->tidInfo.tid();
if ((tid < g_dbStartTriggers.size()) && (apiSeqNum >= g_dbStartTriggers[tid].nextTrigger())) {
printf("info: resume profiling at %lu\n", apiSeqNum);
g_dbStartTriggers.pop_back();
};
if ((tid < g_dbStopTriggers.size()) && (apiSeqNum >= g_dbStopTriggers[tid].nextTrigger())) {
printf("info: stop profiling at %lu\n", apiSeqNum);
g_dbStopTriggers.pop_back();
};
fullStr->reserve(16 + apiStr.length());
*fullStr = std::to_string(tid) + ".";
*fullStr += std::to_string(apiSeqNum);
*fullStr += " ";
*fullStr += apiStr;
uint64_t apiStartTick = getTicks();
if (COMPILE_HIP_DB && HIP_TRACE_API) {
fprintf(stderr, "%s<<hip-api pid:%d tid:%s @%lu%s\n", API_COLOR, tls->tidInfo.pid(), fullStr->c_str(), apiStartTick,
API_COLOR_END);
}
return apiStartTick;
}
static inline bool ihipIsValidDevice(unsigned deviceIndex) {
    // deviceIndex is unsigned so always >= 0
return (deviceIndex < g_deviceCnt);
}
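// Return the device for the given index, or NULL if the index is out of range.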
ihipDevice_t* ihipGetDevice(int deviceIndex) {
if (ihipIsValidDevice(deviceIndex)) {
return g_deviceArray[deviceIndex];
} else {
return NULL;
}
}
ihipCtx_t* ihipGetPrimaryCtx(unsigned deviceIndex) {
ihipDevice_t* device = ihipGetDevice(deviceIndex);
return device ? device->getPrimaryCtx() : NULL;
};
hipError_t ihipSynchronize(TlsData *tls) {
    ihipGetTlsDefaultCtx()->locked_waitAllStreams();  // ignores the hipStreamNonBlocking flag,
                                                      // this waits for all activity to finish.
return (hipSuccess);
}
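// Return the thread-local storage block for the calling thread, constructed lazily on first use.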
TlsData* tls_get_ptr() {
static thread_local TlsData data;
return &data;
}
//=================================================================================================
// ihipStream_t:
//=================================================================================================
TidInfo::TidInfo() : _apiSeqNum(0) {
_shortTid = g_lastShortTid.fetch_add(1);
_pid = getpid();
if (COMPILE_HIP_DB && HIP_TRACE_API) {
std::stringstream tid_ss;
std::stringstream tid_ss_num;
tid_ss_num << std::this_thread::get_id();
tid_ss << std::hex << std::stoull(tid_ss_num.str());
// cannot use tprintf here since it will recurse back into TlsData constructor
#if COMPILE_HIP_DB
if (HIP_DB & (1 << DB_API)) {
char msgStr[1000];
snprintf(msgStr, sizeof(msgStr),
"HIP initialized short_tid#%d (maps to full_tid: 0x%s)\n",
tid(), tid_ss.str().c_str());
fprintf(stderr, " %ship-%s pid:%d tid:%d:%s%s", dbName[DB_API]._color,
dbName[DB_API]._shortName, pid(), tid(), msgStr, KNRM);
}
#endif
};
}
//=================================================================================================
// ihipStream_t:
//=================================================================================================
//---
ihipStream_t::ihipStream_t(ihipCtx_t* ctx, hc::accelerator_view av, unsigned int flags)
: _id(0), // will be set by add function.
_flags(flags),
_ctx(ctx),
_criticalData(this, av) {
unsigned schedBits = ctx->_ctxFlags & hipDeviceScheduleMask;
switch (schedBits) {
case hipDeviceScheduleAuto:
_scheduleMode = Auto;
break;
case hipDeviceScheduleSpin:
_scheduleMode = Spin;
break;
case hipDeviceScheduleYield:
_scheduleMode = Yield;
break;
case hipDeviceScheduleBlockingSync:
_scheduleMode = Yield;
break;
default:
_scheduleMode = Auto;
};
};
//---
ihipStream_t::~ihipStream_t() {}
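// Resolve the HC wait mode for this stream. In Auto mode, active (spin) waits are used when
// there are more devices than logical CPU threads, otherwise blocked waits. The HIP_WAIT_MODE
// env var overrides everything: 1 forces blocked waits, 2 forces active waits.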
hc::hcWaitMode ihipStream_t::waitMode() const {
hc::hcWaitMode waitMode = hc::hcWaitModeActive;
if (_scheduleMode == Auto) {
if (g_deviceCnt > g_numLogicalThreads) {
waitMode = hc::hcWaitModeActive;
} else {
waitMode = hc::hcWaitModeBlocked;
}
} else if (_scheduleMode == Spin) {
waitMode = hc::hcWaitModeActive;
} else if (_scheduleMode == Yield) {
waitMode = hc::hcWaitModeBlocked;
} else {
assert(0); // bad wait mode.
}
if (HIP_WAIT_MODE == 1) {
waitMode = hc::hcWaitModeBlocked;
} else if (HIP_WAIT_MODE == 2) {
waitMode = hc::hcWaitModeActive;
}
return waitMode;
}
// Wait for all kernel and data copy commands in this stream to complete.
// This signature should be used in routines that already have locked the stream mutex
void ihipStream_t::wait(LockedAccessor_StreamCrit_t& crit) {
tprintf(DB_SYNC, "%s wait for queue-empty..\n", ToString(this).c_str());
crit->_av.wait(waitMode());
}
//---
// Wait for all kernel and data copy commands in this stream to complete.
inline void ihipStream_t::locked_wait(bool& waited) {
// create a marker while holding stream lock,
// but release lock prior to waiting on the marker
hc::completion_future marker;
{
LockedAccessor_StreamCrit_t crit(_criticalData);
// skipping marker since stream is empty
if (crit->_av.get_is_empty()) {
waited = false;
return;
}
marker = crit->_av.create_marker(hc::no_scope);
}
marker.wait(waitMode());
waited = true;
return;
};
void ihipStream_t::locked_wait() {
bool waited;
locked_wait(waited);
};
// Causes current stream to wait for specified event to complete:
// Note this does not provide any kind of host serialization.
void ihipStream_t::locked_streamWaitEvent(ihipEventData_t& ecd) {
LockedAccessor_StreamCrit_t crit(_criticalData);
crit->_av.create_blocking_marker(ecd.marker(), hc::accelerator_scope);
}
// Create a marker in this stream.
// Save state in the event so it can track the status of the event.
hc::completion_future ihipStream_t::locked_recordEvent(hipEvent_t event) {
auto scopeFlag = hc::accelerator_scope;
// The env var HIP_EVENT_SYS_RELEASE sets the default,
// The explicit flags override the env var (if specified)
if (event->_flags & hipEventReleaseToSystem) {
scopeFlag = hc::system_scope;
} else if (event->_flags & hipEventReleaseToDevice) {
scopeFlag = hc::accelerator_scope;
} else {
scopeFlag = HIP_EVENT_SYS_RELEASE ? hc::system_scope : hc::accelerator_scope;
}
// Lock the stream to prevent simultaneous access
LockedAccessor_StreamCrit_t crit(_criticalData);
return crit->_av.create_marker(scopeFlag);
};
//=============================================================================
//-------------------------------------------------------------------------------------------------
//---
const ihipDevice_t* ihipStream_t::getDevice() const { return _ctx->getDevice(); };
ihipCtx_t* ihipStream_t::getCtx() const { return _ctx; };
//--
// Lock the stream to prevent other threads from intervening.
LockedAccessor_StreamCrit_t ihipStream_t::lockopen_preKernelCommand() {
LockedAccessor_StreamCrit_t crit(_criticalData, false /*no unlock at destruction*/);
return crit;
}
//---
// Must be called after kernel finishes, this releases the lock on the stream so other commands can
// submit.
void ihipStream_t::lockclose_postKernelCommand(const char* kernelName, hc::accelerator_view* av, bool unlockPostponed) {
bool blockThisKernel = false;
if (!g_hipLaunchBlockingKernels.empty()) {
std::string kernelNameString(kernelName);
for (auto o = g_hipLaunchBlockingKernels.begin(); o != g_hipLaunchBlockingKernels.end();
o++) {
if ((*o == kernelNameString)) {
// printf ("force blocking for kernel %s\n", o->c_str());
blockThisKernel = true;
}
}
}
if (HIP_LAUNCH_BLOCKING || blockThisKernel) {
        // TODO - fix this so it goes through proper stream::wait() call.
        // Direct wait OK since we know the stream is locked.
av->wait(hc::hcWaitModeActive);
tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for kernel '%s' completion\n", ToString(this).c_str(),
kernelName);
}
// if unlockPostponed is true then this stream will be unlocked later (e.g., see hipExtLaunchMultiKernelMultiDevice for a sample call)
if (!unlockPostponed) {
_criticalData.unlock(); // paired with lock from lockopen_preKernelCommand.
}
};
//=============================================================================
// Recompute the peercnt and the packed _peerAgents whenever a peer is added or deleted.
// The packed _peerAgents can efficiently be used on each memory allocation.
template <>
void ihipCtxCriticalBase_t<CtxMutex>::recomputePeerAgents() {
_peerCnt = 0;
std::for_each(_peers.begin(), _peers.end(), [this](ihipCtx_t* ctx) {
_peerAgents[_peerCnt++] = ctx->getDevice()->_hsaAgent;
});
}
template <>
bool ihipCtxCriticalBase_t<CtxMutex>::isPeerWatcher(const ihipCtx_t* peer) {
auto match = std::find_if(_peers.begin(), _peers.end(), [=](const ihipCtx_t* d) {
return d->getDeviceNum() == peer->getDeviceNum();
});
return (match != std::end(_peers));
}
template <>
bool ihipCtxCriticalBase_t<CtxMutex>::addPeerWatcher(const ihipCtx_t* thisCtx,
ihipCtx_t* peerWatcher) {
auto match = std::find(_peers.begin(), _peers.end(), peerWatcher);
if (match == std::end(_peers)) {
// Not already a peer, let's update the list:
tprintf(DB_COPY, "addPeerWatcher. Allocations on %s now visible to peerWatcher %s.\n",
thisCtx->toString().c_str(), peerWatcher->toString().c_str());
_peers.push_back(peerWatcher);
recomputePeerAgents();
return true;
}
// If we get here - peer was already on list, silently ignore.
return false;
}
template <>
bool ihipCtxCriticalBase_t<CtxMutex>::removePeerWatcher(const ihipCtx_t* thisCtx,
ihipCtx_t* peerWatcher) {
auto match = std::find(_peers.begin(), _peers.end(), peerWatcher);
if (match != std::end(_peers)) {
// Found a valid peer, let's remove it.
tprintf(
DB_COPY,
"removePeerWatcher. Allocations on %s no longer visible to former peerWatcher %s.\n",
thisCtx->toString().c_str(), peerWatcher->toString().c_str());
_peers.remove(peerWatcher);
recomputePeerAgents();
return true;
} else {
return false;
}
}
template <>
void ihipCtxCriticalBase_t<CtxMutex>::resetPeerWatchers(ihipCtx_t* thisCtx) {
tprintf(DB_COPY, "resetPeerWatchers for context=%s\n", thisCtx->toString().c_str());
_peers.clear();
_peerCnt = 0;
addPeerWatcher(thisCtx, thisCtx); // peer-list always contains self agent.
}
template <>
void ihipCtxCriticalBase_t<CtxMutex>::printPeerWatchers(FILE* f) const {
for (auto iter = _peers.begin(); iter != _peers.end(); iter++) {
fprintf(f, "%s ", (*iter)->toString().c_str());
};
}
template <>
void ihipCtxCriticalBase_t<CtxMutex>::addStream(ihipStream_t* stream) {
stream->_id = _streams.size();
_streams.push_back(stream);
tprintf(DB_SYNC, " addStream: %s\n", ToString(stream).c_str());
}
template <>
void ihipDeviceCriticalBase_t<DeviceMutex>::addContext(ihipCtx_t* ctx) {
_ctxs.push_back(ctx);
tprintf(DB_SYNC, " addContext: %s\n", ToString(ctx).c_str());
}
//=============================================================================
//=================================================================================================
// ihipDevice_t
//=================================================================================================
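// Construct the device wrapper around an HC accelerator: caches the HSA agent, compute-unit
// count and driver node id, queries the device properties, and creates the primary context.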
ihipDevice_t::ihipDevice_t(unsigned deviceId, unsigned deviceCnt, hc::accelerator& acc)
: _deviceId(deviceId), _acc(acc), _state(0), _criticalData(this) {
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
if (agent) {
int err;
err = hsa_agent_get_info(
*agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &_computeUnits);
if (err != HSA_STATUS_SUCCESS) {
_computeUnits = 1;
}
err = hsa_agent_get_info(
*agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_DRIVER_NODE_ID, &_driver_node_id);
if (err != HSA_STATUS_SUCCESS){
_driver_node_id = 0;
}
_hsaAgent = *agent;
} else {
_hsaAgent.handle = static_cast<uint64_t>(-1);
}
initProperties(&_props);
_primaryCtx = new ihipCtx_t(this, deviceCnt, hipDeviceMapHost);
}
ihipDevice_t::~ihipDevice_t() {
delete _primaryCtx;
_primaryCtx = NULL;
}
void ihipDevice_t::locked_removeContext(ihipCtx_t* c) {
LockedAccessor_DeviceCrit_t crit(_criticalData);
crit->ctxs().remove(c);
tprintf(DB_SYNC, " locked_removeContext: %s\n", ToString(c).c_str());
}
void ihipDevice_t::locked_reset() {
// Obtain mutex access to the device critical data, release by destructor
LockedAccessor_DeviceCrit_t crit(_criticalData);
//---
// Wait for pending activity to complete? TODO - check if this is required behavior:
tprintf(DB_SYNC, "locked_reset waiting for activity to complete.\n");
// Reset and remove streams:
// Delete all created streams including the default one.
for (auto ctxI = crit->const_ctxs().begin(); ctxI != crit->const_ctxs().end(); ctxI++) {
ihipCtx_t* ctx = *ctxI;
(*ctxI)->locked_reset();
tprintf(DB_SYNC, " ctx cleanup %s\n", ToString(ctx).c_str());
delete ctx;
}
// Clear the list.
crit->ctxs().clear();
// reset _primaryCtx
_primaryCtx->locked_reset();
tprintf(DB_SYNC, " _primaryCtx cleanup %s\n", ToString(_primaryCtx).c_str());
// Reset and release all memory stored in the tracker:
// Reset will remove peer mapping so don't need to do this explicitly.
// FIXME - This is clearly a non-const action! Is this a context reset or a device reset -
// maybe should reference count?
_state = 0;
am_memtracker_reset(_acc);
// FIXME - Calling am_memtracker_reset is really bad since it destroyed all buffers allocated by
// the HCC runtime as well such as the printf buffer. Re-initialze the printf buffer as a
// workaround for now.
#ifdef HC_FEATURE_PRINTF
Kalmar::getContext()->initPrintfBuffer();
#endif
};
#define ErrorCheck(x) error_check(x, __LINE__, __FILE__)
void error_check(hsa_status_t hsa_error_code, int line_num, std::string str) {
if ((hsa_error_code != HSA_STATUS_SUCCESS) && (hsa_error_code != HSA_STATUS_INFO_BREAK)) {
printf("HSA reported error!\n In file: %s\nAt line: %d\n", str.c_str(), line_num);
}
}
//---
// Helper for initProperties
// Determines if the given agent is of type HSA_DEVICE_TYPE_GPU and counts it.
static hsa_status_t countGpuAgents(hsa_agent_t agent, void* data) {
if (data == NULL) {
return HSA_STATUS_ERROR_INVALID_ARGUMENT;
}
hsa_device_type_t device_type;
hsa_status_t status = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
if (status != HSA_STATUS_SUCCESS) {
return status;
}
if (device_type == HSA_DEVICE_TYPE_GPU) {
(*static_cast<int*>(data))++;
}
return HSA_STATUS_SUCCESS;
}
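// hsa_iterate_agents callback: stores the first GPU agent found in *data and stops the iteration.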
hsa_status_t FindGpuDevice(hsa_agent_t agent, void* data) {
if (data == NULL) {
return HSA_STATUS_ERROR_INVALID_ARGUMENT;
}
hsa_device_type_t hsa_device_type;
hsa_status_t hsa_error_code =
hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &hsa_device_type);
if (hsa_error_code != HSA_STATUS_SUCCESS) {
return hsa_error_code;
}
if (hsa_device_type == HSA_DEVICE_TYPE_GPU) {
*((hsa_agent_t*)data) = agent;
return HSA_STATUS_INFO_BREAK;
}
return HSA_STATUS_SUCCESS;
}
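// Memory-pool iteration callback: records the agent's global-segment memory pool in *data.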
hsa_status_t GetDevicePool(hsa_amd_memory_pool_t pool, void* data) {
if (NULL == data) {
return HSA_STATUS_ERROR_INVALID_ARGUMENT;
}
hsa_status_t err;
hsa_amd_segment_t segment;
uint32_t flag;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, &segment);
ErrorCheck(err);
if (HSA_AMD_SEGMENT_GLOBAL != segment) return HSA_STATUS_SUCCESS;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &flag);
ErrorCheck(err);
*((hsa_amd_memory_pool_t*)data) = pool;
return HSA_STATUS_SUCCESS;
}
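// Return the access permission the given agent has to the given memory pool.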
int checkAccess(hsa_agent_t agent, hsa_amd_memory_pool_t pool) {
hsa_status_t err;
hsa_amd_memory_pool_access_t access;
err = hsa_amd_agent_memory_pool_get_info(agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS,
&access);
ErrorCheck(err);
return access;
}
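// Memory-pool iteration callback used by initProperties: fills totalConstMem from the
// read-only segment and sharedMemPerBlock from the group segment.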
hsa_status_t get_pool_info(hsa_amd_memory_pool_t pool, void* data) {
hsa_status_t err;
hipDeviceProp_t* p_prop = reinterpret_cast<hipDeviceProp_t*>(data);
uint32_t region_segment;
// Get pool segment
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, ®ion_segment);
ErrorCheck(err);
switch (region_segment) {
case HSA_REGION_SEGMENT_READONLY:
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
&(p_prop->totalConstMem));
break;
case HSA_REGION_SEGMENT_GROUP:
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
&(p_prop->sharedMemPerBlock));
break;
default:
break;
}
return err;
}
// Determines if the given agent is of type HSA_DEVICE_TYPE_GPU and counts it.
static hsa_status_t findCpuAgent(hsa_agent_t agent, void* data) {
hsa_device_type_t device_type;
hsa_status_t status = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
if (status != HSA_STATUS_SUCCESS) {
return status;
}
if (device_type == HSA_DEVICE_TYPE_CPU) {
(*static_cast<hsa_agent_t*>(data)) = agent;
return HSA_STATUS_INFO_BREAK;
}
return HSA_STATUS_SUCCESS;
}
#define DeviceErrorCheck(x) \
if (x != HSA_STATUS_SUCCESS) { \
return hipErrorInvalidDevice; \
}
//---
// Initialize properties for the device.
// Call this once when the ihipDevice_t is created:
hipError_t ihipDevice_t::initProperties(hipDeviceProp_t* prop) {
hipError_t e = hipSuccess;
hsa_status_t err;
memset(prop, 0, sizeof(hipDeviceProp_t));
if (_hsaAgent.handle == -1) {
return hipErrorInvalidDevice;
}
// Iterates over the agents to determine Multiple GPU devices
// using the countGpuAgents callback.
    //! @bug : on HCC, isMultiGpuBoard returns True if the system contains multiple GPUs (rather
    //! than if the GPU is on a multi-ASIC board)
int gpuAgentsCount = 0;
err = hsa_iterate_agents(countGpuAgents, &gpuAgentsCount);
if (err == HSA_STATUS_INFO_BREAK) {
err = HSA_STATUS_SUCCESS;
}
DeviceErrorCheck(err);
    prop->isMultiGpuBoard = (gpuAgentsCount < 2) ? 0 : 1;
// Get agent name
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_PRODUCT_NAME,
&(prop->name));
DeviceErrorCheck(err);
char archName[256];
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_NAME, &archName);
prop->gcnArch = atoi(archName + 3);
DeviceErrorCheck(err);
// Get agent node
uint32_t node;
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_NODE, &node);
DeviceErrorCheck(err);
// Get wavefront size
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WAVEFRONT_SIZE, &prop->warpSize);
DeviceErrorCheck(err);
// Get max total number of work-items in a workgroup
err =
hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WORKGROUP_MAX_SIZE, &prop->maxThreadsPerBlock);
DeviceErrorCheck(err);
// Get max number of work-items of each dimension of a work-group
uint16_t work_group_max_dim[3];
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM, work_group_max_dim);
DeviceErrorCheck(err);
for (int i = 0; i < 3; i++) {
prop->maxThreadsDim[i] = work_group_max_dim[i];
}
hsa_dim3_t grid_max_dim;
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim);
DeviceErrorCheck(err);
prop->maxGridSize[0] = (int)((grid_max_dim.x == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.x);
prop->maxGridSize[1] = (int)((grid_max_dim.y == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.y);
prop->maxGridSize[2] = (int)((grid_max_dim.z == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.z);
// Get Max clock frequency
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_MAX_CLOCK_FREQUENCY,
&prop->clockRate);
    prop->clockRate *= 1000.0;  // convert MHz to kHz.
DeviceErrorCheck(err);
uint64_t counterHz;
err = hsa_system_get_info(HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, &counterHz);
DeviceErrorCheck(err);
prop->clockInstructionRate = counterHz / 1000;
// Get Agent BDFID (bus/device/function ID)
uint16_t bdf_id = 1;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_BDFID, &bdf_id);
DeviceErrorCheck(err);
// BDFID is 16bit uint: [8bit - BusID | 5bit - Device ID | 3bit - FunctionID]
prop->pciDeviceID = (bdf_id >> 3) & 0x1F;
prop->pciBusID = (bdf_id >> 8) & 0xFF;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_DOMAIN, &prop->pciDomainID);
DeviceErrorCheck(err);
// Masquerade as a 3.0-level device. This will change as more HW functions are properly
// supported. Application code should use the arch.has* to do detailed feature detection.
prop->major = 3;
prop->minor = 0;
// Get number of Compute Unit
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT,
&(prop->multiProcessorCount));
DeviceErrorCheck(err);
// TODO-hsart - this appears to return 0?
uint32_t cache_size[4];
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_CACHE_SIZE, cache_size);
DeviceErrorCheck(err);
prop->l2CacheSize = cache_size[1];
/* Computemode for HSA Devices is always : cudaComputeModeDefault */
prop->computeMode = 0;
_isLargeBar = _acc.has_cpu_accessible_am();
// Get Max Threads Per Multiprocessor
uint32_t max_waves_per_cu;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_MAX_WAVES_PER_CU,
&max_waves_per_cu);
DeviceErrorCheck(err);
prop->maxThreadsPerMultiProcessor = prop->warpSize * max_waves_per_cu;
// Get memory properties
err = hsa_amd_agent_iterate_memory_pools(_hsaAgent, get_pool_info, prop);
if (err == HSA_STATUS_INFO_BREAK) {
err = HSA_STATUS_SUCCESS;
}
DeviceErrorCheck(err);
// Get the size of the pool we are using for Accelerator Memory allocations:
hsa_region_t* am_region = static_cast<hsa_region_t*>(_acc.get_hsa_am_region());
err = hsa_region_get_info(*am_region, HSA_REGION_INFO_SIZE, &prop->totalGlobalMem);
DeviceErrorCheck(err);
    // maxSharedMemoryPerMultiProcessor should be the same as the group memory size.
    // Group memory will not be paged out, so the physical memory size is the total shared memory
    // size, and also equal to the group pool size.
prop->maxSharedMemoryPerMultiProcessor = prop->totalGlobalMem;
// Get Max memory clock frequency
err =
hsa_region_get_info(*am_region, (hsa_region_info_t)HSA_AMD_REGION_INFO_MAX_CLOCK_FREQUENCY,
&prop->memoryClockRate);
DeviceErrorCheck(err);
    prop->memoryClockRate *= 1000.0;  // convert MHz to kHz.
// Get global memory bus width in bits
err = hsa_region_get_info(*am_region, (hsa_region_info_t)HSA_AMD_REGION_INFO_BUS_WIDTH,
&prop->memoryBusWidth);
DeviceErrorCheck(err);
// Set feature flags - these are all mandatory for HIP on HCC path:
// Some features are under-development and future revs may support flags that are currently 0.
// Reporting of these flags should be synchronized with the HIP_ARCH* compile-time defines in
// hip_runtime.h
prop->arch.hasGlobalInt32Atomics = 1;
prop->arch.hasGlobalFloatAtomicExch = 1;
prop->arch.hasSharedInt32Atomics = 1;
prop->arch.hasSharedFloatAtomicExch = 1;
    prop->arch.hasFloatAtomicAdd = 1;  // emulated with a CAS loop, but reported as supported
prop->arch.hasGlobalInt64Atomics = 1;
prop->arch.hasSharedInt64Atomics = 1;
prop->arch.hasDoubles = 1;
prop->arch.hasWarpVote = 1;
prop->arch.hasWarpBallot = 1;
prop->arch.hasWarpShuffle = 1;
prop->arch.hasFunnelShift = 0; // TODO-hcc
prop->arch.hasThreadFenceSystem = 1;
prop->arch.hasSyncThreadsExt = 0; // TODO-hcc
prop->arch.hasSurfaceFuncs = 0; // TODO-hcc
prop->arch.has3dGrid = 1;
prop->arch.hasDynamicParallelism = 0;
prop->concurrentKernels =
1; // All ROCm hardware supports executing multiple kernels concurrently
prop->canMapHostMemory = 1; // All ROCm devices can map host memory
prop->totalConstMem = 16384;
#if 0
// TODO - code broken below since it always returns 1.
// Are the flags part of the context or part of the device?
if ( _device_flags | hipDeviceMapHost) {
prop->canMapHostMemory = 1;
} else {
prop->canMapHostMemory = 0;
}
#endif
// Get profile
hsa_profile_t agent_profile;
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_PROFILE, &agent_profile);
DeviceErrorCheck(err);
if(agent_profile == HSA_PROFILE_FULL) {
prop->integrated = 1;
}
// Enable the cooperative group for gfx9+
prop->cooperativeLaunch = (prop->gcnArch < 900) ? 0 : 1;
prop->cooperativeMultiDeviceLaunch = (prop->gcnArch < 900) ? 0 : 1;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_EXT_AGENT_INFO_IMAGE_1D_MAX_ELEMENTS,
&prop->maxTexture1D);
DeviceErrorCheck(err);
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_EXT_AGENT_INFO_IMAGE_2D_MAX_ELEMENTS,
prop->maxTexture2D);
DeviceErrorCheck(err);
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_EXT_AGENT_INFO_IMAGE_3D_MAX_ELEMENTS,
prop->maxTexture3D);
DeviceErrorCheck(err);
// Get Agent HDP Flush Register Memory
hsa_amd_hdp_flush_t hdpinfo;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_HDP_FLUSH, &hdpinfo);
DeviceErrorCheck(err);
prop->hdpMemFlushCntl = hdpinfo.HDP_MEM_FLUSH_CNTL;
prop->hdpRegFlushCntl = hdpinfo.HDP_REG_FLUSH_CNTL;
prop->memPitch = INT_MAX; //Maximum pitch in bytes allowed by memory copies (hardcoded 128 bytes in hipMallocPitch)
prop->textureAlignment = 0; //Alignment requirement for textures
    prop->texturePitchAlignment = IMAGE_PITCH_ALIGNMENT; //Alignment requirement for texture pitch
prop->kernelExecTimeoutEnabled = 0; //no run time limit for running kernels on device
hsa_isa_t isa;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AGENT_INFO_ISA, &isa);
DeviceErrorCheck(err);
std::size_t isa_sz = 0u;
hsa_isa_get_info_alt(isa, HSA_ISA_INFO_NAME_LENGTH, &isa_sz);
std::string isa_name(isa_sz, '\0');
hsa_isa_get_info_alt(isa, HSA_ISA_INFO_NAME, &isa_name.front());
if (isa_name.find("sram-ecc") != std::string::npos)
prop->ECCEnabled = 1; //Device has ECC support Enabled
else
prop->ECCEnabled = 0; //Device has ECC support disabled
prop->tccDriver = 0; // valid only for nvcc platform
return e;
}
//=================================================================================================
// ihipCtx_t
//=================================================================================================
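// Construct a context for the device: creates the default stream on the accelerator's
// default view and resets the peer list so it contains only this device.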
ihipCtx_t::ihipCtx_t(ihipDevice_t* device, unsigned deviceCnt, unsigned flags)
: _ctxFlags(flags), _device(device), _criticalData(this, deviceCnt) {
// locked_reset();
LockedAccessor_CtxCrit_t crit(_criticalData);
_defaultStream = new ihipStream_t(this, getDevice()->_acc.get_default_view(), hipStreamDefault);
crit->addStream(_defaultStream);
// Reset peer list to just me:
crit->resetPeerWatchers(this);
tprintf(DB_SYNC, "created ctx with defaultStream=%p (%s)\n", _defaultStream,
ToString(_defaultStream).c_str());
};
ihipCtx_t::~ihipCtx_t() {
if (_defaultStream) {
delete _defaultStream;
_defaultStream = NULL;
}
}
// Reset the device - this is called from hipDeviceReset.
// Device may be reset multiple times, and may be reset after init.
void ihipCtx_t::locked_reset() {
// Obtain mutex access to the device critical data, release by destructor
LockedAccessor_CtxCrit_t crit(_criticalData);
//---
// Wait for pending activity to complete? TODO - check if this is required behavior:
tprintf(DB_SYNC, "locked_reset waiting for activity to complete.\n");
// Reset and remove streams:
// Delete all created streams including the default one.
for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end();
streamI++) {
ihipStream_t* stream = *streamI;
(*streamI)->locked_wait();
tprintf(DB_SYNC, " delete %s\n", ToString(stream).c_str());
delete stream;
}
// Clear the list.
crit->streams().clear();
// Create a fresh default stream and add it:
_defaultStream = new ihipStream_t(this, getDevice()->_acc.get_default_view(), hipStreamDefault);
crit->addStream(_defaultStream);
#if 0
// Reset peer list to just me:
crit->resetPeerWatchers(this);
// Reset and release all memory stored in the tracker:
// Reset will remove peer mapping so don't need to do this explicitly.
// FIXME - This is clearly a non-const action! Is this a context reset or a device reset - maybe should reference count?
ihipDevice_t *device = getWriteableDevice();
device->_state = 0;
am_memtracker_reset(device->_acc);
#endif
};
//---
std::string ihipCtx_t::toString() const {
std::ostringstream ss;
ss << this;
return ss.str();
};
//----
//=================================================================================================
// Utility functions, these are not part of the public HIP API
//=================================================================================================
//=================================================================================================
// This called for submissions that are sent to the null/default stream. This routine ensures
// that this new command waits for activity in the other streams to complete before proceeding.
//
// HIP_SYNC_NULL_STREAM=0 does all dependency resolution on the GPU
// HIP_SYNC_NULL_STREAM=1 is the legacy non-optimal mode which conservatively waits on the host.
//
// If waitOnSelf is set, this additionally waits for the default stream to empty.
// In the new HIP_SYNC_NULL_STREAM=0 mode, this enqueues a marker which causes the default stream to
// wait for other activity, but doesn't actually block the host. If host blocking is desired, the
// caller should set syncHost.
//
// syncHost causes the host to wait for the stream to finish.
// Note the HIP_SYNC_NULL_STREAM=1 path always syncs to the host.
void ihipCtx_t::locked_syncDefaultStream(bool waitOnSelf, bool syncHost) {
LockedAccessor_CtxCrit_t crit(_criticalData);
tprintf(DB_SYNC, "syncDefaultStream \n");
// Vector of ops sent to each stream that will complete before ops sent to null stream:
std::vector<hc::completion_future> depOps;
bool last_stream_waited = false;
for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end();
streamI++) {
ihipStream_t* stream = *streamI;
// Don't wait for streams that have "opted-out" of syncing with NULL stream.
// And - don't wait for the NULL stream, unless waitOnSelf specified.
bool waitThisStream = (!(stream->_flags & hipStreamNonBlocking)) &&
(waitOnSelf || (stream != _defaultStream));
if (HIP_SYNC_NULL_STREAM) {
if (waitThisStream) {
last_stream_waited = false;
stream->locked_wait(last_stream_waited);
}
} else {
if (waitThisStream) {
LockedAccessor_StreamCrit_t streamCrit(stream->criticalData());
// The last marker will provide appropriate visibility:
if (!streamCrit->_av.get_is_empty()) {
depOps.push_back(streamCrit->_av.create_marker(hc::accelerator_scope));
tprintf(DB_SYNC, " push marker to wait for stream=%s\n",
ToString(stream).c_str());
} else {
tprintf(DB_SYNC, " skipped stream=%s since it is empty\n",
ToString(stream).c_str());
}
}
}
}
// Enqueue a barrier to wait on all the barriers we sent above:
if (!HIP_SYNC_NULL_STREAM && !depOps.empty()) {
LockedAccessor_StreamCrit_t defaultStreamCrit(_defaultStream->criticalData());
tprintf(DB_SYNC, " null-stream wait on %zu non-empty streams. sync_host=%d\n",
depOps.size(), syncHost);
hc::completion_future defaultCf = defaultStreamCrit->_av.create_blocking_marker(
depOps.begin(), depOps.end(), hc::accelerator_scope);
if (syncHost) {
defaultCf.wait(); // TODO - account for active or blocking here.
}
}
else if ( (HIP_SYNC_NULL_STREAM && !last_stream_waited) ||
(!HIP_SYNC_NULL_STREAM && depOps.empty()) ) {
// This catches all the conditions where the printf buffer
// need to be explicitly flushed
if (syncHost) {
Kalmar::getContext()->flushPrintfBuffer();
}
}
tprintf(DB_SYNC, " syncDefaultStream depOps=%zu\n", depOps.size());
}
//---
void ihipCtx_t::locked_removeStream(ihipStream_t* s) {
LockedAccessor_CtxCrit_t crit(_criticalData);
crit->streams().remove(s);
}
//---
// Heavyweight synchronization that waits on all streams, ignoring hipStreamNonBlocking flag.
void ihipCtx_t::locked_waitAllStreams() {
LockedAccessor_CtxCrit_t crit(_criticalData);
tprintf(DB_SYNC, "waitAllStream\n");
bool need_printf_flush = false;
for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end();
streamI++) {
bool waited;
(*streamI)->locked_wait(waited);
need_printf_flush = !waited;
}
    // When synchronizing with the last stream, if we didn't do an explicit wait,
    // then we need an extra flush of the printf buffer
if (need_printf_flush) {
Kalmar::getContext()->flushPrintfBuffer();
}
}
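// Build a human-readable, '+'-separated (and colorized) string of the debug flags set in 'db'.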
std::string HIP_DB_string(unsigned db) {
std::string dbStr;
bool first = true;
for (int i = 0; i < DB_MAX_FLAG; i++) {
if (db & (1 << i)) {
if (!first) {
dbStr += "+";
};
dbStr += dbName[i]._color;
dbStr += dbName[i]._shortName;
dbStr += KNRM;
first = false;
};
}
return dbStr;
}
// Callback used to process HIP_DB input, supports either
// integer or flag names separated by +
std::string HIP_DB_callback(void* var_ptr, const char* envVarString) {
int* var_ptr_int = static_cast<int*>(var_ptr);
std::string e(envVarString);
trim(&e);
if (!e.empty() && isdigit(e.c_str()[0])) {
long int v = strtol(envVarString, NULL, 0);
*var_ptr_int = (int)(v);
} else {
*var_ptr_int = 0;
std::vector<std::string> tokens;
tokenize(e, '+', &tokens);
for (auto t = tokens.begin(); t != tokens.end(); t++) {
for (int i = 0; i < DB_MAX_FLAG; i++) {
if (!strcmp(t->c_str(), dbName[i]._shortName)) {
*var_ptr_int |= (1 << i);
} // TODO - else throw error?
}
}
}
    return HIP_DB_string(*var_ptr_int);
}
// Callback used to process list of visible devices.
std::string HIP_VISIBLE_DEVICES_callback(void* var_ptr, const char* envVarString) {
// Parse the string stream of env and store the device ids to g_hip_visible_devices global
// variable
std::string str = envVarString;
std::istringstream ss(str);
std::string device_id;
    // Clean up the default value
g_hip_visible_devices.clear();
g_visible_device = true;
// Read the visible device numbers
while (std::getline(ss, device_id, ',')) {
if (atoi(device_id.c_str()) >= 0) {
g_hip_visible_devices.push_back(atoi(device_id.c_str()));
        } else {  // Any device numbers after an invalid entry are ignored
break;
}
}
std::string valueString;
    // Build a space-separated string of the visible device ids
for (int i = 0; i < g_hip_visible_devices.size(); i++) {
valueString += std::to_string((g_hip_visible_devices[i]));
valueString += ' ';
}
return valueString;
}
// TODO - change last arg to pointer.
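// Parse a comma-separated list of profiling triggers, each either "apiSeqNum" or
// "tid.apiSeqNum", into per-thread sorted trigger lists.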
void parseTrigger(std::string triggerString, std::vector<ProfTrigger>& profTriggers) {
std::vector<std::string> tidApiTokens;
tokenize(std::string(triggerString), ',', &tidApiTokens);
for (auto t = tidApiTokens.begin(); t != tidApiTokens.end(); t++) {
std::vector<std::string> oneToken;
// std::cout << "token=" << *t << "\n";
tokenize(std::string(*t), '.', &oneToken);
int tid = 1;
uint64_t apiTrigger = 0;
if (oneToken.size() == 1) {
// the case with just apiNum
apiTrigger = std::strtoull(oneToken[0].c_str(), nullptr, 0);
} else if (oneToken.size() == 2) {
// the case with tid.apiNum
tid = std::strtoul(oneToken[0].c_str(), nullptr, 0);
apiTrigger = std::strtoull(oneToken[1].c_str(), nullptr, 0);
} else {
throw ihipException(hipErrorRuntimeOther); // TODO -> bad env var?
}
if (tid > 10000) {
throw ihipException(hipErrorRuntimeOther); // TODO -> bad env var?
} else {
profTriggers.resize(tid + 1);
// std::cout << "tid:" << tid << " add: " << apiTrigger << "\n";
profTriggers[tid].add(apiTrigger);
}
}
for (int tid = 1; tid < profTriggers.size(); tid++) {
profTriggers[tid].sort();
profTriggers[tid].print(tid);
}
}
void HipReadEnv() {
/*
* Environment variables
*/
g_hip_visible_devices.push_back(0); /* Set the default value of visible devices */
READ_ENV_I(release, HIP_PRINT_ENV, 0, "Print HIP environment variables.");
//-- READ HIP_PRINT_ENV env first, since it has impact on later env var reading
    // TODO: In HIP/hcc, this variable blocks after both kernel commands and data transfer. Maybe
// should be bit-mask for each command type?
READ_ENV_I(release, HIP_LAUNCH_BLOCKING, CUDA_LAUNCH_BLOCKING,
"Make HIP kernel launches 'host-synchronous', so they block until any kernel "
"launches. Alias: CUDA_LAUNCH_BLOCKING.");
READ_ENV_S(release, HIP_LAUNCH_BLOCKING_KERNELS, 0,
"Comma-separated list of kernel names to make host-synchronous, so they block until "
"completed.");
if (!HIP_LAUNCH_BLOCKING_KERNELS.empty()) {
tokenize(HIP_LAUNCH_BLOCKING_KERNELS, ',', &g_hipLaunchBlockingKernels);
}
READ_ENV_I(release, HIP_API_BLOCKING, 0,
"Make HIP APIs 'host-synchronous', so they block until completed. Impacts "
"hipMemcpyAsync, hipMemsetAsync.");
READ_ENV_I(release, HIP_HIDDEN_FREE_MEM, 0,
"Amount of memory to hide from the free memory reported by hipMemGetInfo, specified "
"in MB. Impacts hipMemGetInfo.");
READ_ENV_C(release, HIP_DB, 0,
"Print debug info. Bitmask (HIP_DB=0xff) or flags separated by '+' "
"(HIP_DB=api+sync+mem+copy+fatbin)",
HIP_DB_callback);
if ((HIP_DB & (1 << DB_API)) && (HIP_TRACE_API == 0)) {
// Set HIP_TRACE_API default before we read it, so it is printed correctly.
HIP_TRACE_API = 1;
}
READ_ENV_I(release, HIP_TRACE_API, 0,
"Trace each HIP API call. Print function name and return code to stderr as program "
"executes.");
READ_ENV_S(release, HIP_TRACE_API_COLOR, 0,
"Color to use for HIP_API. None/Red/Green/Yellow/Blue/Magenta/Cyan/White");
READ_ENV_S(release, HIP_DB_START_API, 0,
"Comma-separated list of tid.api_seq_num for when to start debug and profiling.");
READ_ENV_S(release, HIP_DB_STOP_API, 0,
"Comma-separated list of tid.api_seq_num for when to stop debug and profiling.");
READ_ENV_C(release, HIP_VISIBLE_DEVICES, CUDA_VISIBLE_DEVICES,
"Only devices whose index is present in the sequence are visible to HIP "
"applications and they are enumerated in the order of sequence.",
HIP_VISIBLE_DEVICES_callback);
READ_ENV_I(release, HIP_WAIT_MODE, 0,
"Force synchronization mode. 1= force yield, 2=force spin, 0=defaults specified in "
"application");
READ_ENV_I(release, HIP_FORCE_P2P_HOST, 0,
"Force use of host/staging copy for peer-to-peer copies.1=always use copies, "
"2=always return false for hipDeviceCanAccessPeer");
READ_ENV_I(release, HIP_FORCE_SYNC_COPY, 0,
"Force all copies (even hipMemcpyAsync) to use sync copies");
READ_ENV_I(release, HIP_FAIL_SOC, 0,
"Fault on Sub-Optimal-Copy, rather than use a slower but functional implementation. "
" Bit 0x1=Fail on async copy with unpinned memory. Bit 0x2=Fail peer copy rather "
"than use staging buffer copy");
READ_ENV_I(release, HIP_SYNC_HOST_ALLOC, 0,
"Sync before and after all host memory allocations. May help stability");
READ_ENV_I(release, HIP_INIT_ALLOC, 0,
"If not -1, initialize allocated memory to specified byte");
READ_ENV_I(release, HIP_SYNC_NULL_STREAM, 0, "Synchronize on host for null stream submissions");
READ_ENV_I(release, HIP_FORCE_NULL_STREAM, 0,
"Force all stream allocations to secretly return the null stream");
READ_ENV_I(release, HIP_SYNC_STREAM_WAIT, 0, "hipStreamWaitEvent will synchronize to host");
READ_ENV_I(release, HIP_SYNC_FREE, 0,
"Force all calls to hipFree to sync all devices and all streams");
READ_ENV_I(release, HIP_HOST_COHERENT, 0,
"If set, all host memory will be allocated as fine-grained system memory. This "
"allows threadfence_system to work but prevents host memory from being cached on "
"GPU which may have performance impact.");
READ_ENV_I(release, HCC_OPT_FLUSH, 0,
"When set, use agent-scope fence operations rather than system-scope fence "
"operationsflush when possible. This flag controls both HIP and HCC behavior.");
READ_ENV_I(release, HIP_EVENT_SYS_RELEASE, 0,
"If set, event are created with hipEventReleaseToSystem by default. If 0, events "
"are created with hipEventReleaseToDevice by default. The defaults can be "
"overridden by specifying hipEventReleaseToSystem or hipEventReleaseToDevice flag "
"when creating the event.");
READ_ENV_I(release, HIP_DUMP_CODE_OBJECT, 0,
"If set, dump code object as __hip_dump_code_object[nnnn].o in the current directory,"
"where nnnn is the index number.");
// Some flags have both compile-time and runtime flags - generate a warning if user enables the
// runtime flag but the compile-time flag is disabled.
if (HIP_DB && !COMPILE_HIP_DB) {
fprintf(stderr,
"warning: env var HIP_DB=0x%x but COMPILE_HIP_DB=0. (perhaps enable "
"COMPILE_HIP_DB in src code before compiling?)\n",
HIP_DB);
}
if (HIP_TRACE_API && !COMPILE_HIP_TRACE_API) {
fprintf(stderr,
"warning: env var HIP_TRACE_API=0x%x but COMPILE_HIP_TRACE_API=0. (perhaps enable "
"COMPILE_HIP_TRACE_API in src code before compiling?)\n",
                HIP_TRACE_API);
}
if (HIP_TRACE_API) {
HIP_DB |= 0x1;
}
if (HIP_DB) {
fprintf(stderr, "HIP_DB=0x%x [%s]\n", HIP_DB, HIP_DB_string(HIP_DB).c_str());
}
std::transform(HIP_TRACE_API_COLOR.begin(), HIP_TRACE_API_COLOR.end(),
HIP_TRACE_API_COLOR.begin(), ::tolower);
if (HIP_TRACE_API_COLOR == "none") {
API_COLOR = "";
API_COLOR_END = "";
} else if (HIP_TRACE_API_COLOR == "red") {
API_COLOR = KRED;
} else if (HIP_TRACE_API_COLOR == "green") {
API_COLOR = KGRN;
} else if (HIP_TRACE_API_COLOR == "yellow") {
API_COLOR = KYEL;
} else if (HIP_TRACE_API_COLOR == "blue") {
API_COLOR = KBLU;
} else if (HIP_TRACE_API_COLOR == "magenta") {
API_COLOR = KMAG;
} else if (HIP_TRACE_API_COLOR == "cyan") {
API_COLOR = KCYN;
} else if (HIP_TRACE_API_COLOR == "white") {
API_COLOR = KWHT;
} else {
fprintf(stderr,
"warning: env var HIP_TRACE_API_COLOR=%s must be "
"None/Red/Green/Yellow/Blue/Magenta/Cyan/White",
HIP_TRACE_API_COLOR.c_str());
};
parseTrigger(HIP_DB_START_API, g_dbStartTriggers);
parseTrigger(HIP_DB_STOP_API, g_dbStopTriggers);
};
//---
// Function called one-time at initialization time to construct a table of all GPU devices.
// HIP/CUDA uses integer "deviceIds" - these are indexes into this table.
// AMP maintains a table of accelerators, but some are emulated - i.e., for debug or CPU.
// This function creates a vector with only the GPU accelerators.
// It is called with C++11 call_once, which provides thread-safety.
void ihipInit() {
HipReadEnv();
/*
* Build a table of valid compute devices.
*/
auto accs = hc::accelerator::get_all();
int deviceCnt = 0;
for (int i = 0; i < accs.size(); i++) {
if (!accs[i].get_is_emulated()) {
deviceCnt++;
}
};
// Make sure the hip visible devices are within the deviceCnt range
for (int i = 0; i < g_hip_visible_devices.size(); i++) {
        if ((g_hip_visible_devices[i] >= deviceCnt) || (g_hip_visible_devices[i] < 0)) {
            // Make sure any DeviceID after an invalid DeviceID is erased.
g_hip_visible_devices.resize(i);
break;
}
}
hsa_status_t err = hsa_iterate_agents(findCpuAgent, &g_cpu_agent);
if (err != HSA_STATUS_INFO_BREAK) {
// didn't find a CPU.
g_initDeviceFound = false;
return;
}
g_deviceArray = new ihipDevice_t*[deviceCnt];
g_deviceCnt = 0;
    if (g_visible_device) {
for (int i = 0; i < g_hip_visible_devices.size(); i++) {
int devIndex = g_hip_visible_devices[i];
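            // NOTE: devIndex+1 reflects this code's assumption that accs[0] is the
            // default (emulated) accelerator, so physical GPU n maps to accs[n+1].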
if (!accs[devIndex+1].get_is_emulated()) {
g_deviceArray[g_deviceCnt] = new ihipDevice_t(g_deviceCnt, deviceCnt, accs[devIndex+1]);
g_deviceCnt++;
}
}
    } else {
for (int i = 0; i < accs.size(); i++) {
if (!accs[i].get_is_emulated()) {
g_deviceArray[g_deviceCnt] = new ihipDevice_t(g_deviceCnt, deviceCnt, accs[i]);
g_deviceCnt++;
}
}
}
g_allAgents = static_cast<hsa_agent_t*>(malloc((g_deviceCnt + 1) * sizeof(hsa_agent_t)));
g_allAgents[0] = g_cpu_agent;
for (int i = 0; i < g_deviceCnt; i++) {
g_allAgents[i + 1] = g_deviceArray[i]->_hsaAgent;
}
g_numLogicalThreads = std::thread::hardware_concurrency();
// If HIP_VISIBLE_DEVICES is not set, make sure all devices are initialized
if (!g_visible_device) {
assert(deviceCnt == g_deviceCnt);
}
tprintf(DB_SYNC, "pid=%u %-30s g_numLogicalThreads=%u\n", getpid(), "<ihipInit>",
g_numLogicalThreads);
g_initDeviceFound = true;
}
namespace hip_impl {
hipError_t hip_init() {
static std::once_flag hip_initialized;
std::call_once(hip_initialized, ihipInit);
if (g_initDeviceFound) {
ihipCtxStackUpdate();
return hipSuccess;
}
else {
return hipErrorInsufficientDriver;
}
}
}
hipError_t ihipStreamSynchronize(TlsData *tls, hipStream_t stream) {
hipError_t e = hipSuccess;
if (stream == hipStreamNull) {
ihipCtx_t* ctx = ihipGetTlsDefaultCtx();
ctx->locked_syncDefaultStream(true /*waitOnSelf*/, true /*syncToHost*/);
} else {
        // note this does not synchronize with the NULL stream:
bool waited;
stream->locked_wait(waited);
if (!waited) {
Kalmar::getContext()->flushPrintfBuffer();
}
e = hipSuccess;
}
return e;
}
void ihipStreamCallbackHandler(ihipStreamCallback_t* cb) {
hipError_t e = hipSuccess;
// Synchronize stream
tprintf(DB_SYNC, "ihipStreamCallbackHandler wait on stream %s\n",
ToString(cb->_stream).c_str());
GET_TLS();
e = ihipStreamSynchronize(tls, cb->_stream);
// Call registered callback function
cb->_callback(cb->_stream, e, cb->_userData);
delete cb;
}
//---
// Get the stream to use for a command submission.
//
// If stream==NULL synchronize appropriately with other streams and return the default av for the
// device. If stream is valid, return the AV to use.
hipStream_t ihipSyncAndResolveStream(hipStream_t stream, bool lockAcquired) {
if (stream == hipStreamNull) {
// Submitting to NULL stream, call locked_syncDefaultStream to wait for all other streams:
GET_TLS();
ihipCtx_t* ctx = ihipGetTlsDefaultCtx();
tprintf(DB_SYNC, "ihipSyncAndResolveStream %s wait on default stream\n",
ToString(stream).c_str());
#ifndef HIP_API_PER_THREAD_DEFAULT_STREAM
ctx->locked_syncDefaultStream(false, false);
#endif
return ctx->_defaultStream;
} else {
// Submitting to a "normal" stream, just wait for null stream:
if (!(stream->_flags & hipStreamNonBlocking)) {
if (HIP_SYNC_NULL_STREAM) {
tprintf(DB_SYNC, "ihipSyncAndResolveStream %s host-wait on default stream\n",
ToString(stream).c_str());
stream->getCtx()->_defaultStream->locked_wait();
} else {
ihipStream_t* defaultStream = stream->getCtx()->_defaultStream;
bool needGatherMarker = false; // used to gather together other markers.
hc::completion_future dcf;
{
LockedAccessor_StreamCrit_t defaultStreamCrit(defaultStream->criticalData());
// TODO - could call create_blocking_marker(queue) or uses existing marker.
if (!defaultStreamCrit->_av.get_is_empty()) {
needGatherMarker = true;
tprintf(DB_SYNC, " %s adding marker to default %s for dependency\n",
ToString(stream).c_str(), ToString(defaultStream).c_str());
dcf = defaultStreamCrit->_av.create_marker(hc::accelerator_scope);
} else {
tprintf(DB_SYNC, " %s skipping marker since default stream is empty\n",
ToString(stream).c_str());
}
}
if (needGatherMarker) {
// ensure any commands sent to this stream wait on the NULL stream before
// continuing
if (!lockAcquired) {
LockedAccessor_StreamCrit_t thisStreamCrit(stream->criticalData());
// TODO - could be "noret" version of create_blocking_marker
thisStreamCrit->_av.create_blocking_marker(dcf, hc::accelerator_scope);
} else {
// this stream is already locked (e.g., call from hipExtLaunchMultiKernelMultiDevice)
stream->criticalData()._av.create_blocking_marker(dcf, hc::accelerator_scope);
}
tprintf(
DB_SYNC,
" %s adding marker to wait for freshly recorded default-stream marker \n",
ToString(stream).c_str());
}
}
}
return stream;
}
}
void ihipPrintKernelLaunch(const char* kernelName, const grid_launch_parm* lp,
const hipStream_t stream) {
if ((HIP_TRACE_API & (1 << TRACE_KCMD)) ||
(COMPILE_HIP_DB & HIP_TRACE_API)) {
GET_TLS();
std::stringstream os;
os << tls->tidInfo.pid() << " " << tls->tidInfo.tid() << "." << tls->tidInfo.apiSeqNum() << " hipLaunchKernel '"
<< kernelName << "'"
<< " gridDim:" << lp->grid_dim << " groupDim:" << lp->group_dim << " sharedMem:+"
<< lp->dynamic_group_mem_bytes << " " << *stream;
if (COMPILE_HIP_DB && HIP_TRACE_API) {
std::string fullStr;
recordApiTrace(tls, &fullStr, os.str());
}
}
}
// Called just before a kernel is launched from hipLaunchKernel.
// Allows runtime to track some information about the stream.
hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, dim3 block, grid_launch_parm* lp,
const char* kernelNameStr, bool lockAcquired) {
stream = ihipSyncAndResolveStream(stream, lockAcquired);
lp->grid_dim.x = grid.x;
lp->grid_dim.y = grid.y;
lp->grid_dim.z = grid.z;
lp->group_dim.x = block.x;
lp->group_dim.y = block.y;
lp->group_dim.z = block.z;
lp->barrier_bit = barrier_bit_queue_default;
if (!lockAcquired) stream->lockopen_preKernelCommand();
auto &crit = stream->criticalData();
lp->av = &(crit._av);
lp->cf = nullptr;
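    // With HCC_OPT_FLUSH, prefer agent-scope fences; fall back to system scope when the
    // optimization is off, or (on the acquire side) when the previous op was a copy.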
auto acq = (HCC_OPT_FLUSH && !crit._last_op_was_a_copy) ?
HSA_FENCE_SCOPE_AGENT : HSA_FENCE_SCOPE_SYSTEM;
auto rel = HCC_OPT_FLUSH ?
HSA_FENCE_SCOPE_AGENT : HSA_FENCE_SCOPE_SYSTEM;
lp->launch_fence = (acq << HSA_PACKET_HEADER_SCACQUIRE_FENCE_SCOPE) |
(rel << HSA_PACKET_HEADER_SCRELEASE_FENCE_SCOPE);
crit._last_op_was_a_copy = false;
ihipPrintKernelLaunch(kernelNameStr, lp, stream);
return (stream);
}
hipStream_t ihipPreLaunchKernel(hipStream_t stream, size_t grid, dim3 block, grid_launch_parm* lp,
const char* kernelNameStr, bool lockAcquired) {
return ihipPreLaunchKernel(stream, dim3(grid), block, lp, kernelNameStr, lockAcquired);
}
hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, size_t block, grid_launch_parm* lp,
const char* kernelNameStr, bool lockAcquired) {
return ihipPreLaunchKernel(stream, grid, dim3(block), lp, kernelNameStr, lockAcquired);
}
hipStream_t ihipPreLaunchKernel(hipStream_t stream, size_t grid, size_t block, grid_launch_parm* lp,
const char* kernelNameStr, bool lockAcquired) {
return ihipPreLaunchKernel(stream, dim3(grid), dim3(block), lp, kernelNameStr, lockAcquired);
}
//---
// Called after kernel finishes execution.
// This releases the lock on the stream.
void ihipPostLaunchKernel(const char* kernelName, hipStream_t stream, grid_launch_parm& lp, bool unlockPostponed) {
tprintf(DB_SYNC, "ihipPostLaunchKernel, unlocking stream\n");
stream->lockclose_postKernelCommand(kernelName, lp.av, unlockPostponed);
}
//=================================================================================================
// HIP API Implementation
//
// Implementor notes:
// - All functions should call HIP_INIT_API as first action:
// HIP_INIT_API(<function_arguments>);
//
// - All functions should use ihipLogStatus to return error code (not return error directly).
//=================================================================================================
//
//---
//-------------------------------------------------------------------------------------------------
const char* ihipErrorString(hipError_t hip_error) {
switch (hip_error) {
case hipSuccess:
return "hipSuccess";
case hipErrorOutOfMemory:
return "hipErrorOutOfMemory";
case hipErrorNotInitialized:
return "hipErrorNotInitialized";
case hipErrorDeinitialized:
return "hipErrorDeinitialized";
case hipErrorProfilerDisabled:
return "hipErrorProfilerDisabled";
case hipErrorProfilerNotInitialized:
return "hipErrorProfilerNotInitialized";
case hipErrorProfilerAlreadyStarted:
return "hipErrorProfilerAlreadyStarted";
case hipErrorProfilerAlreadyStopped:
return "hipErrorProfilerAlreadyStopped";
case hipErrorInsufficientDriver:
return "hipErrorInsufficientDriver";
case hipErrorInvalidImage:
return "hipErrorInvalidImage";
case hipErrorInvalidContext:
return "hipErrorInvalidContext";
case hipErrorContextAlreadyCurrent:
return "hipErrorContextAlreadyCurrent";
case hipErrorMapFailed:
return "hipErrorMapFailed";
case hipErrorUnmapFailed:
return "hipErrorUnmapFailed";
case hipErrorArrayIsMapped:
return "hipErrorArrayIsMapped";
case hipErrorAlreadyMapped:
return "hipErrorAlreadyMapped";
case hipErrorNoBinaryForGpu:
return "hipErrorNoBinaryForGpu";
case hipErrorAlreadyAcquired:
return "hipErrorAlreadyAcquired";
case hipErrorNotMapped:
return "hipErrorNotMapped";
case hipErrorNotMappedAsArray:
return "hipErrorNotMappedAsArray";
case hipErrorNotMappedAsPointer:
return "hipErrorNotMappedAsPointer";
case hipErrorECCNotCorrectable:
return "hipErrorECCNotCorrectable";
case hipErrorUnsupportedLimit:
return "hipErrorUnsupportedLimit";
case hipErrorContextAlreadyInUse:
return "hipErrorContextAlreadyInUse";
case hipErrorPeerAccessUnsupported:
return "hipErrorPeerAccessUnsupported";
case hipErrorInvalidKernelFile:
return "hipErrorInvalidKernelFile";
case hipErrorInvalidGraphicsContext:
return "hipErrorInvalidGraphicsContext";
case hipErrorInvalidSource:
return "hipErrorInvalidSource";
case hipErrorFileNotFound:
return "hipErrorFileNotFound";
case hipErrorSharedObjectSymbolNotFound:
return "hipErrorSharedObjectSymbolNotFound";
case hipErrorSharedObjectInitFailed:
return "hipErrorSharedObjectInitFailed";
case hipErrorOperatingSystem:
return "hipErrorOperatingSystem";
case hipErrorSetOnActiveProcess:
return "hipErrorSetOnActiveProcess";
case hipErrorInvalidHandle:
return "hipErrorInvalidHandle";
case hipErrorNotFound:
return "hipErrorNotFound";
case hipErrorIllegalAddress:
return "hipErrorIllegalAddress";
case hipErrorInvalidSymbol:
return "hipErrorInvalidSymbol";
case hipErrorMissingConfiguration:
return "hipErrorMissingConfiguration";
case hipErrorLaunchFailure:
return "hipErrorLaunchFailure";
case hipErrorCooperativeLaunchTooLarge:
return "hipErrorCooperativeLaunchTooLarge";
case hipErrorPriorLaunchFailure:
return "hipErrorPriorLaunchFailure";
case hipErrorLaunchTimeOut:
return "hipErrorLaunchTimeOut";
case hipErrorLaunchOutOfResources:
return "hipErrorLaunchOutOfResources";
case hipErrorInvalidDeviceFunction:
return "hipErrorInvalidDeviceFunction";
case hipErrorInvalidConfiguration:
return "hipErrorInvalidConfiguration";
case hipErrorInvalidDevice:
return "hipErrorInvalidDevice";
case hipErrorInvalidValue:
return "hipErrorInvalidValue";
case hipErrorInvalidDevicePointer:
return "hipErrorInvalidDevicePointer";
case hipErrorInvalidMemcpyDirection:
return "hipErrorInvalidMemcpyDirection";
case hipErrorUnknown:
return "hipErrorUnknown";
case hipErrorNotReady:
return "hipErrorNotReady";
case hipErrorNoDevice:
return "hipErrorNoDevice";
case hipErrorPeerAccessAlreadyEnabled:
return "hipErrorPeerAccessAlreadyEnabled";
case hipErrorPeerAccessNotEnabled:
return "hipErrorPeerAccessNotEnabled";
case hipErrorRuntimeMemory:
return "hipErrorRuntimeMemory";
case hipErrorRuntimeOther:
return "hipErrorRuntimeOther";
case hipErrorHostMemoryAlreadyRegistered:
return "hipErrorHostMemoryAlreadyRegistered";
case hipErrorHostMemoryNotRegistered:
return "hipErrorHostMemoryNotRegistered";
case hipErrorAssert:
return "hipErrorAssert";
case hipErrorNotSupported:
return "hipErrorNotSupported";
case hipErrorTbd:
return "hipErrorTbd";
default:
return "hipErrorUnknown";
};
};
// Returns true if copyEngineCtx can see the memory allocated on dstCtx and srcCtx.
// The peer-list for a context controls which contexts have access to the memory allocated on that
// context. So we check dstCtx's and srcCtx's peer lists to see if they both include copyEngineCtx.
bool ihipStream_t::canSeeMemory(const ihipCtx_t* copyEngineCtx, const hc::AmPointerInfo* dstPtrInfo,
const hc::AmPointerInfo* srcPtrInfo) {
if (copyEngineCtx == nullptr) {
return false;
}
// Make sure this is a device-to-device copy with all memory available to the requested copy
// engine
//
    // TODO - pointer-info stores a deviceID not a context, may have some unusual side-effects here:
if (dstPtrInfo->_sizeBytes == 0) {
return false;
} else if (dstPtrInfo->_appId != -1) {
#if USE_APP_PTR_FOR_CTX
ihipCtx_t* dstCtx = static_cast<ihipCtx_t*>(dstPtrInfo->_appPtr);
#else
ihipCtx_t* dstCtx = ihipGetPrimaryCtx(dstPtrInfo->_appId);
#endif
if (copyEngineCtx != dstCtx) {
// Only checks peer list if contexts are different
LockedAccessor_CtxCrit_t ctxCrit(dstCtx->criticalData());
#if DB_PEER_CTX
std::cerr << "checking peer : copyEngineCtx =" << copyEngineCtx << " dstCtx =" << dstCtx
<< " peerCnt=" << ctxCrit->peerCnt() << "\n";
#endif
if (!ctxCrit->isPeerWatcher(copyEngineCtx)) {
return false;
};
}
}
    // TODO - pointer-info stores a deviceID not a context, may have some unusual side-effects here:
if (srcPtrInfo->_sizeBytes == 0) {
return false;
} else if (srcPtrInfo->_appId != -1) {
#if USE_APP_PTR_FOR_CTX
ihipCtx_t* srcCtx = static_cast<ihipCtx_t*>(srcPtrInfo->_appPtr);
#else
ihipCtx_t* srcCtx = ihipGetPrimaryCtx(srcPtrInfo->_appId);
#endif
if (copyEngineCtx != srcCtx) {
// Only checks peer list if contexts are different
LockedAccessor_CtxCrit_t ctxCrit(srcCtx->criticalData());
#if DB_PEER_CTX
std::cerr << "checking peer : copyEngineCtx =" << copyEngineCtx << " srcCtx =" << srcCtx
<< " peerCnt=" << ctxCrit->peerCnt() << "\n";
#endif
if (!ctxCrit->isPeerWatcher(copyEngineCtx)) {
return false;
};
}
}
return true;
};
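// Stringify helper: expands to a case label that returns the enum's name, e.g.
// CASE_STRING(hipMemcpyHostToHost) -> case hipMemcpyHostToHost: return "hipMemcpyHostToHost";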
#define CASE_STRING(X) \
case X: \
return #X; \
break;
const char* hipMemcpyStr(unsigned memKind) {
switch (memKind) {
CASE_STRING(hipMemcpyHostToHost);
CASE_STRING(hipMemcpyHostToDevice);
CASE_STRING(hipMemcpyDeviceToHost);
CASE_STRING(hipMemcpyDeviceToDevice);
CASE_STRING(hipMemcpyDefault);
default:
return ("unknown memcpyKind");
};
}
const char* hcMemcpyStr(hc::hcCommandKind memKind) {
using namespace hc;
switch (memKind) {
CASE_STRING(hcMemcpyHostToHost);
CASE_STRING(hcMemcpyHostToDevice);
CASE_STRING(hcMemcpyDeviceToHost);
CASE_STRING(hcMemcpyDeviceToDevice);
// CASE_STRING(hcMemcpyDefault);
default:
return ("unknown memcpyKind");
};
}
// Resolve hipMemcpyDefault to a known type.
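//   src in device mem, dst in device mem -> DeviceToDevice
//   src in device mem, dst in host mem   -> DeviceToHost
//   src in host mem,   dst in device mem -> HostToDevice
//   src in host mem,   dst in host mem   -> HostToHost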
unsigned ihipStream_t::resolveMemcpyDirection(bool srcInDeviceMem, bool dstInDeviceMem) {
hipMemcpyKind kind = hipMemcpyDefault;
if (srcInDeviceMem && dstInDeviceMem) {
kind = hipMemcpyDeviceToDevice;
}
if (srcInDeviceMem && !dstInDeviceMem) {
kind = hipMemcpyDeviceToHost;
}
if (!srcInDeviceMem && !dstInDeviceMem) {
kind = hipMemcpyHostToHost;
}
if (!srcInDeviceMem && dstInDeviceMem) {
kind = hipMemcpyHostToDevice;
}
assert(kind != hipMemcpyDefault);
return kind;
}
// hipMemKind must be "resolved" to a specific direction - cannot be default.
void ihipStream_t::resolveHcMemcpyDirection(unsigned hipMemKind,
const hc::AmPointerInfo* dstPtrInfo,
const hc::AmPointerInfo* srcPtrInfo,
hc::hcCommandKind* hcCopyDir, ihipCtx_t** copyDevice,
bool* forceUnpinnedCopy) {
// Ignore what the user tells us and always resolve the direction:
// Some apps apparently rely on this.
hipMemKind = resolveMemcpyDirection(srcPtrInfo->_isInDeviceMem, dstPtrInfo->_isInDeviceMem);
switch (hipMemKind) {
case hipMemcpyHostToHost:
*hcCopyDir = hc::hcMemcpyHostToHost;
break;
case hipMemcpyHostToDevice:
*hcCopyDir = hc::hcMemcpyHostToDevice;
break;
case hipMemcpyDeviceToHost:
*hcCopyDir = hc::hcMemcpyDeviceToHost;
break;
case hipMemcpyDeviceToDevice:
*hcCopyDir = hc::hcMemcpyDeviceToDevice;
break;
default:
throw ihipException(hipErrorRuntimeOther);
};
if (srcPtrInfo->_isInDeviceMem) {
*copyDevice = ihipGetPrimaryCtx(srcPtrInfo->_appId);
} else if (dstPtrInfo->_isInDeviceMem) {
*copyDevice = ihipGetPrimaryCtx(dstPtrInfo->_appId);
} else {
*copyDevice = nullptr;
}
*forceUnpinnedCopy = false;
if (canSeeMemory(*copyDevice, dstPtrInfo, srcPtrInfo)) {
if (HIP_FORCE_P2P_HOST & 0x1) {
*forceUnpinnedCopy = true;
tprintf(DB_COPY,
"Copy engine (dev:%d agent=0x%lx) can see src and dst but "
"HIP_FORCE_P2P_HOST=0, forcing copy through staging buffers.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
} else {
tprintf(DB_COPY, "Copy engine (dev:%d agent=0x%lx) can see src and dst.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
}
} else {
*forceUnpinnedCopy = true;
tprintf(DB_COPY,
"Copy engine(dev:%d agent=0x%lx) cannot see both host and device pointers - "
"forcing copy with unpinned engine.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
if (HIP_FAIL_SOC & 0x2) {
fprintf(stderr,
"HIP_FAIL_SOC: P2P: copy engine(dev:%d agent=0x%lx) cannot see both host and "
"device pointers - forcing copy with unpinned engine.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
throw ihipException(hipErrorRuntimeOther);
}
}
}
void printPointerInfo(unsigned dbFlag, const char* tag, const void* ptr,
const hc::AmPointerInfo& ptrInfo) {
tprintf(dbFlag,
" %s=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d "
"registered=%d allocSeqNum=%zu, appAllocationFlags=%x, appPtr=%p\n",
tag, ptr, ptrInfo._hostPointer, ptrInfo._devicePointer, ptrInfo._sizeBytes,
ptrInfo._appId, ptrInfo._sizeBytes != 0, ptrInfo._isInDeviceMem, !ptrInfo._isAmManaged,
ptrInfo._allocSeqNum, ptrInfo._appAllocationFlags, ptrInfo._appPtr);
}
// The pointer-info as returned by HC refers to the allocation.
// This routine modifies the pointer-info so it appears to refer to the specific ptr and sizeBytes.
// TODO -remove this when HCC uses HSA pointer info functions directly.
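// For example (hypothetical values): an allocation with base 0x1000 and _sizeBytes 256, queried
// with ptr=0x1010 and sizeBytes=16, is rewritten so the pointers refer to 0x1010 and _sizeBytes==16.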
void tailorPtrInfo(hc::AmPointerInfo* ptrInfo, const void* ptr, size_t sizeBytes) {
const char* ptrc = static_cast<const char*>(ptr);
if (ptrInfo->_sizeBytes == 0) {
// invalid ptrInfo, don't modify
return;
} else if (ptrInfo->_isInDeviceMem) {
assert(ptrInfo->_devicePointer != nullptr);
std::ptrdiff_t diff = ptrc - static_cast<const char*>(ptrInfo->_devicePointer);
// TODO : assert-> runtime assert that only appears in debug mode
assert(diff >= 0);
assert(diff <= ptrInfo->_sizeBytes);
ptrInfo->_devicePointer = const_cast<void*>(ptr);
if (ptrInfo->_hostPointer != nullptr) {
ptrInfo->_hostPointer = static_cast<char*>(ptrInfo->_hostPointer) + diff;
}
} else {
assert(ptrInfo->_hostPointer != nullptr);
std::ptrdiff_t diff = ptrc - static_cast<const char*>(ptrInfo->_hostPointer);
// TODO : assert-> runtime assert that only appears in debug mode
assert(diff >= 0);
assert(diff <= ptrInfo->_sizeBytes);
ptrInfo->_hostPointer = const_cast<void*>(ptr);
if (ptrInfo->_devicePointer != nullptr) {
ptrInfo->_devicePointer = static_cast<char*>(ptrInfo->_devicePointer) + diff;
}
}
assert(sizeBytes <= ptrInfo->_sizeBytes);
ptrInfo->_sizeBytes = sizeBytes;
};
bool getTailoredPtrInfo(const char* tag, hc::AmPointerInfo* ptrInfo, const void* ptr,
size_t sizeBytes) {
bool tracked = (hc::am_memtracker_getinfo(ptrInfo, ptr) == AM_SUCCESS);
printPointerInfo(DB_COPY, tag, ptr, *ptrInfo);
if (tracked) {
tailorPtrInfo(ptrInfo, ptr, sizeBytes);
printPointerInfo(DB_COPY, " mod", ptr, *ptrInfo);
}
return tracked;
};
// TODO : For registered and host memory, if the portable flag is set, we need to recognize that and
// perform appropriate copy operation. What can happen now is that Portable memory is mapped into
// multiple devices but Peer access is not enabled. The peer detection logic doesn't see that the
// memory is already mapped and so tries to use an unpinned copy algorithm. If this is PinInPlace,
// then an error can occur. Need to track Portable flag correctly or use new RT functionality to
// query the peer status for the pointer.
//
// TODO - remove kind parm from here or use it below?
void ihipStream_t::locked_copySync(void* dst, const void* src, size_t sizeBytes, unsigned kind,
bool resolveOn) {
ihipCtx_t* ctx = this->getCtx();
const ihipDevice_t* device = ctx->getDevice();
if (device == NULL) {
throw ihipException(hipErrorInvalidDevice);
}
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
// Some code in HCC and in printPointerInfo uses _sizeBytes==0 as an indication ptr is not
// valid, so check it here:
if (!dstTracked) {
assert(dstPtrInfo._sizeBytes == 0);
}
if (!srcTracked) {
assert(srcPtrInfo._sizeBytes == 0);
}
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
{
LockedAccessor_StreamCrit_t crit(_criticalData);
tprintf(DB_COPY,
"copySync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) src=%p(phys_dev:%d, "
"isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId,
dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem,
sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy);
printPointerInfo(DB_COPY, " dst", dst, dstPtrInfo);
printPointerInfo(DB_COPY, " src", src, srcPtrInfo);
crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
}
}
bool ihipStream_t::locked_copy2DSync(void* dst, const void* src, size_t width, size_t height, size_t srcPitch, size_t dstPitch, unsigned kind,
bool resolveOn) {
bool retStatus = true;
ihipCtx_t* ctx = this->getCtx();
const ihipDevice_t* device = ctx->getDevice();
if (device == NULL) {
throw ihipException(hipErrorInvalidDevice);
}
size_t sizeBytes = width*height;
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
// Some code in HCC and in printPointerInfo uses _sizeBytes==0 as an indication ptr is not
    // valid, so check it here:
if (!dstTracked) {
assert(dstPtrInfo._sizeBytes == 0);
}
if (!srcTracked) {
assert(srcPtrInfo._sizeBytes == 0);
}
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
{
LockedAccessor_StreamCrit_t crit(_criticalData);
tprintf(DB_COPY,
"copy2DSync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) src=%p(phys_dev:%d, "
"isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId,
dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem,
sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy);
printPointerInfo(DB_COPY, " dst", dst, dstPtrInfo);
printPointerInfo(DB_COPY, " src", src, srcPtrInfo);
#if (__hcc_workweek__ >= 19101)
        if (!crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy)) {
tprintf(DB_COPY,"locked_copy2DSync failed to use SDMA\n");
retStatus = false;
}
#else
crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
#endif
}
return retStatus;
}
void ihipStream_t::addSymbolPtrToTracker(hc::accelerator& acc, void* ptr, size_t sizeBytes) {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo ptrInfo(NULL, ptr, ptr, sizeBytes, acc, true, false);
#else
hc::AmPointerInfo ptrInfo(NULL, ptr, sizeBytes, acc, true, false);
#endif
hc::am_memtracker_add(ptr, ptrInfo);
}
void ihipStream_t::lockedSymbolCopySync(hc::accelerator& acc, void* dst, void* src,
size_t sizeBytes, size_t offset, unsigned kind) {
if (kind == hipMemcpyHostToHost) {
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset, Kalmar::hcMemcpyHostToHost);
}
if (kind == hipMemcpyHostToDevice) {
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset);
}
if (kind == hipMemcpyDeviceToDevice) {
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset, Kalmar::hcMemcpyDeviceToDevice);
}
if (kind == hipMemcpyDeviceToHost) {
acc.memcpy_symbol((void*)src, (void*)dst, sizeBytes, offset, Kalmar::hcMemcpyDeviceToHost);
}
}
void ihipStream_t::lockedSymbolCopyAsync(hc::accelerator& acc, void* dst, void* src,
size_t sizeBytes, size_t offset, unsigned kind) {
    // TODO - review - this looks broken, should not be adding pointers to tracker dynamically:
if (kind == hipMemcpyHostToDevice) {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool srcTracked = (hc::am_memtracker_getinfo(&srcPtrInfo, src) == AM_SUCCESS);
if (srcTracked) {
addSymbolPtrToTracker(acc, dst, sizeBytes);
locked_getAv()->copy_async((void*)src, dst, sizeBytes);
} else {
LockedAccessor_StreamCrit_t crit(_criticalData);
this->wait(crit);
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset);
}
}
if (kind == hipMemcpyDeviceToHost) {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool dstTracked = (hc::am_memtracker_getinfo(&dstPtrInfo, dst) == AM_SUCCESS);
if (dstTracked) {
addSymbolPtrToTracker(acc, src, sizeBytes);
locked_getAv()->copy_async((void*)src, dst, sizeBytes);
} else {
LockedAccessor_StreamCrit_t crit(_criticalData);
this->wait(crit);
acc.memcpy_symbol((void*)src, (void*)dst, sizeBytes, offset,
Kalmar::hcMemcpyDeviceToHost);
}
}
}
void ihipStream_t::locked_copyAsync(void* dst, const void* src, size_t sizeBytes, unsigned kind) {
const ihipCtx_t* ctx = this->getCtx();
if ((ctx == nullptr) || (ctx->getDevice() == nullptr)) {
tprintf(DB_COPY, "locked_copyAsync bad ctx or device\n");
throw ihipException(hipErrorInvalidDevice);
}
if (kind == hipMemcpyHostToHost) {
        tprintf(DB_COPY, "locked_copyAsync: H2H with memcpy\n");
// TODO - consider if we want to perhaps use the GPU SDMA engines anyway, to avoid the
// host-side sync here and keep everything flowing on the GPU.
/* As this is a CPU op, we need to wait until all
the commands in current stream are finished.
*/
LockedAccessor_StreamCrit_t crit(_criticalData);
this->wait(crit);
memcpy(dst, src, sizeBytes);
} else {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
tprintf(DB_COPY, "copyASync dst=%p src=%p, sz=%zu\n", dst, src, sizeBytes);
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
tprintf(DB_COPY, " copyDev:%d dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, hcMemcpyStr(hcCopyDir),
forceUnpinnedCopy);
// "tracked" really indicates if the pointer's virtual address is available in the GPU
// address space. If both pointers are not tracked, we need to fall back to a sync copy.
if (dstTracked && srcTracked && !forceUnpinnedCopy &&
copyDevice /*code below assumes this is !nullptr*/) {
LockedAccessor_StreamCrit_t crit(_criticalData);
// Perform fast asynchronous copy - we know copyDevice != NULL based on check above
try {
if (HIP_FORCE_SYNC_COPY) {
crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc, forceUnpinnedCopy);
} else {
crit->_av.copy_async_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc);
}
} catch (Kalmar::runtime_exception) {
throw ihipException(hipErrorRuntimeOther);
};
if (HIP_API_BLOCKING) {
tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for completion of hipMemcpyAsync(sz=%zu)\n",
ToString(this).c_str(), sizeBytes);
this->wait(crit);
}
} else {
if (HIP_FAIL_SOC & 0x1) {
fprintf(stderr,
"HIP_FAIL_SOC failed, async_copy requested but could not be completed "
"since src or dst not accesible to copy agent\n");
fprintf(stderr,
"copyASync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) "
"src=%p(phys_dev:%d, isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId,
dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId,
srcPtrInfo._isInDeviceMem, sizeBytes, hcMemcpyStr(hcCopyDir),
forceUnpinnedCopy);
fprintf(
stderr,
" dst=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d\n",
dst, dstPtrInfo._hostPointer, dstPtrInfo._devicePointer, dstPtrInfo._sizeBytes,
dstPtrInfo._appId, dstTracked, dstPtrInfo._isInDeviceMem);
fprintf(
stderr,
" src=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d\n",
src, srcPtrInfo._hostPointer, srcPtrInfo._devicePointer, srcPtrInfo._sizeBytes,
srcPtrInfo._appId, srcTracked, srcPtrInfo._isInDeviceMem);
throw ihipException(hipErrorRuntimeOther);
}
// Perform slow synchronous copy:
LockedAccessor_StreamCrit_t crit(_criticalData);
crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
}
}
}
bool ihipStream_t::locked_copy2DAsync(void* dst, const void* src, size_t width, size_t height, size_t srcPitch, size_t dstPitch, unsigned kind)
{
bool retStatus = true;
const ihipCtx_t* ctx = this->getCtx();
if ((ctx == nullptr) || (ctx->getDevice() == nullptr)) {
tprintf(DB_COPY, "locked_copy2DAsync bad ctx or device\n");
throw ihipException(hipErrorInvalidDevice);
}
hc::accelerator acc;
size_t sizeBytes = width*height;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
tprintf(DB_COPY, "copy2DAsync dst=%p src=%p, sz=%zu\n", dst, src, sizeBytes);
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
tprintf(DB_COPY, " copyDev:%d dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, hcMemcpyStr(hcCopyDir),
forceUnpinnedCopy);
if (dstTracked && srcTracked && !forceUnpinnedCopy &&
copyDevice /*code below assumes this is !nullptr*/) {
LockedAccessor_StreamCrit_t crit(_criticalData);
try {
if (HIP_FORCE_SYNC_COPY) {
#if (__hcc_workweek__ >= 19101)
                if (!crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc,
                             forceUnpinnedCopy)) {
tprintf(DB_COPY,"locked_copy2DASync with HIP_FORCE_SYNC_COPY failed to use SDMA\n");
retStatus = false;
}
#else
crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc,
forceUnpinnedCopy);
#endif
} else {
const auto& future = crit->_av.copy2d_async_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc);
                if (!future.valid()) {
tprintf(DB_COPY, "locked_copy2DAsync failed to use SDMA\n");
retStatus = false;
}
}
} catch (Kalmar::runtime_exception) {
throw ihipException(hipErrorRuntimeOther);
};
if (HIP_API_BLOCKING) {
tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for completion of hipMemcpy2DAsync(sz=%zu)\n",
ToString(this).c_str(), sizeBytes);
this->wait(crit);
}
} else {
        // Do sync 2D copy
LockedAccessor_StreamCrit_t crit(_criticalData);
#if (__hcc_workweek__ >= 19101)
        if (!crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
                     forceUnpinnedCopy)) {
tprintf(DB_COPY, "locked_copy2DAsync Sync copy failed to use SDMA\n");
retStatus = false;
}
#else
crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
#endif
}
return retStatus;
}
hipError_t hipProfilerStart() {
HIP_INIT_API(hipProfilerStart);
return ihipLogStatus(hipSuccess);
};
hipError_t hipProfilerStop() {
HIP_INIT_API(hipProfilerStop);
return ihipLogStatus(hipSuccess);
};
//-------------------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------------------
// HCC-specific accessor functions:
//---
hipError_t hipHccGetAccelerator(int deviceId, hc::accelerator* acc) {
HIP_INIT_API(hipHccGetAccelerator, deviceId, acc);
const ihipDevice_t* device = ihipGetDevice(deviceId);
hipError_t err;
if (device == NULL) {
err = hipErrorInvalidDevice;
} else {
*acc = device->_acc;
err = hipSuccess;
}
return ihipLogStatus(err);
}
//---
hipError_t hipHccGetAcceleratorView(hipStream_t stream, hc::accelerator_view** av) {
HIP_INIT_API(hipHccGetAcceleratorView, stream, av);
if (stream == hipStreamNull) {
ihipCtx_t* device = ihipGetTlsDefaultCtx();
stream = device->_defaultStream;
}
*av = stream->locked_getAv(); // TODO - review.
hipError_t err = hipSuccess;
return ihipLogStatus(err);
}
// TODO - add identifier numbers for streams and devices to help with debugging.
// TODO - add a context sequence number for debug. Print operator<< ctx:0.1 (device.ctx)
namespace hip_impl {
std::unordered_set<std::string>& get_all_gpuarch() {
static std::unordered_set<std::string> r{};
static std::once_flag init;
std::call_once(init, []() {
        for (int i = 0; i < g_deviceCnt; i++) {
r.insert("hcc-amdgcn-amd-amdhsa--gfx"+std::to_string(g_deviceArray[i]->_props.gcnArch));
}});
return r;
}
std::vector<hsa_agent_t> all_hsa_agents() {
std::vector<hsa_agent_t> r{};
std::vector<hc::accelerator> visible_accelerators;
for (int i=0; i < g_deviceCnt; i++)
visible_accelerators.push_back(g_deviceArray[i]->_acc);
for (auto&& acc : visible_accelerators) {
const auto agent = acc.get_hsa_agent();
if (!agent || !acc.is_hsa_accelerator()) continue;
r.emplace_back(*static_cast<hsa_agent_t*>(agent));
}
return r;
}
[[noreturn]]
void hip_throw(const std::exception& ex) {
#if defined(__cpp_exceptions)
if (auto rte = dynamic_cast<const std::runtime_error*>(&ex)) throw *rte;
if (auto lge = dynamic_cast<const std::logic_error*>(&ex)) throw *lge;
throw ex;
#else
std::cerr << ex.what() << std::endl;
std::terminate();
#endif
}
} // Namespace hip_impl.
| 1 | 9,053 | Formatting here and all below: missing space before open bracket for the compound statement. | ROCm-Developer-Tools-HIP | cpp |
@@ -44,7 +44,6 @@ import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.mortbay.util.ajax.JSON;
import static org.apache.solr.security.JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.AUTZ_HEADER_PROBLEM;
import static org.apache.solr.security.JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.NO_AUTZ_HEADER; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.security;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.Principal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.Utils;
import org.jose4j.jwk.RsaJsonWebKey;
import org.jose4j.jwk.RsaJwkGenerator;
import org.jose4j.jws.AlgorithmIdentifiers;
import org.jose4j.jws.JsonWebSignature;
import org.jose4j.jwt.JwtClaims;
import org.jose4j.keys.BigEndianBigInteger;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
import static org.apache.solr.security.JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.AUTZ_HEADER_PROBLEM;
import static org.apache.solr.security.JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.NO_AUTZ_HEADER;
import static org.apache.solr.security.JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.SCOPE_MISSING;
public class JWTAuthPluginTest extends SolrTestCaseJ4 {
private static String testHeader;
private static String slimHeader;
private JWTAuthPlugin plugin;
private HashMap<String, Object> testJwk;
private static RsaJsonWebKey rsaJsonWebKey;
private HashMap<String, Object> testConfig;
private HashMap<String, Object> minimalConfig;
@BeforeClass
public static void beforeAll() throws Exception {
// Generate an RSA key pair, which will be used for signing and verification of the JWT, wrapped in a JWK
rsaJsonWebKey = RsaJwkGenerator.generateJwk(2048);
rsaJsonWebKey.setKeyId("k1");
JwtClaims claims = generateClaims();
JsonWebSignature jws = new JsonWebSignature();
jws.setPayload(claims.toJson());
jws.setKey(rsaJsonWebKey.getPrivateKey());
jws.setKeyIdHeaderValue(rsaJsonWebKey.getKeyId());
jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
String testJwt = jws.getCompactSerialization();
testHeader = "Bearer" + " " + testJwt;
claims.unsetClaim("iss");
claims.unsetClaim("aud");
claims.unsetClaim("exp");
jws.setPayload(claims.toJson());
String slimJwt = jws.getCompactSerialization();
slimHeader = "Bearer" + " " + slimJwt;
}
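  // The claims below sketch a typical OIDC-style access token: issuer, audience, expiry and
  // subject, plus custom claims ("scope", "groups", "claim1"...) that individual tests match on.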
static JwtClaims generateClaims() {
JwtClaims claims = new JwtClaims();
claims.setIssuer("IDServer"); // who creates the token and signs it
claims.setAudience("Solr"); // to whom the token is intended to be sent
claims.setExpirationTimeMinutesInTheFuture(10); // time when the token will expire (10 minutes from now)
claims.setGeneratedJwtId(); // a unique identifier for the token
claims.setIssuedAtToNow(); // when the token was issued/created (now)
claims.setNotBeforeMinutesInThePast(2); // time before which the token is not yet valid (2 minutes ago)
claims.setSubject("solruser"); // the subject/principal is whom the token is about
claims.setStringClaim("scope", "solr:read");
claims.setClaim("name", "Solr User"); // additional claims/attributes about the subject can be added
claims.setClaim("customPrincipal", "custom"); // additional claims/attributes about the subject can be added
claims.setClaim("claim1", "foo"); // additional claims/attributes about the subject can be added
claims.setClaim("claim2", "bar"); // additional claims/attributes about the subject can be added
claims.setClaim("claim3", "foo"); // additional claims/attributes about the subject can be added
List<String> groups = Arrays.asList("group-one", "other-group", "group-three");
claims.setStringListClaim("groups", groups); // multi-valued claims work too and will end up as a JSON array
return claims;
}
@Before
public void setUp() throws Exception {
super.setUp();
// Create an auth plugin
plugin = new JWTAuthPlugin();
// Create a JWK config for security.json
testJwk = new HashMap<>();
testJwk.put("kty", rsaJsonWebKey.getKeyType());
testJwk.put("e", BigEndianBigInteger.toBase64Url(rsaJsonWebKey.getRsaPublicKey().getPublicExponent()));
testJwk.put("use", rsaJsonWebKey.getUse());
testJwk.put("kid", rsaJsonWebKey.getKeyId());
testJwk.put("alg", rsaJsonWebKey.getAlgorithm());
testJwk.put("n", BigEndianBigInteger.toBase64Url(rsaJsonWebKey.getRsaPublicKey().getModulus()));
testConfig = new HashMap<>();
testConfig.put("class", "org.apache.solr.security.JWTAuthPlugin");
testConfig.put("jwk", testJwk);
plugin.init(testConfig);
minimalConfig = new HashMap<>();
minimalConfig.put("class", "org.apache.solr.security.JWTAuthPlugin");
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
plugin.close();
}
@Test
public void initWithoutRequired() {
plugin.init(testConfig);
assertEquals(AUTZ_HEADER_PROBLEM, plugin.authenticate("foo").getAuthCode());
}
@Test
public void initFromSecurityJSONLocalJWK() throws Exception {
Path securityJson = TEST_PATH().resolve("security").resolve("jwt_plugin_jwk_security.json");
InputStream is = Files.newInputStream(securityJson);
Map<String,Object> securityConf = (Map<String, Object>) Utils.fromJSON(is);
Map<String, Object> authConf = (Map<String, Object>) securityConf.get("authentication");
plugin.init(authConf);
}
@Test
public void initFromSecurityJSONUrlJwk() throws Exception {
Path securityJson = TEST_PATH().resolve("security").resolve("jwt_plugin_jwk_url_security.json");
InputStream is = Files.newInputStream(securityJson);
Map<String,Object> securityConf = (Map<String, Object>) Utils.fromJSON(is);
Map<String, Object> authConf = (Map<String, Object>) securityConf.get("authentication");
plugin.init(authConf);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
assertTrue(resp.getJwtException().getMessage().contains("Connection refused"));
}
@Test
public void initWithJwk() {
HashMap<String, Object> authConf = new HashMap<>();
authConf.put("jwk", testJwk);
plugin = new JWTAuthPlugin();
plugin.init(authConf);
}
@Test
public void initWithJwkUrl() {
HashMap<String, Object> authConf = new HashMap<>();
authConf.put("jwkUrl", "https://127.0.0.1:9999/foo.jwk");
plugin = new JWTAuthPlugin();
plugin.init(authConf);
}
@Test
public void parseJwkSet() throws Exception {
plugin.parseJwkSet(testJwk);
HashMap<String, Object> testJwks = new HashMap<>();
List<Map<String, Object>> keys = new ArrayList<>();
keys.add(testJwk);
testJwks.put("keys", keys);
plugin.parseJwkSet(testJwks);
}
@Test
public void authenticateOk() {
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertTrue(resp.isAuthenticated());
assertEquals("solruser", resp.getPrincipal().getName());
}
@Test
public void authFailedMissingSubject() {
testConfig.put("iss", "NA");
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertFalse(resp.isAuthenticated());
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
testConfig.put("iss", "IDServer");
plugin.init(testConfig);
resp = plugin.authenticate(testHeader);
assertTrue(resp.isAuthenticated());
}
@Test
public void authFailedMissingAudience() {
testConfig.put("aud", "NA");
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertFalse(resp.isAuthenticated());
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
testConfig.put("aud", "Solr");
plugin.init(testConfig);
resp = plugin.authenticate(testHeader);
assertTrue(resp.isAuthenticated());
}
@Test
public void authFailedMissingPrincipal() {
testConfig.put("principalClaim", "customPrincipal");
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertTrue(resp.isAuthenticated());
testConfig.put("principalClaim", "NA");
plugin.init(testConfig);
resp = plugin.authenticate(testHeader);
assertFalse(resp.isAuthenticated());
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.PRINCIPAL_MISSING, resp.getAuthCode());
}
@Test
public void claimMatch() {
// all custom claims match regex
Map<String, String> shouldMatch = new HashMap<>();
shouldMatch.put("claim1", "foo");
shouldMatch.put("claim2", "foo|bar");
shouldMatch.put("claim3", "f\\w{2}$");
testConfig.put("claimsMatch", shouldMatch);
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertTrue(resp.isAuthenticated());
// Required claim does not exist
shouldMatch.clear();
shouldMatch.put("claim9", "NA");
plugin.init(testConfig);
resp = plugin.authenticate(testHeader);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.CLAIM_MISMATCH, resp.getAuthCode());
    // Required claim does not match regex
    shouldMatch.clear();
    shouldMatch.put("claim1", "NA");
    plugin.init(testConfig);
    resp = plugin.authenticate(testHeader);
    assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.CLAIM_MISMATCH, resp.getAuthCode());
}
@Test
public void missingIssAudExp() {
    testConfig.put("requireExp", false);
    testConfig.put("requireSub", false);
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(slimHeader);
assertTrue(resp.isAuthenticated());
// Missing exp header
testConfig.put("requireExp", true);
plugin.init(testConfig);
resp = plugin.authenticate(slimHeader);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
// Missing sub header
testConfig.put("requireSub", true);
plugin.init(testConfig);
resp = plugin.authenticate(slimHeader);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
}
@Test
public void algWhitelist() {
testConfig.put("algWhitelist", Arrays.asList("PS384", "PS512"));
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
assertTrue(resp.getErrorMessage().contains("not a whitelisted"));
}
@Test
public void scope() {
testConfig.put("scope", "solr:read solr:admin");
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertTrue(resp.isAuthenticated());
Principal principal = resp.getPrincipal();
assertTrue(principal instanceof VerifiedUserRoles);
Set<String> roles = ((VerifiedUserRoles)principal).getVerifiedRoles();
assertEquals(1, roles.size());
assertTrue(roles.contains("solr:read"));
}
@Test
public void wrongScope() {
testConfig.put("scope", "wrong");
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
assertFalse(resp.isAuthenticated());
assertNull(resp.getPrincipal());
assertEquals(SCOPE_MISSING, resp.getAuthCode());
}
@Test
public void noHeaderBlockUnknown() {
testConfig.put("blockUnknown", true);
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(null);
assertEquals(NO_AUTZ_HEADER, resp.getAuthCode());
}
@Test
public void noHeaderNotBlockUnknown() {
testConfig.put("blockUnknown", false);
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(null);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.PASS_THROUGH, resp.getAuthCode());
}
@Test
public void minimalConfigPassThrough() {
    minimalConfig.put("blockUnknown", false);
    plugin.init(minimalConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(null);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.PASS_THROUGH, resp.getAuthCode());
}
@Test
public void wellKnownConfig() throws IOException {
String wellKnownUrl = TEST_PATH().resolve("security").resolve("jwt_well-known-config.json").toAbsolutePath().toUri().toString();
testConfig.put("wellKnownUrl", wellKnownUrl);
testConfig.remove("jwk");
plugin.init(testConfig);
JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(null);
assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.PASS_THROUGH, resp.getAuthCode());
}
@Test(expected = SolrException.class)
public void onlyOneJwkConfig() throws IOException {
testConfig.put("jwkUrl", "http://127.0.0.1:45678/.well-known/config");
plugin.init(testConfig);
}
@Test(expected = SolrException.class)
public void wellKnownConfigNotHttps() throws IOException {
testConfig.put("wellKnownUrl", "http://127.0.0.1:45678/.well-known/config");
plugin.init(testConfig);
}
@Test(expected = SolrException.class)
public void wellKnownConfigNotReachable() {
testConfig.put("wellKnownUrl", "https://127.0.0.1:45678/.well-known/config");
plugin.init(testConfig);
}
@Test
public void wellKnownConfigFromInputstream() throws IOException {
Path configJson = TEST_PATH().resolve("security").resolve("jwt_well-known-config.json");
JWTAuthPlugin.WellKnownDiscoveryConfig config = JWTAuthPlugin.WellKnownDiscoveryConfig.parse(Files.newInputStream(configJson));
assertEquals("https://acmepaymentscorp/oauth/jwks", config.getJwksUrl());
}
@Test
public void wellKnownConfigFromString() throws IOException {
Path configJson = TEST_PATH().resolve("security").resolve("jwt_well-known-config.json");
String configString = StringUtils.join(Files.readAllLines(configJson), "\n");
JWTAuthPlugin.WellKnownDiscoveryConfig config = JWTAuthPlugin.WellKnownDiscoveryConfig.parse(configString, StandardCharsets.UTF_8);
assertEquals("https://acmepaymentscorp/oauth/jwks", config.getJwksUrl());
assertEquals("http://acmepaymentscorp", config.getIssuer());
assertEquals("http://acmepaymentscorp/oauth/auz/authorize", config.getAuthorizationEndpoint());
assertEquals(Arrays.asList("READ", "WRITE", "DELETE", "openid", "scope", "profile", "email", "address", "phone"), config.getScopesSupported());
assertEquals(Arrays.asList("code", "code id_token", "code token", "code id_token token", "token", "id_token", "id_token token"), config.getResponseTypesSupported());
}
@Test
public void xSolrAuthDataHeader() {
testConfig.put("adminUiScope", "solr:admin");
testConfig.put("authorizationEndpoint", "http://acmepaymentscorp/oauth/auz/authorize");
testConfig.put("clientId", "solr-cluster");
plugin.init(testConfig);
String headerBase64 = plugin.generateAuthDataHeader();
String headerJson = new String(Base64.base64ToByteArray(headerBase64), StandardCharsets.UTF_8);
Map<String,String> parsed = (Map<String, String>) JSON.parse(headerJson);
assertEquals("solr:admin", parsed.get("scope"));
assertEquals("http://acmepaymentscorp/oauth/auz/authorize", parsed.get("authorizationEndpoint"));
assertEquals("solr-cluster", parsed.get("client_id"));
}
} | 1 | 28,380 | Avoid using old mortbay utilities for converting JSON. Uses existing Solr Utils to convert from JSON string. | apache-lucene-solr | java |
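A minimal sketch of the change the review comment on this record asks for: replacing the old mortbay/Jetty `JSON.parse` call in `xSolrAuthDataHeader` with Solr's own JSON utility. `Utils.fromJSONString` and `Base64.base64ToByteArray` are real solr-solrj APIs, but the wrapper class below is hypothetical and only illustrates the suggested parsing path.

import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.Utils;

// Hypothetical helper showing the suggested replacement.
public class AuthDataHeaderExample {
  @SuppressWarnings("unchecked")
  public static Map<String, String> parseAuthDataHeader(String headerBase64) {
    // Decode the base64 payload exactly as the test already does.
    String headerJson =
        new String(Base64.base64ToByteArray(headerBase64), StandardCharsets.UTF_8);
    // Solr's own utility replaces the old mortbay JSON.parse().
    return (Map<String, String>) Utils.fromJSONString(headerJson);
  }
}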
@@ -0,0 +1,19 @@
+'use strict';
+
+const CommandOperationV2 = require('./command_v2');
+const defineAspects = require('./operation').defineAspects;
+const Aspect = require('./operation').Aspect;
+
+class CommandDirect extends CommandOperationV2 {
+ constructor(parent, command, options) {
+ super(parent, options);
+ this.command = command;
+ }
+ execute(server, callback) {
+ const command = this.command;
+ this.executeCommand(server, command, callback);
+ }
+}
+defineAspects(CommandDirect, [Aspect.EXECUTE_WITH_SELECTION]);
+
+module.exports = CommandDirect; | 1 | 1 | 17,792 | I actually think this wrapping is great, but let's call it `RunCommandOperation`? | mongodb-node-mongodb-native | js |
|
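A sketch of the reviewer's naming suggestion for the record above: the same wrapper with the class renamed to `RunCommandOperation`. The body is taken verbatim from the patch; only the identifiers change.

'use strict';

const CommandOperationV2 = require('./command_v2');
const defineAspects = require('./operation').defineAspects;
const Aspect = require('./operation').Aspect;

// Same wrapper as in the patch, renamed per the review suggestion.
class RunCommandOperation extends CommandOperationV2 {
  constructor(parent, command, options) {
    super(parent, options);
    this.command = command;
  }

  execute(server, callback) {
    const command = this.command;
    this.executeCommand(server, command, callback);
  }
}

defineAspects(RunCommandOperation, [Aspect.EXECUTE_WITH_SELECTION]);

module.exports = RunCommandOperation;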
@@ -122,11 +122,15 @@ func errorsParse(c *caddy.Controller) (*ErrorHandler, error) {
}
f.Close()
- whatInt, err := strconv.Atoi(what)
- if err != nil {
- return hadBlock, c.Err("Expecting a numeric status code, got '" + what + "'")
+ if what == "*" {
+ handler.GenericErrorPage = where
+ } else {
+ whatInt, err := strconv.Atoi(what)
+ if err != nil {
+ return hadBlock, c.Err("Expecting a numeric status code or '*', got '" + what + "'")
+ }
+ handler.ErrorPages[whatInt] = where
}
- handler.ErrorPages[whatInt] = where
}
}
return hadBlock, nil | 1 | package errors
import (
"io"
"log"
"os"
"path/filepath"
"strconv"
"github.com/hashicorp/go-syslog"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyhttp/httpserver"
)
// setup configures a new errors middleware instance.
func setup(c *caddy.Controller) error {
handler, err := errorsParse(c)
if err != nil {
return err
}
// Open the log file for writing when the server starts
c.OnStartup(func() error {
var err error
var writer io.Writer
switch handler.LogFile {
case "visible":
handler.Debug = true
case "stdout":
writer = os.Stdout
case "stderr":
writer = os.Stderr
case "syslog":
writer, err = gsyslog.NewLogger(gsyslog.LOG_ERR, "LOCAL0", "caddy")
if err != nil {
return err
}
default:
if handler.LogFile == "" {
writer = os.Stderr // default
break
}
var file *os.File
file, err = os.OpenFile(handler.LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return err
}
if handler.LogRoller != nil {
file.Close()
handler.LogRoller.Filename = handler.LogFile
writer = handler.LogRoller.GetLogWriter()
} else {
handler.file = file
writer = file
}
}
handler.Log = log.New(writer, "", 0)
return nil
})
// When server stops, close any open log file
c.OnShutdown(func() error {
if handler.file != nil {
handler.file.Close()
}
return nil
})
httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
handler.Next = next
return handler
})
return nil
}
func errorsParse(c *caddy.Controller) (*ErrorHandler, error) {
// Very important that we make a pointer because the startup
// function that opens the log file must have access to the
// same instance of the handler, not a copy.
handler := &ErrorHandler{ErrorPages: make(map[int]string)}
cfg := httpserver.GetConfig(c)
optionalBlock := func() (bool, error) {
var hadBlock bool
for c.NextBlock() {
hadBlock = true
what := c.Val()
if !c.NextArg() {
return hadBlock, c.ArgErr()
}
where := c.Val()
if what == "log" {
if where == "visible" {
handler.Debug = true
} else {
handler.LogFile = where
if c.NextArg() {
if c.Val() == "{" {
c.IncrNest()
logRoller, err := httpserver.ParseRoller(c)
if err != nil {
return hadBlock, err
}
handler.LogRoller = logRoller
}
}
}
} else {
// Error page; ensure it exists
where = filepath.Join(cfg.Root, where)
f, err := os.Open(where)
if err != nil {
log.Printf("[WARNING] Unable to open error page '%s': %v", where, err)
}
f.Close()
whatInt, err := strconv.Atoi(what)
if err != nil {
return hadBlock, c.Err("Expecting a numeric status code, got '" + what + "'")
}
handler.ErrorPages[whatInt] = where
}
}
return hadBlock, nil
}
for c.Next() {
// weird hack to avoid having the handler values overwritten.
if c.Val() == "}" {
continue
}
// Configuration may be in a block
hadBlock, err := optionalBlock()
if err != nil {
return handler, err
}
// Otherwise, the only argument would be an error log file name or 'visible'
if !hadBlock {
if c.NextArg() {
if c.Val() == "visible" {
handler.Debug = true
} else {
handler.LogFile = c.Val()
}
}
}
}
return handler, nil
}
| 1 | 8,832 | Maybe it is better to add checking for status code duplications? It can prevent some stupid copy-paste user errors. | caddyserver-caddy | go |
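The review comment on this record suggests rejecting duplicate status codes. A hedged sketch of how errorsParse could guard the assignment; `setErrorPage` is a hypothetical helper, and a real patch would report the error through `c.Err` as the surrounding parser code does.

package errors

import (
    "fmt"
    "strconv"
)

// setErrorPage registers an error page, refusing a second page for the
// same status code so copy-paste duplicates fail loudly at parse time.
func setErrorPage(pages map[int]string, what, where string) error {
    whatInt, err := strconv.Atoi(what)
    if err != nil {
        return fmt.Errorf("expecting a numeric status code or '*', got '%s'", what)
    }
    if existing, ok := pages[whatInt]; ok {
        return fmt.Errorf("duplicate error page for status %d (already set to %s)", whatInt, existing)
    }
    pages[whatInt] = where
    return nil
}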
@@ -5,14 +5,14 @@ describe Invoice do
invoices = Invoice.
find_all_by_stripe_customer_id(FakeStripe::CUSTOMER_ID)
- invoices.length.should eq 1
- invoices.first.stripe_invoice_id.should eq 'in_1s4JSgbcUaElzU'
+ expect(invoices.length).to eq 1
+ expect(invoices.first.stripe_invoice_id).to eq 'in_1s4JSgbcUaElzU'
end
it 'does not find invoices with a blank customer' do
- Invoice.find_all_by_stripe_customer_id(' ').length.should eq 0
- Invoice.find_all_by_stripe_customer_id('').length.should eq 0
- Invoice.find_all_by_stripe_customer_id(nil).length.should eq 0
+ expect(Invoice.find_all_by_stripe_customer_id(' ').length).to eq 0
+ expect(Invoice.find_all_by_stripe_customer_id('').length).to eq 0
+ expect(Invoice.find_all_by_stripe_customer_id(nil).length).to eq 0
end
describe 'invoice fields' do | 1 | require 'spec_helper'
describe Invoice do
it 'retrieves all invoices for a customer' do
invoices = Invoice.
find_all_by_stripe_customer_id(FakeStripe::CUSTOMER_ID)
invoices.length.should eq 1
invoices.first.stripe_invoice_id.should eq 'in_1s4JSgbcUaElzU'
end
it 'does not find invoices with a blank customer' do
Invoice.find_all_by_stripe_customer_id(' ').length.should eq 0
Invoice.find_all_by_stripe_customer_id('').length.should eq 0
Invoice.find_all_by_stripe_customer_id(nil).length.should eq 0
end
describe 'invoice fields' do
let(:invoice) { Invoice.new('in_1s4JSgbcUaElzU') }
it 'has a number equal to its subscription id and date' do
date = Time.zone.at(1369159688)
invoice.number.should == date.to_s(:invoice)
end
it 'returns the invoice total from stripe' do
invoice.total.should == 79
end
it 'returns the invoice subtotal from stripe' do
invoice.subtotal.should == 99
end
it 'returns the amount_due from stripe' do
invoice.amount_due.should == 79
end
it 'returns the invoice paid status from stripe' do
invoice.should be_paid
end
it 'returns the invoice date from stripe' do
invoice.date.should eq Time.zone.at(1369159688)
end
it 'returns true if there is a discount on the invoice' do
invoice.should be_discounted
end
it 'returns the name of the discount from stripe' do
invoice.discount_name.should eq 'railsconf'
end
it 'returns the amount of the discount from stripe' do
invoice.discount_amount.should eq 20
end
it 'returns the user who matches the stripe customer' do
user = create(:user, stripe_customer_id: FakeStripe::CUSTOMER_ID)
invoice.user.should eq user
end
it 'returns a zero balance when paid' do
invoice.balance.should eq 0.00
end
it 'returns a balance equal to the amount_due when not paid' do
stripe_invoice = Invoice.new('in_1s4JSgbcUaElzU')
stub_invoice = stub(paid: false, amount_due: 500)
Stripe::Invoice.stubs(:retrieve).returns(stub_invoice)
invoice.balance.should eq 5.00
end
describe '#amount_paid' do
it 'returns zero when not paid' do
stripe_invoice = Invoice.new('in_1s4JSgbcUaElzU')
stub_invoice = stub(paid: false)
Stripe::Invoice.stubs(:retrieve).returns(stub_invoice)
invoice.amount_paid.should eq 0.00
end
it 'returns the amount_due when paid' do
stripe_invoice = Invoice.new('in_1s4JSgbcUaElzU')
stub_invoice = stub(paid: true, amount_due: 500)
Stripe::Invoice.stubs(:retrieve).returns(stub_invoice)
invoice.amount_paid.should eq 5.00
end
end
it 'returns the user info for the user' do
user = create(
:user,
stripe_customer_id: FakeStripe::CUSTOMER_ID,
organization: 'thoughtbot',
address1: '41 Winter St.',
address2: 'Floor 7',
city: 'Boston',
state: 'MA',
zip_code: '02108',
country: 'USA'
)
invoice.user_name.should == user.name
invoice.user_organization.should eq user.organization
invoice.user_address1.should eq user.address1
invoice.user_address2.should eq user.address2
invoice.user_city.should eq user.city
invoice.user_state.should eq user.state
invoice.user_zip_code.should eq user.zip_code
invoice.user_country.should eq user.country
invoice.user_email.should eq user.email
end
it 'returns the proper partial path' do
invoice.to_partial_path.should eq 'subscriber/invoices/invoice'
end
end
context 'invoice which has discount in percent' do
let(:invoice) { Invoice.new('in_3Eh5UIbuDVdhat') }
it 'returns the correct discount amount' do
expect(invoice.discount_amount).to eq 99.00
end
end
describe '#line_items' do
it 'returns line items for all the stripe invoice lines' do
lines = stub(
'lines',
invoiceitems: [:invoiceitem],
prorations: [:proration],
subscriptions: [:subscription],
)
stripe_invoice = stub('stripe_invoice', lines: lines)
invoice = Invoice.new(stripe_invoice)
stripe_line_items = stripe_invoice.lines.invoiceitems +
stripe_invoice.lines.prorations +
stripe_invoice.lines.subscriptions
line_items = stripe_line_items.map do |stripe_line_item|
LineItem.new(stripe_line_item)
end
expect(invoice.line_items).to eq(line_items)
end
end
end
| 1 | 9,673 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
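The review comment on this record is the string-literal style rule; a generic illustration, not taken from the spec file:

# Preferred: double quotes by default, which also allow interpolation.
plan = "upcase"
message = "Subscribed to #{plan}"

# Single quotes only when they avoid extra backslashes:
regex_source = 'f\w{2}$' # the double-quoted form would need "f\\w{2}$"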
@@ -1823,6 +1823,9 @@ mangle_return(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
IF_X64(ASSERT_TRUNCATE(val, int, opnd_get_immed_int(instr_get_src(instr, 0))));
/* addl sizeof_param_area, %xsp
* except that clobbers the flags, so we use lea */
+ /* XXX i#3307: unimplemented, we can only support simple mangling cases in
+ * mangling epilogue.
+ */
PRE(ilist, next_instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, val, OPSZ_lea))); | 1 | /* ******************************************************************************
* Copyright (c) 2010-2018 Google, Inc. All rights reserved.
* Copyright (c) 2010 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "mangle.c" */
#include "../globals.h"
#include "../link.h"
#include "../fragment.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "decode_fast.h"
#include "disassemble.h"
#include "../hashtable.h"
#include "../fcache.h" /* for in_fcache */
#ifdef STEAL_REGISTER
# include "steal_reg.h"
#endif
#include "instrument.h" /* for dr_insert_call */
#include "../translate.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h" /* rct_add_rip_rel_addr */
#endif
#ifdef UNIX
# include <sys/syscall.h>
#endif
#include <string.h> /* for memset */
#ifdef ANNOTATIONS
# include "../annotations.h"
#endif
/* Make code more readable by shortening long lines.
* We mark everything we add as non-app instr.
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
/***************************************************************************/
void
mangle_arch_init(void)
{
/* Nothing yet. */
}
/* Convert a short-format CTI into an equivalent one using
* near-rel-format.
* Remember, the target is kept in the 0th src array position,
* and has already been converted from an 8-bit offset to an
* absolute PC, so we can just pretend instructions are longer
* than they really are.
*/
instr_t *
convert_to_near_rel_arch(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
int opcode = instr_get_opcode(instr);
app_pc target = NULL;
if (opcode == OP_jmp_short) {
instr_set_opcode(instr, OP_jmp);
return instr;
}
if (OP_jo_short <= opcode && opcode <= OP_jnle_short) {
/* WARNING! following is OP_ enum order specific */
instr_set_opcode(instr, opcode - OP_jo_short + OP_jo);
return instr;
}
if (OP_loopne <= opcode && opcode <= OP_jecxz) {
uint mangled_sz;
uint offs;
/*
* from "info as" on GNU/linux system:
Note that the `jcxz', `jecxz', `loop', `loopz', `loope', `loopnz'
and `loopne' instructions only come in byte displacements, so that if
you use these instructions (`gcc' does not use them) you may get an
error message (and incorrect code). The AT&T 80386 assembler tries to
get around this problem by expanding `jcxz foo' to
jcxz cx_zero
jmp cx_nonzero
cx_zero: jmp foo
cx_nonzero:
*
* We use that same expansion, but we want to treat the entire
* three-instruction sequence as a single conditional branch.
* Thus we use a special instruction that stores the entire
* instruction sequence as mangled bytes, yet w/ a valid target operand
* (xref PR 251646).
* patch_branch and instr_invert_cbr
* know how to find the target pc (final 4 of 9 bytes).
* When decoding anything we've written we know the only jcxz or
* loop* instructions are part of these rewritten packages, and
* we use remangle_short_rewrite to read back in the instr.
* (have to do this everywhere call decode() except original
* interp, plus in input_trace())
*
* An alternative is to change 'jcxz foo' to:
<save eflags>
cmpb %cx,$0
je foo_restore
<restore eflags>
...
foo_restore: <restore eflags>
foo:
* However the added complications of restoring the eflags on
* the taken-branch path made me choose the former solution.
*/
/* SUMMARY:
* expand 'shortjump foo' to:
shortjump taken
jmp-short nottaken
taken: jmp foo
nottaken:
*/
if (ilist != NULL) {
/* PR 266292: for meta instrs, insert separate instrs */
/* reverse order */
opnd_t tgt = instr_get_target(instr);
instr_t *nottaken = INSTR_CREATE_label(dcontext);
instr_t *taken = INSTR_CREATE_jmp(dcontext, tgt);
ASSERT(instr_is_meta(instr));
instrlist_meta_postinsert(ilist, instr, nottaken);
instrlist_meta_postinsert(ilist, instr, taken);
instrlist_meta_postinsert(
ilist, instr,
INSTR_CREATE_jmp_short(dcontext, opnd_create_instr(nottaken)));
instr_set_target(instr, opnd_create_instr(taken));
return taken;
}
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_near_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* XXX: not using get_app_instr_xl8() b/c drdecodelib doesn't link
* mangle_shared.c.
*/
target = instr_get_translation(tgt);
if (target == NULL && instr_raw_bits_valid(tgt))
target = instr_get_raw_bits(tgt);
ASSERT(target != NULL);
} else
ASSERT_NOT_REACHED();
/* PR 251646: cti_short_rewrite: target is in src0, so operands are
* valid, but raw bits must also be valid, since they hide the multiple
* instrs. For x64, it is marked for re-relativization, but it's
* special since the target must be obtained from src0 and not
* from the raw bits (since that might not reach).
*/
/* need 9 bytes + possible addr prefix */
mangled_sz = CTI_SHORT_REWRITE_LENGTH;
if (!reg_is_pointer_sized(opnd_get_reg(instr_get_src(instr, 1))))
mangled_sz++; /* need addr prefix */
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
offs = 0;
if (mangled_sz > CTI_SHORT_REWRITE_LENGTH) {
instr_set_raw_byte(instr, offs, ADDR_PREFIX_OPCODE);
offs++;
}
/* first 2 bytes: jecxz 8-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(opcode));
offs++;
/* remember pc-relative offsets are from start of next instr */
instr_set_raw_byte(instr, offs, (byte)2);
offs++;
/* next 2 bytes: jmp-short 8-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(OP_jmp_short));
offs++;
instr_set_raw_byte(instr, offs, (byte)5);
offs++;
/* next 5 bytes: jmp 32-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(OP_jmp));
offs++;
/* for x64 we may not reach, but we go ahead and try */
instr_set_raw_word(instr, offs, (int)(target - (instr->bytes + mangled_sz)));
offs += sizeof(int);
ASSERT(offs == mangled_sz);
LOG(THREAD, LOG_INTERP, 2, "convert_to_near_rel: jecxz/loop* opcode\n");
/* original target operand is still valid */
instr_set_operands_valid(instr, true);
return instr;
}
LOG(THREAD, LOG_INTERP, 1, "convert_to_near_rel: unknown opcode: %d %s\n", opcode,
decode_opcode_name(opcode));
ASSERT_NOT_REACHED(); /* conversion not possible OR not a short-form cti */
return instr;
}
/* For jecxz and loop*, we create 3 instructions in a single
* instr that we treat like a single conditional branch.
* On re-decoding our own output we need to recreate that instr.
* This routine assumes that the instructions encoded at pc
* are indeed a mangled cti short.
* Assumes that the first instr has already been decoded into instr,
* that pc points to the start of that instr.
* Converts instr into a new 3-raw-byte-instr with a private copy of the
* original raw bits.
* Optionally modifies the target to "target" if "target" is non-null.
* Returns the pc of the instruction after the remangled sequence.
*/
byte *
remangle_short_rewrite(dcontext_t *dcontext, instr_t *instr, byte *pc, app_pc target)
{
uint mangled_sz = CTI_SHORT_REWRITE_LENGTH;
ASSERT(instr_is_cti_short_rewrite(instr, pc));
if (*pc == ADDR_PREFIX_OPCODE)
mangled_sz++;
/* first set the target in the actual operand src0 */
if (target == NULL) {
/* acquire existing absolute target */
int rel_target = *((int *)(pc + mangled_sz - 4));
target = pc + mangled_sz + rel_target;
}
instr_set_target(instr, opnd_create_pc(target));
/* now set up the bundle of raw instructions
* we've already read the first 2-byte instruction, jecxz/loop*
* they all take up mangled_sz bytes
*/
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
instr_set_raw_bytes(instr, pc, mangled_sz);
/* for x64 we may not reach, but we go ahead and try */
instr_set_raw_word(instr, mangled_sz - 4, (int)(target - (pc + mangled_sz)));
/* now make operands valid */
instr_set_operands_valid(instr, true);
return (pc + mangled_sz);
}
/***************************************************************************/
#if !defined(STANDALONE_DECODER)
int
insert_out_of_line_context_switch(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, bool save, byte *encode_pc)
{
if (save) {
/* We adjust the stack so the return address will not be clobbered,
* so we can have call/return pair to take advantage of hardware
* call return stack for better performance.
* Xref emit_clean_call_save @ x86/emit_utils.c
* The precise adjustment amount is relied upon in
* find_next_fragment_from_gencode()'s handling of in_clean_call_save().
*/
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-(int)(get_clean_call_switch_stack_size() +
get_clean_call_temp_stack_size()),
OPSZ_lea)));
}
/* We document to clients that we use r11 if we need an indirect call here. */
insert_reachable_cti(dcontext, ilist, instr, encode_pc,
save ? get_clean_call_save(dcontext _IF_X64(GENCODE_X64))
: get_clean_call_restore(dcontext _IF_X64(GENCODE_X64)),
false /*call*/, true /*returns*/, false /*!precise*/, DR_REG_R11,
NULL);
return get_clean_call_switch_stack_size();
}
void
insert_clear_eflags(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
instr_t *instr)
{
/* clear eflags for callee's usage */
if (cci == NULL || !cci->skip_clear_flags) {
if (dynamo_options.cleancall_ignore_eflags) {
/* we still clear DF since some compiler assumes
* DF is cleared at each function.
*/
PRE(ilist, instr, INSTR_CREATE_cld(dcontext));
} else {
/* on x64 a push immed is sign-extended to 64-bit */
PRE(ilist, instr, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_popf(dcontext));
}
}
}
/* Pushes not only the GPRs but also xmm/ymm, xip, and xflags, in
* priv_mcontext_t order.
* The current stack pointer alignment should be passed. Use 1 if
* unknown (NOT 0).
* Returns the amount of data pushed. Does NOT fix up the xsp value pushed
* to be the value prior to any pushes for x64 as no caller needs that
* currently (they all build a priv_mcontext_t and have to do further xsp
* fixups anyway).
* Includes xmm0-5 for PR 264138.
*/
uint
insert_push_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr, uint alignment,
opnd_t push_pc, reg_id_t scratch /*optional*/)
{
uint dstack_offs = 0;
int offs_beyond_xmm = 0;
if (cci == NULL)
cci = &default_clean_call_info;
if (cci->preserve_mcontext || cci->num_simd_skip != NUM_SIMD_REGS) {
int offs = XMM_SLOTS_SIZE + PRE_XMM_PADDING;
if (cci->preserve_mcontext && cci->skip_save_flags) {
offs_beyond_xmm = 2 * XSP_SZ; /* pc and flags */
offs += offs_beyond_xmm;
}
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, -offs)));
dstack_offs += offs;
}
if (preserve_xmm_caller_saved()) {
/* PR 264138: we must preserve xmm0-5 if on a 64-bit kernel */
int i;
/* PR 266305: see discussion in emit_fcache_enter_shared on
* which opcode is better. Note that the AMD optimization
* guide says to use movlps+movhps for unaligned stores, but
* for simplicity and smaller code I'm using movups anyway.
*/
/* XXX i#438: once have SandyBridge processor need to measure
* cost of vmovdqu and whether worth arranging 32-byte alignment
* for all callers. B/c we put ymm at end of priv_mcontext_t, we do
* currently have 32-byte alignment for clean calls.
*/
uint opcode = move_mm_reg_opcode(ALIGNED(alignment, 16), ALIGNED(alignment, 32));
ASSERT(proc_has_feature(FEATURE_SSE));
for (i = 0; i < NUM_SIMD_SAVED; i++) {
if (!cci->simd_skip[i]) {
PRE(ilist, instr,
instr_create_1dst_1src(
dcontext, opcode,
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + i * XMM_SAVED_REG_SIZE +
offs_beyond_xmm,
OPSZ_SAVED_XMM),
opnd_create_reg(REG_SAVED_XMM0 + (reg_id_t)i)));
}
}
ASSERT(i * XMM_SAVED_REG_SIZE == XMM_SAVED_SIZE);
ASSERT(XMM_SAVED_SIZE <= XMM_SLOTS_SIZE);
}
/* pc and aflags */
if (!cci->skip_save_flags) {
ASSERT(offs_beyond_xmm == 0);
if (opnd_is_immed_int(push_pc))
PRE(ilist, instr, INSTR_CREATE_push_imm(dcontext, push_pc));
else
PRE(ilist, instr, INSTR_CREATE_push(dcontext, push_pc));
dstack_offs += XSP_SZ;
PRE(ilist, instr, INSTR_CREATE_pushf(dcontext));
dstack_offs += XSP_SZ;
} else {
ASSERT(offs_beyond_xmm == 2 * XSP_SZ || !cci->preserve_mcontext);
/* for cci->preserve_mcontext we added to the lea above so we ignore push_pc */
}
# ifdef X64
/* keep priv_mcontext_t order */
if (!cci->reg_skip[REG_R15 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R15)));
if (!cci->reg_skip[REG_R14 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R14)));
if (!cci->reg_skip[REG_R13 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R13)));
if (!cci->reg_skip[REG_R12 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R12)));
if (!cci->reg_skip[REG_R11 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R11)));
if (!cci->reg_skip[REG_R10 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R10)));
if (!cci->reg_skip[REG_R9 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R9)));
if (!cci->reg_skip[REG_R8 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R8)));
if (!cci->reg_skip[REG_RAX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RAX)));
if (!cci->reg_skip[REG_RCX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RCX)));
if (!cci->reg_skip[REG_RDX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RDX)));
if (!cci->reg_skip[REG_RBX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RBX)));
/* we do NOT match pusha xsp value */
if (!cci->reg_skip[REG_RSP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RSP)));
if (!cci->reg_skip[REG_RBP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RBP)));
if (!cci->reg_skip[REG_RSI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RSI)));
if (!cci->reg_skip[REG_RDI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RDI)));
dstack_offs += (NUM_GP_REGS - cci->num_regs_skip) * XSP_SZ;
# else
PRE(ilist, instr, INSTR_CREATE_pusha(dcontext));
dstack_offs += 8 * XSP_SZ;
# endif
ASSERT(cci->skip_save_flags || cci->num_simd_skip != 0 || cci->num_regs_skip != 0 ||
dstack_offs == (uint)get_clean_call_switch_stack_size());
return dstack_offs;
}
/* User should pass the alignment from insert_push_all_registers: i.e., the
* alignment at the end of all the popping, not the alignment prior to
* the popping.
*/
void
insert_pop_all_registers(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
instr_t *instr, uint alignment)
{
int offs_beyond_xmm = 0;
if (cci == NULL)
cci = &default_clean_call_info;
# ifdef X64
/* in priv_mcontext_t order */
if (!cci->reg_skip[REG_RDI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RDI)));
if (!cci->reg_skip[REG_RSI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RSI)));
if (!cci->reg_skip[REG_RBP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBP)));
/* skip xsp by popping into dead rbx */
if (!cci->reg_skip[REG_RSP - REG_XAX]) {
ASSERT(!cci->reg_skip[REG_RBX - REG_XAX]);
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBX)));
}
if (!cci->reg_skip[REG_RBX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBX)));
if (!cci->reg_skip[REG_RDX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RDX)));
if (!cci->reg_skip[REG_RCX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RCX)));
if (!cci->reg_skip[REG_RAX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RAX)));
if (!cci->reg_skip[REG_R8 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R8)));
if (!cci->reg_skip[REG_R9 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R9)));
if (!cci->reg_skip[REG_R10 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R10)));
if (!cci->reg_skip[REG_R11 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R11)));
if (!cci->reg_skip[REG_R12 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R12)));
if (!cci->reg_skip[REG_R13 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R13)));
if (!cci->reg_skip[REG_R14 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R14)));
if (!cci->reg_skip[REG_R15 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R15)));
# else
PRE(ilist, instr, INSTR_CREATE_popa(dcontext));
# endif
if (!cci->skip_save_flags) {
PRE(ilist, instr, INSTR_CREATE_popf(dcontext));
        offs_beyond_xmm = XSP_SZ; /* pc */
} else if (cci->preserve_mcontext) {
offs_beyond_xmm = 2 * XSP_SZ; /* aflags + pc */
}
if (preserve_xmm_caller_saved()) {
/* PR 264138: we must preserve xmm0-5 if on a 64-bit kernel */
int i;
/* See discussion in emit_fcache_enter_shared on which opcode
* is better. */
uint opcode = move_mm_reg_opcode(ALIGNED(alignment, 32), ALIGNED(alignment, 16));
ASSERT(proc_has_feature(FEATURE_SSE));
for (i = 0; i < NUM_SIMD_SAVED; i++) {
if (!cci->simd_skip[i]) {
PRE(ilist, instr,
instr_create_1dst_1src(
dcontext, opcode, opnd_create_reg(REG_SAVED_XMM0 + (reg_id_t)i),
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + i * XMM_SAVED_REG_SIZE +
offs_beyond_xmm,
OPSZ_SAVED_XMM)));
}
}
ASSERT(i * XMM_SAVED_REG_SIZE == XMM_SAVED_SIZE);
ASSERT(XMM_SAVED_SIZE <= XMM_SLOTS_SIZE);
}
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + XMM_SLOTS_SIZE + offs_beyond_xmm)));
}
reg_id_t
shrink_reg_for_param(reg_id_t regular, opnd_t arg)
{
# ifdef X64
if (opnd_get_size(arg) == OPSZ_4) { /* we ignore var-sized */
/* PR 250976 #2: leave 64-bit only if an immed w/ top bit set (we
* assume user wants sign-extension; that is after all what happens
* on a push of a 32-bit immed) */
if (!opnd_is_immed_int(arg) || (opnd_get_immed_int(arg) & 0x80000000) == 0)
return reg_64_to_32(regular);
}
# endif
return regular;
}
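/* Illustrative example (added commentary, not in the original source): on
 * x64 Linux regparms[0] is rdi, and shrink_reg_for_param(REG_RDI,
 * OPND_CREATE_INT32(5)) returns REG_EDI, since a 32-bit immediate with a
 * clear top bit is zero-extended by a 32-bit mov. For OPND_CREATE_INT32(-1)
 * the top bit is set, so REG_RDI is returned and the 64-bit mov
 * sign-extends, matching what a push of a 32-bit immed would do.
 */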
/* Returns the change in the stack pointer.
* N.B.: due to stack alignment and minimum stack reservation, do
* not use parameters involving esp/rsp, as its value can change!
*
* This routine only supports passing arguments that are integers or
* pointers of a size equal or smaller than the register size: i.e., no
* floating-point, multimedia, or aggregate data types.
*
* For 64-bit mode, if a 32-bit immediate integer is specified as an
* argument and it has its top bit set, we assume it is intended to be
* sign-extended to 64-bits; otherwise we zero-extend it.
*
* For 64-bit mode, variable-sized argument operands may not work
* properly.
*
* Arguments that reference REG_XSP will work for clean calls, but are not guaranteed
* to work for non-clean, especially for 64-bit where we align, etc. Arguments that
* reference sub-register portions of REG_XSP are not supported.
*
* XXX PR 307874: w/ a post optimization pass, or perhaps more clever use of
* existing passes, we could do much better on calling convention and xsp conflicting
* args. We should also really consider inlining client callees (PR 218907), since
* clean calls for 64-bit are enormous (71 instrs/264 bytes for 2-arg x64; 26
* instrs/99 bytes for x86) and we could avoid all the xmm saves and replace pushf w/
* lahf.
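 *
 * Illustrative client usage (hypothetical callee and argument names, added
 * as an example rather than taken from this file):
 *   dr_insert_call(drcontext, bb, where, (void *)my_count_hits, 2,
 *                  OPND_CREATE_INTPTR(&hit_count), OPND_CREATE_INT32(1));
 * On x64 both arguments land in regparms[0..1]; for 32-bit both are pushed
 * on the stack in reverse order.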
*/
uint
insert_parameter_preparation(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool clean_call, uint num_args, opnd_t *args)
{
uint i;
int r;
uint preparm_padding = 0;
uint param_stack = 0, total_stack = 0;
bool push = true;
bool restore_xax = false;
bool restore_xsp = false;
/* we need two passes for PR 250976 optimization */
/* Push/mov in reverse order. We need a label so we can also add
* instrs prior to the regular param prep. So params are POST-mark, while
* pre-param-prep is POST-prev or PRE-mark.
*/
# ifdef X64
uint arg_pre_push = 0, total_pre_push = 0;
# endif
instr_t *prev = (instr == NULL) ? instrlist_last(ilist) : instr_get_prev(instr);
instr_t *mark = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, mark);
/* For a clean call, xax is dead (clobbered by prepare_for_clean_call()).
* Rather than use as scratch and restore prior to each param that uses it,
* we restore once up front if any use it, and use regparms[0] as scratch,
* which is symmetric with non-clean-calls: regparms[0] is dead since we're
* doing args in reverse order. However, we then can't use regparms[0]
* directly if referenced in earlier params, but similarly for xax, so
* there's no clear better way. (prepare_for_clean_call also clobbers xsp,
* but we just disallow args that use it).
*/
ASSERT(num_args == 0 || args != NULL);
/* We can get away w/ one pass, except for PR 250976 we want calling conv
* regs to be able to refer to priv_mcontext_t as well as potentially being
* pushed: but we need to know the total # pushes ahead of time (since hard
* to mark for post-patching)
*/
for (i = 0; i < num_args; i++) {
IF_X64(bool is_pre_push = false;)
for (r = 0; r < opnd_num_regs_used(args[i]); r++) {
reg_id_t used = opnd_get_reg_used(args[i], r);
IF_X64(int parm;)
LOG(THREAD, LOG_INTERP, 4, "ipp: considering arg %d reg %d == %s\n", i, r,
reg_names[used]);
if (clean_call && !restore_xax && reg_overlap(used, REG_XAX))
restore_xax = true;
if (reg_overlap(used, REG_XSP)) {
IF_X64(CLIENT_ASSERT(clean_call,
"Non-clean-call argument: REG_XSP not supported"));
CLIENT_ASSERT(used == REG_XSP,
"Call argument: sub-reg-xsp not supported");
if (clean_call && /*x64*/ parameters_stack_padded() && !restore_xsp)
restore_xsp = true;
}
# ifdef X64
/* PR 250976 #A: count the number of pre-pushes we need */
parm = reg_parameter_num(used);
/* We can read a register used in an earlier arg since we store that
* arg later (we do reverse order), except arg0, which we use as
* scratch (we don't always need it, but not worth another pre-pass
* through all args to find out), and xsp. Otherwise, if a plain reg,
* we point at mcontext (we restore xsp slot in mcontext if nec.).
* If a mem ref, we need to pre-push onto stack.
* N.B.: this conditional is duplicated in 2nd loop.
*/
if (!is_pre_push &&
((parm == 0 && num_args > 1) || parm > (int)i ||
reg_overlap(used, REG_XSP)) &&
(!clean_call || !opnd_is_reg(args[i]))) {
total_pre_push++;
is_pre_push = true; /* ignore further regs in same arg */
}
# endif
}
}
if (parameters_stack_padded()) {
/* For x64, supposed to reserve rsp space in function prologue; we
* do next best thing and reserve it prior to setting up the args.
*/
push = false; /* store args to xsp offsets instead of pushing them */
total_stack = REGPARM_MINSTACK;
if (num_args > NUM_REGPARM)
total_stack += XSP_SZ * (num_args - NUM_REGPARM);
param_stack = total_stack;
IF_X64(total_stack += XSP_SZ * total_pre_push);
/* We assume rsp is currently 16-byte aligned. End of arguments is supposed
* to be 16-byte aligned for x64 SysV (note that retaddr will then make
* rsp 8-byte-aligned, which is ok: callee has to rectify that).
* For clean calls, prepare_for_clean_call leaves rsp aligned for x64.
* XXX PR 218790: we require users of dr_insert_call to ensure
* alignment; should we put in support to dynamically align?
*/
preparm_padding =
ALIGN_FORWARD_UINT(total_stack, REGPARM_END_ALIGN) - total_stack;
total_stack += preparm_padding;
/* we have to wait to insert the xsp adjust */
} else {
ASSERT(NUM_REGPARM == 0);
ASSERT(push);
IF_X64(ASSERT(total_pre_push == 0));
total_stack = XSP_SZ * num_args;
}
LOG(THREAD, LOG_INTERP, 3,
"insert_parameter_preparation: %d args, %d in-reg, %d pre-push, %d/%d stack\n",
num_args, NUM_REGPARM, IF_X64_ELSE(total_pre_push, 0), param_stack, total_stack);
for (i = 0; i < num_args; i++) {
/* FIXME PR 302951: we need to handle state restoration if any
* of these args references app memory. We should pull the state from
* the priv_mcontext_t on the stack if in a clean call. FIXME: what if not?
*/
opnd_t arg = args[i];
CLIENT_ASSERT(opnd_get_size(arg) == OPSZ_PTR ||
opnd_is_immed_int(arg) IF_X64(|| opnd_get_size(arg) == OPSZ_4),
"Clean call arg has unsupported size");
# ifdef X64
/* PR 250976 #A: support args that reference param regs */
for (r = 0; r < opnd_num_regs_used(arg); r++) {
reg_id_t used = opnd_get_reg_used(arg, r);
int parm = reg_parameter_num(used);
/* See comments in loop above */
if ((parm == 0 && num_args > 1) || parm > (int)i ||
reg_overlap(used, REG_XSP)) {
int disp = 0;
if (clean_call && opnd_is_reg(arg)) {
/* We can point at the priv_mcontext_t slot.
* priv_mcontext_t is at the base of dstack: compute offset
* from xsp to the field we want and replace arg.
*/
disp += opnd_get_reg_dcontext_offs(opnd_get_reg(arg));
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
/* skip what this routine added */
disp += total_stack;
} else {
/* Push a temp on the stack and point at it. We
* could try to optimize by juggling registers, but
* not worth it.
*/
/* xsp was adjusted up above; we simply store to xsp offsets */
disp = param_stack + XSP_SZ * arg_pre_push;
if (opnd_is_reg(arg) && opnd_get_size(arg) == OPSZ_PTR) {
POST(ilist, prev,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, disp), arg));
} else {
reg_id_t xsp_scratch = regparms[0];
/* don't want to just change size since will read extra bytes.
* can't do mem-to-mem so go through scratch reg */
if (reg_overlap(used, REG_XSP)) {
/* Get original xsp into scratch[0] and replace in arg */
if (opnd_uses_reg(arg, regparms[0])) {
xsp_scratch = REG_XAX;
ASSERT(!opnd_uses_reg(arg, REG_XAX)); /* can't use 3 */
/* FIXME: rather than putting xsp into mcontext
* slot, better to just do local get from dcontext
* like we do for 32-bit below? */
POST(ilist, prev,
instr_create_restore_from_tls(dcontext, REG_XAX,
TLS_XAX_SLOT));
}
opnd_replace_reg(&arg, REG_XSP, xsp_scratch);
}
POST(ilist, prev,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, disp),
opnd_create_reg(regparms[0])));
/* If sub-ptr-size, zero-extend is what we want so no movsxd */
POST(ilist, prev,
INSTR_CREATE_mov_ld(
dcontext,
opnd_create_reg(shrink_reg_for_param(regparms[0], arg)),
arg));
if (reg_overlap(used, REG_XSP)) {
int xsp_disp = opnd_get_reg_dcontext_offs(REG_XSP) +
clean_call_beyond_mcontext() + total_stack;
POST(ilist, prev,
INSTR_CREATE_mov_ld(
dcontext, opnd_create_reg(xsp_scratch),
OPND_CREATE_MEMPTR(REG_XSP, xsp_disp)));
if (xsp_scratch == REG_XAX) {
POST(ilist, prev,
instr_create_save_to_tls(dcontext, REG_XAX,
TLS_XAX_SLOT));
}
}
if (opnd_uses_reg(arg, regparms[0])) {
/* must restore since earlier arg might have clobbered */
int mc_disp = opnd_get_reg_dcontext_offs(regparms[0]) +
clean_call_beyond_mcontext() + total_stack;
POST(ilist, prev,
INSTR_CREATE_mov_ld(
dcontext, opnd_create_reg(regparms[0]),
OPND_CREATE_MEMPTR(REG_XSP, mc_disp)));
}
}
arg_pre_push++; /* running counter */
}
arg =
opnd_create_base_disp(REG_XSP, REG_NULL, 0, disp, opnd_get_size(arg));
            break; /* once we've handled arg ignore further reg refs */
}
}
# endif
if (i < NUM_REGPARM) {
reg_id_t regparm = shrink_reg_for_param(regparms[i], arg);
if (opnd_is_immed_int(arg) || opnd_is_instr(arg)) {
POST(ilist, mark,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(regparm), arg));
} else {
POST(ilist, mark,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(regparm), arg));
}
} else {
if (push) {
IF_X64(ASSERT_NOT_REACHED()); /* no 64-bit push_imm! */
if (opnd_is_immed_int(arg) || opnd_is_instr(arg))
POST(ilist, mark, INSTR_CREATE_push_imm(dcontext, arg));
else {
if (clean_call && opnd_uses_reg(arg, REG_XSP)) {
/* We do a purely local expansion:
* spill eax, mc->eax, esp->eax, arg->eax, push eax, restore eax
*/
reg_id_t scratch = REG_XAX;
if (opnd_uses_reg(arg, scratch)) {
scratch = REG_XCX;
ASSERT(!opnd_uses_reg(arg, scratch)); /* can't use 3 regs */
}
opnd_replace_reg(&arg, REG_XSP, scratch);
POST(ilist, mark,
instr_create_restore_from_tls(dcontext, scratch,
TLS_XAX_SLOT));
POST(ilist, mark, INSTR_CREATE_push(dcontext, arg));
POST(ilist, mark,
instr_create_restore_from_dc_via_reg(dcontext, scratch,
scratch, XSP_OFFSET));
insert_get_mcontext_base(dcontext, ilist, instr_get_next(mark),
scratch);
POST(ilist, mark,
instr_create_save_to_tls(dcontext, scratch, TLS_XAX_SLOT));
} else
POST(ilist, mark, INSTR_CREATE_push(dcontext, arg));
}
} else {
/* xsp was adjusted up above; we simply store to xsp offsets */
uint offs = REGPARM_MINSTACK + XSP_SZ * (i - NUM_REGPARM);
# ifdef X64
if (opnd_is_immed_int(arg) || opnd_is_instr(arg)) {
/* PR 250976 #3: there is no memory store of 64-bit-immediate,
* so go through scratch reg */
ASSERT(NUM_REGPARM > 0);
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, offs),
opnd_create_reg(regparms[0])));
POST(ilist, mark,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(regparms[0]),
arg));
} else {
# endif
if (opnd_is_memory_reference(arg)) {
/* can't do mem-to-mem so go through scratch */
reg_id_t scratch;
if (NUM_REGPARM > 0)
scratch = regparms[0];
else {
/* This happens on Mac.
* FIXME i#1370: not safe if later arg uses xax:
* local spill? Review how regparms[0] is preserved.
*/
scratch = REG_XAX;
}
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs),
opnd_create_reg(scratch)));
POST(ilist, mark,
INSTR_CREATE_mov_ld(
dcontext,
opnd_create_reg(shrink_reg_for_param(scratch, arg)),
arg));
} else {
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs), arg));
}
# ifdef X64
}
# endif
}
}
}
if (!push && total_stack > 0) {
POST(ilist, prev, /* before everything else: pre-push and args */
/* can we use sub? may as well preserve eflags */
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, -(int)total_stack)));
}
if (restore_xsp) {
/* before restore_xax, since we're going to clobber xax */
int disp = opnd_get_reg_dcontext_offs(REG_XSP);
instr_t *where = instr_get_next(prev);
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
insert_get_mcontext_base(dcontext, ilist, where, REG_XAX);
PRE(ilist, where,
instr_create_restore_from_dc_via_reg(dcontext, REG_XAX, REG_XAX, XSP_OFFSET));
PRE(ilist, where,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, disp),
opnd_create_reg(REG_XAX)));
/* now we need restore_xax to be AFTER this */
prev = instr_get_prev(where);
}
if (restore_xax) {
int disp = opnd_get_reg_dcontext_offs(REG_XAX);
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
POST(ilist, prev, /* before everything else: pre-push, args, and stack adjust */
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX),
OPND_CREATE_MEMPTR(REG_XSP, disp)));
}
return total_stack;
}
/* If jmp_instr == NULL, uses jmp_tag, otherwise uses jmp_instr
*/
void
insert_clean_call_with_arg_jmp_if_ret_true(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, void *callee, int arg,
app_pc jmp_tag, instr_t *jmp_instr)
{
instr_t *false_popa, *jcc;
byte *encode_pc = vmcode_get_start();
prepare_for_clean_call(dcontext, NULL, ilist, instr, encode_pc);
dr_insert_call(dcontext, ilist, instr, callee, 1, OPND_CREATE_INT32(arg));
/* if the return value (xax) is 0, then jmp to internal false path */
PRE(ilist, instr, /* can't cmp w/ 64-bit immed so use test (shorter anyway) */
INSTR_CREATE_test(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XAX)));
/* fill in jcc target once have false path */
jcc = INSTR_CREATE_jcc(dcontext, OP_jz, opnd_create_pc(NULL));
PRE(ilist, instr, jcc);
/* if it falls through, then it's true, so restore and jmp to true tag
* passed in by caller
*/
cleanup_after_clean_call(dcontext, NULL, ilist, instr, encode_pc);
if (jmp_instr == NULL) {
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr,
INSTR_CREATE_jmp(dcontext, opnd_create_pc(jmp_tag)));
} else {
PRE(ilist, instr, INSTR_CREATE_jmp(dcontext, opnd_create_instr(jmp_instr)));
}
/* otherwise (if returned false), just do standard popf and continue */
/* get 1st instr of cleanup path */
false_popa = instr_get_prev(instr);
cleanup_after_clean_call(dcontext, NULL, ilist, instr, encode_pc);
false_popa = instr_get_next(false_popa);
instr_set_target(jcc, opnd_create_instr(false_popa));
}
/* If !precise, encode_pc is treated as +- a page (meant for clients
* writing an instrlist to gencode so not sure of exact placement but
* within a page).
* If encode_pc == vmcode_get_start(), checks reachability of whole
* vmcode region (meant for code going somewhere not precisely known
* in the code cache).
* Returns whether ended up using a direct cti. If inlined_tgt_instr != NULL,
* and an inlined target was used, returns a pointer to that instruction
* in *inlined_tgt_instr.
*/
bool
insert_reachable_cti(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
byte *encode_pc, byte *target, bool jmp, bool returns, bool precise,
reg_id_t scratch, instr_t **inlined_tgt_instr)
{
byte *encode_start;
byte *encode_end;
if (precise) {
encode_start = target + JMP_LONG_LENGTH;
encode_end = encode_start;
} else if (encode_pc == vmcode_get_start()) {
/* consider whole vmcode region */
encode_start = encode_pc;
encode_end = vmcode_get_end();
} else {
encode_start = (byte *)PAGE_START(encode_pc - PAGE_SIZE);
encode_end = (byte *)ALIGN_FORWARD(encode_pc + PAGE_SIZE, PAGE_SIZE);
}
if (REL32_REACHABLE(encode_start, target) && REL32_REACHABLE(encode_end, target)) {
/* For precise, we could consider a short cti, but so far no
* users are precise so we'll leave that for i#56.
*/
if (jmp)
PRE(ilist, where, INSTR_CREATE_jmp(dcontext, opnd_create_pc(target)));
else
PRE(ilist, where, INSTR_CREATE_call(dcontext, opnd_create_pc(target)));
return true;
} else {
opnd_t ind_tgt;
instr_t *inlined_tgt = NULL;
if (scratch == DR_REG_NULL) {
/* indirect through an inlined target */
inlined_tgt = instr_build_bits(dcontext, OP_UNDECODED, sizeof(target));
/* XXX: could use mov imm->xax and have target skip rex+opcode
* for clean disassembly
*/
instr_set_raw_bytes(inlined_tgt, (byte *)&target, sizeof(target));
/* this will copy the bytes for us, so we don't have to worry about
* the lifetime of the target param
*/
instr_allocate_raw_bits(dcontext, inlined_tgt, sizeof(target));
ind_tgt = opnd_create_mem_instr(inlined_tgt, 0, OPSZ_PTR);
if (inlined_tgt_instr != NULL)
*inlined_tgt_instr = inlined_tgt;
} else {
PRE(ilist, where,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(scratch),
OPND_CREATE_INTPTR(target)));
ind_tgt = opnd_create_reg(scratch);
if (inlined_tgt_instr != NULL)
*inlined_tgt_instr = NULL;
}
if (jmp)
PRE(ilist, where, INSTR_CREATE_jmp_ind(dcontext, ind_tgt));
else
PRE(ilist, where, INSTR_CREATE_call_ind(dcontext, ind_tgt));
if (inlined_tgt != NULL)
PRE(ilist, where, inlined_tgt);
return false;
}
}
/*###########################################################################
*###########################################################################
*
* M A N G L I N G R O U T I N E S
*/
#endif /* !STANDALONE_DECODER */
/* We export these mov/push utilities to drdecode */
/* If src_inst != NULL, uses it (and assumes it will be encoded at
* encode_estimate to determine whether > 32 bits or not: so if unsure where
* it will be encoded, pass a high address) as the immediate; else
* uses val.
*/
void
insert_mov_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, opnd_t dst, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
instr_t *mov1, *mov2;
if (src_inst != NULL)
val = (ptr_int_t)encode_estimate;
#ifdef X64
if (X64_MODE_DC(dcontext) && !opnd_is_reg(dst)) {
if (val <= INT_MAX && val >= INT_MIN) {
/* mov is sign-extended, so we can use one move if it is all
* 0 or 1 in top 33 bits
*/
mov1 = INSTR_CREATE_mov_imm(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INT32((int)val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
mov2 = NULL;
} else {
/* do mov-64-bit-immed in two pieces. tiny corner-case risk of racy
* access to [dst] if this thread is suspended in between or another
* thread is trying to read [dst], but o/w we have to spill and
* restore a register.
*/
CLIENT_ASSERT(opnd_is_memory_reference(dst), "invalid dst opnd");
/* mov low32 => [mem32] */
opnd_set_size(&dst, OPSZ_4);
mov1 = INSTR_CREATE_mov_st(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INT32((int)val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
/* mov high32 => [mem32+4] */
if (opnd_is_base_disp(dst)) {
int disp = opnd_get_disp(dst);
CLIENT_ASSERT(disp + 4 > disp, "disp overflow");
opnd_set_disp(&dst, disp + 4);
} else {
byte *addr = opnd_get_addr(dst);
CLIENT_ASSERT(!POINTER_OVERFLOW_ON_ADD(addr, 4), "addr overflow");
dst = OPND_CREATE_ABSMEM(addr + 4, OPSZ_4);
}
mov2 = INSTR_CREATE_mov_st(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INT32((int)(val >> 32))
: opnd_create_instr_ex(src_inst, OPSZ_4, 32));
PRE(ilist, instr, mov2);
}
} else {
#endif
mov1 = INSTR_CREATE_mov_imm(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INTPTR(val)
: opnd_create_instr_ex(src_inst, OPSZ_PTR, 0));
PRE(ilist, instr, mov1);
mov2 = NULL;
#ifdef X64
}
#endif
if (first != NULL)
*first = mov1;
if (last != NULL)
*last = mov2;
}
/* If src_inst != NULL, uses it (and assumes it will be encoded at
* encode_estimate to determine whether > 32 bits or not: so if unsure where
* it will be encoded, pass a high address) as the immediate; else
* uses val.
*/
void
insert_push_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
instr_t *push, *mov;
if (src_inst != NULL)
val = (ptr_int_t)encode_estimate;
#ifdef X64
if (X64_MODE_DC(dcontext)) {
/* do push-64-bit-immed in two pieces. tiny corner-case risk of racy
* access to TOS if this thread is suspended in between or another
* thread is trying to read its stack, but o/w we have to spill and
* restore a register.
*/
push = INSTR_CREATE_push_imm(dcontext,
(src_inst == NULL)
? OPND_CREATE_INT32((int)val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, push);
/* push is sign-extended, so we can skip top half if it is all 0 or 1
* in top 33 bits
*/
if (val <= INT_MAX && val >= INT_MIN) {
mov = NULL;
} else {
mov = INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 4),
(src_inst == NULL)
? OPND_CREATE_INT32((int)(val >> 32))
: opnd_create_instr_ex(src_inst, OPSZ_4, 32));
PRE(ilist, instr, mov);
}
} else {
#endif
push = INSTR_CREATE_push_imm(dcontext,
(src_inst == NULL)
? OPND_CREATE_INT32(val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, push);
mov = NULL;
#ifdef X64
}
#endif
if (first != NULL)
*first = push;
if (last != NULL)
*last = mov;
}
#ifndef STANDALONE_DECODER /* back for rest of file */
/* Far calls and rets have double total size */
static opnd_size_t
stack_entry_size(instr_t *instr, opnd_size_t opsize)
{
if (instr_get_opcode(instr) == OP_call_far ||
instr_get_opcode(instr) == OP_call_far_ind ||
instr_get_opcode(instr) == OP_ret_far) {
/* cut OPSZ_8_rex16_short4 in half */
if (opsize == OPSZ_4)
return OPSZ_2;
else if (opsize == OPSZ_8)
return OPSZ_4;
else {
# ifdef X64
ASSERT(opsize == OPSZ_16);
return OPSZ_8;
# else
ASSERT_NOT_REACHED();
# endif
}
} else if (instr_get_opcode(instr) == OP_iret) {
/* convert OPSZ_12_rex40_short6 */
if (opsize == OPSZ_6)
return OPSZ_2;
else if (opsize == OPSZ_12)
return OPSZ_4;
else {
# ifdef X64
ASSERT(opsize == OPSZ_40);
return OPSZ_8;
# else
ASSERT_NOT_REACHED();
# endif
}
}
return opsize;
}
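/* Illustrative example (added commentary, not in the original source): a
 * 32-bit app's far call has total operand size OPSZ_8 (4-byte CS slot plus
 * 4-byte EIP), so stack_entry_size() returns OPSZ_4: each of the two pushed
 * entries is half the total. A near call or ret falls through to the final
 * return and gets opsize back unchanged.
 */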
/* Used for fault translation */
bool
instr_check_xsp_mangling(dcontext_t *dcontext, instr_t *inst, int *xsp_adjust)
{
ASSERT(xsp_adjust != NULL);
if (instr_get_opcode(inst) == OP_push || instr_get_opcode(inst) == OP_push_imm) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: push or push_imm\n");
*xsp_adjust -= opnd_size_in_bytes(opnd_get_size(instr_get_dst(inst, 1)));
} else if (instr_get_opcode(inst) == OP_pop) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: pop\n");
*xsp_adjust += opnd_size_in_bytes(opnd_get_size(instr_get_src(inst, 1)));
}
/* 1st part of push emulation from insert_push_retaddr */
else if (instr_get_opcode(inst) == OP_lea &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_get_base(instr_get_src(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_src(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: lea xsp adjust\n");
*xsp_adjust += opnd_get_disp(instr_get_src(inst, 0));
}
/* 2nd part of push emulation from insert_push_retaddr */
else if (instr_get_opcode(inst) == OP_mov_st &&
opnd_is_base_disp(instr_get_dst(inst, 0)) &&
opnd_get_base(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_dst(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: store to stack\n");
/* nothing to track: paired lea is what we undo */
}
/* retrieval of target for call* or jmp* */
else if ((instr_get_opcode(inst) == OP_movzx &&
reg_overlap(opnd_get_reg(instr_get_dst(inst, 0)), REG_XCX)) ||
(instr_get_opcode(inst) == OP_mov_ld &&
reg_overlap(opnd_get_reg(instr_get_dst(inst, 0)), REG_XCX))) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: ib tgt to *cx\n");
/* nothing: our xcx spill restore will undo */
}
/* part of pop emulation for iretd/lretd in x64 mode */
else if (instr_get_opcode(inst) == OP_mov_ld &&
opnd_is_base_disp(instr_get_src(inst, 0)) &&
opnd_get_base(instr_get_src(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_src(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: load from stack\n");
/* nothing to track: paired lea is what we undo */
}
/* part of data16 ret. once we have cs preservation (PR 271317) we'll
* need to not fail when walking over a movzx to a pop cs (right now we
* do not read the stack for the pop cs).
*/
else if (instr_get_opcode(inst) == OP_movzx &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_CX) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: movzx to cx\n");
/* nothing: our xcx spill restore will undo */
}
/* fake pop of cs for iret */
else if (instr_get_opcode(inst) == OP_add && opnd_is_reg(instr_get_dst(inst, 0)) &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_is_immed_int(instr_get_src(inst, 0))) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: add to xsp\n");
ASSERT(CHECK_TRUNCATE_TYPE_int(opnd_get_immed_int(instr_get_src(inst, 0))));
*xsp_adjust += (int)opnd_get_immed_int(instr_get_src(inst, 0));
}
/* popf for iret */
else if (instr_get_opcode(inst) == OP_popf) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: popf\n");
*xsp_adjust += opnd_size_in_bytes(opnd_get_size(instr_get_src(inst, 1)));
} else {
return false;
}
return true;
}
/* N.B.: keep in synch with instr_check_xsp_mangling() */
void
insert_push_retaddr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t retaddr, opnd_size_t opsize)
{
if (opsize == OPSZ_2) {
ptr_int_t val = retaddr & (ptr_int_t)0x0000ffff;
/* can't do a non-default operand size with a push immed so we emulate */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -2, OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM16(REG_XSP, 2),
OPND_CREATE_INT16(val)));
} else if (opsize ==
OPSZ_PTR IF_X64(|| (!X64_CACHE_MODE_DC(dcontext) && opsize == OPSZ_4))) {
insert_push_immed_ptrsz(dcontext, retaddr, ilist, instr, NULL, NULL);
} else {
# ifdef X64
ptr_int_t val = retaddr & (ptr_int_t)0xffffffff;
ASSERT(opsize == OPSZ_4);
/* can't do a non-default operand size with a push immed so we emulate */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -4, OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 0),
OPND_CREATE_INT32((int)val)));
# else
ASSERT_NOT_REACHED();
# endif
}
}
# ifdef CLIENT_INTERFACE
/* N.B.: keep in synch with instr_check_xsp_mangling() */
static void
insert_mov_ptr_uint_beyond_TOS(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t value, opnd_size_t opsize)
{
/* we insert non-meta b/c we want faults to go to app (should only fault
* if the ret itself faulted, barring races) for simplicity: o/w our
 * our-mangling sequence gets broken up and becomes more complex.
*/
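    /* The stores below target negative offsets from xsp, i.e. the slot just
     * vacated by the pop of the return address: see the INSTR_CLOBBER_RETADDR
     * use in mangle_return().
     */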
if (opsize == OPSZ_2) {
ptr_int_t val = value & (ptr_int_t)0x0000ffff;
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM16(REG_XSP, -2),
OPND_CREATE_INT16(val)));
} else if (opsize == OPSZ_4) {
ptr_int_t val = value & (ptr_int_t)0xffffffff;
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -4),
OPND_CREATE_INT32(val)));
} else {
# ifdef X64
ptr_int_t val_low = value & (ptr_int_t)0xffffffff;
ASSERT(opsize == OPSZ_8);
if (CHECK_TRUNCATE_TYPE_int(value)) {
/* prefer a single write w/ sign-extension */
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM64(REG_XSP, -8),
OPND_CREATE_INT32(val_low)));
} else {
/* we need two 32-bit writes */
ptr_int_t val_high = (value >> 32);
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -8),
OPND_CREATE_INT32(val_low)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -4),
OPND_CREATE_INT32(val_high)));
}
# else
ASSERT_NOT_REACHED();
# endif
}
}
# endif /* CLIENT_INTERFACE */
static void
insert_push_cs(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t retaddr, opnd_size_t opsize)
{
# ifdef X64
if (X64_CACHE_MODE_DC(dcontext)) {
/* "push cs" is invalid; for now we push the typical cs values.
* i#823 covers doing this more generally.
*/
insert_push_retaddr(dcontext, ilist, instr,
X64_MODE_DC(dcontext) ? CS64_SELECTOR : CS32_SELECTOR,
opsize);
} else {
# endif
opnd_t stackop;
/* we go ahead and push cs, but we won't pop into cs */
instr_t *push = INSTR_CREATE_push(dcontext, opnd_create_reg(SEG_CS));
/* 2nd dest is the stack operand size */
stackop = instr_get_dst(push, 1);
opnd_set_size(&stackop, opsize);
instr_set_dst(push, 1, stackop);
PRE(ilist, instr, push);
# ifdef X64
}
# endif
}
/* We spill to XCX(private dcontext) slot for private fragments,
* and to TLS MANGLE_XCX_SPILL_SLOT for shared fragments.
* (Except for DYNAMO_OPTION(private_ib_in_tls), for which all use tls,
* but that has a performance hit because of the extra data cache line)
* We can get away with the split by having the shared ibl routine copy
* xcx to the private dcontext, and by having the private ibl never
* target shared fragments.
* We also have to modify the xcx spill from tls to private dcontext when
* adding a shared basic block to a trace.
*
* FIXME: if we do make non-trace-head basic blocks valid indirect branch
* targets, we should have the private ibl have special code to test the
* flags and copy xcx to the tls slot if necessary.
*/
# define SAVE_TO_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs) \
((DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, (flags))) \
? instr_create_save_to_tls(dc, reg, tls_offs) \
: instr_create_save_to_dcontext((dc), (reg), (dc_offs)))
# define SAVE_TO_DC_OR_TLS_OR_REG(dc, flags, reg, tls_offs, dc_offs, dest_reg) \
((X64_CACHE_MODE_DC(dc) && \
!X64_MODE_DC(dc) IF_X64(&&DYNAMO_OPTION(x86_to_x64_ibl_opt))) \
? INSTR_CREATE_mov_ld(dc, opnd_create_reg(dest_reg), opnd_create_reg(reg)) \
: SAVE_TO_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs) \
((DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, (flags))) \
? instr_create_restore_from_tls(dc, reg, tls_offs) \
: instr_create_restore_from_dcontext((dc), (reg), (dc_offs)))
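/* Illustrative use: SAVE_TO_DC_OR_TLS(dc, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
 * XCX_OFFSET) emits a store of xcx to the TLS slot for shared fragments (or
 * when -private_ib_in_tls is on), and to the private dcontext otherwise.
 */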
static void
mangle_far_direct_helper(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*
* For WOW64, I tried keeping this a direct jmp for nice linking by doing the
* mode change in-fragment and then using a 64-bit stub with a 32-bit fragment,
* but that gets messy b/c a lot of code assumes it can create or calculate the
* size of exit stubs given nothing but the fragment flags. I tried adding
* FRAG_ENDS_IN_FAR_DIRECT but still need to pass another param to all the stub
* macros and routines for mid-trace exits and for prefixes for -disable_traces.
* So, going for treating as indirect and using the far_ibl. It's a trace
* barrier anyway, and rare. We treat it as indirect in all modes (including
* x86 builds) for simplicity (and eventually for full i#823 we'll want
* to issue cs changes there too).
*/
app_pc pc = opnd_get_pc(instr_get_target(instr));
# ifdef X64
if (!X64_MODE_DC(dcontext) &&
opnd_get_segment_selector(instr_get_target(instr)) == CS64_SELECTOR) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX, MANGLE_FAR_SPILL_SLOT,
XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_INT32(CS64_SELECTOR)));
}
# endif
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
ASSERT((ptr_uint_t)pc < UINT_MAX); /* 32-bit code! */
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32((ptr_uint_t)pc)));
}
/***************************************************************************
* DIRECT CALL
* Returns new next_instr
*/
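/* Rough sketch of the transformation (the transfer itself becomes a direct
 * exit added by interp):
 *   call target   =>   push <retaddr>
 *                      <direct exit stub to target>
 */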
instr_t *
mangle_direct_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
ptr_uint_t retaddr;
app_pc target = NULL;
opnd_t pushop = instr_get_dst(instr, 1);
opnd_size_t pushsz = stack_entry_size(instr, opnd_get_size(pushop));
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* assumption: target's raw bits are meaningful */
target = instr_get_raw_bits(tgt);
ASSERT(target != 0);
/* FIXME case 6962: for far instr, we ignore the segment and
* assume it matches current cs */
} else if (opnd_is_far_pc(instr_get_target(instr))) {
target = opnd_get_pc(instr_get_target(instr));
/* FIXME case 6962: we ignore the segment and assume it matches current cs */
} else
ASSERT_NOT_REACHED();
if (!mangle_calls) {
/* off-trace call that will be executed natively */
/* relative target must be re-encoded */
instr_set_raw_bits_valid(instr, false);
# ifdef STEAL_REGISTER
/* FIXME: need to push edi prior to call and pop after.
* However, need to push edi prior to any args to this call,
* and it may be hard to find pre-arg-pushing spot...
* edi is supposed to be callee-saved, we're trusting this
* off-trace call to return, we may as well trust it to
* not trash edi -- these no-inline calls are dynamo's
* own routines, after all.
*/
# endif
return next_instr;
}
retaddr = get_call_return_address(dcontext, ilist, instr);
# ifdef CHECK_RETURNS_SSE2
/* ASSUMPTION: a call to the next instr is not going to ever have a
* matching ret! */
if (target == (app_pc)retaddr) {
LOG(THREAD, LOG_INTERP, 3, "found call to next instruction " PFX "\n", target);
} else {
check_return_handle_call(dcontext, ilist, next_instr);
}
/* now do the normal thing for a call */
# endif
if (instr_get_opcode(instr) == OP_call_far) {
/* N.B.: we do not support other than flat 0-based CS, DS, SS, and ES.
* if the app wants to change segments, we won't actually issue
* a segment change, and so will only work properly if the new segment
* is also 0-based. To properly issue new segments, we'd need a special
* ibl that ends in a far cti, and all prior address manipulations would
* need to be relative to the new segment, w/o messing up current segment.
* FIXME: can we do better without too much work?
* XXX: yes, for wow64: i#823: TODO mangle this like a far direct jmp
*/
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far direct call");
STATS_INC(num_far_dir_calls);
mangle_far_direct_helper(dcontext, ilist, instr, next_instr, flags);
insert_push_cs(dcontext, ilist, instr, 0, pushsz);
}
/* convert a direct call to a push of the return address */
insert_push_retaddr(dcontext, ilist, instr, retaddr, pushsz);
/* remove the call */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
# ifdef UNIX
/***************************************************************************
 * Mangle a memory reference operand that uses the fs/gs segments:
 * get the segment base of fs/gs into reg, and
 * replace oldop with newop using reg instead of fs/gs.
 * The reg must not be used in the oldop; otherwise, the reg value
 * is corrupted.
*/
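/* Illustrative sketch (assuming reg == REG_XDX) for "mov eax, fs:[ecx]":
 *   mov  xdx, <app fs base, loaded from DR's TLS slot>
 *   mov  eax, [ecx+xdx]       ; the newop returned by this routine
 */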
opnd_t
mangle_seg_ref_opnd(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
opnd_t oldop, reg_id_t reg)
{
opnd_t newop;
reg_id_t seg;
ASSERT(opnd_is_far_base_disp(oldop));
seg = opnd_get_segment(oldop);
/* we only mangle fs/gs */
if (seg != SEG_GS && seg != SEG_FS)
return oldop;
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return oldop;
# endif
/* The reg should not be used by the oldop */
ASSERT(!opnd_uses_reg(oldop, reg));
/* XXX: this mangling is pattern-matched in translation's instr_is_seg_ref_load() */
/* get app's segment base into reg. */
PRE(ilist, where,
instr_create_restore_from_tls(dcontext, reg, os_get_app_tls_base_offset(seg)));
if (opnd_get_index(oldop) != REG_NULL && opnd_get_base(oldop) != REG_NULL) {
/* if both base and index are used, use
* lea [base, reg, 1] => reg
* to get the base + seg_base into reg.
*/
PRE(ilist, where,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(reg),
opnd_create_base_disp(opnd_get_base(oldop), reg, 1, 0, OPSZ_lea)));
}
if (opnd_get_index(oldop) != REG_NULL) {
newop = opnd_create_base_disp(reg, opnd_get_index(oldop), opnd_get_scale(oldop),
opnd_get_disp(oldop), opnd_get_size(oldop));
} else {
newop = opnd_create_base_disp(opnd_get_base(oldop), reg, 1, opnd_get_disp(oldop),
opnd_get_size(oldop));
}
return newop;
}
# endif /* UNIX */
/***************************************************************************
* INDIRECT CALL
*/
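/* Rough sketch of the resulting code for "call *[eax]":
 *   <save xcx to spill slot>
 *   mov  xcx, [eax]           ; replaces the call and computes the target
 *   push <retaddr>            ; after the mov since the target may use xsp
 *   <exit stub jmp to indirect_branch_lookup, added by interp>
 */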
static reg_id_t
mangle_far_indirect_helper(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags, opnd_t *target_out)
{
opnd_t target = *target_out;
opnd_size_t addr_size;
reg_id_t reg_target = REG_NULL;
ASSERT(instr_get_opcode(instr) == OP_jmp_far_ind ||
instr_get_opcode(instr) == OP_call_far_ind);
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*/
    /* opnd type is i_Ep; it's not a far base disp b/c the segment is at a
     * memory location, not specified as a segment prefix on the instr.
     * We assume register operands are marked as invalid instrs long
     * before this point.
     */
ASSERT(opnd_is_base_disp(target));
/* Segment selector is the final 2 bytes.
* For non-mixed-mode, we ignore it.
* We assume DS base == target cti CS base.
*/
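    /* E.g., a 6-byte m16:32 operand holds the 4-byte code offset at +0 and
     * the 2-byte selector at +4; below we shrink the operand to just the
     * offset portion.
     */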
/* if data16 then just 2 bytes for address
* if x64 mode and Intel and rex then 8 bytes for address */
ASSERT((X64_MODE_DC(dcontext) && opnd_get_size(target) == OPSZ_10 &&
proc_get_vendor() != VENDOR_AMD) ||
opnd_get_size(target) == OPSZ_6 || opnd_get_size(target) == OPSZ_4);
if (opnd_get_size(target) == OPSZ_10) {
addr_size = OPSZ_8;
reg_target = REG_RCX;
} else if (opnd_get_size(target) == OPSZ_6) {
addr_size = OPSZ_4;
reg_target = REG_ECX;
} else /* target has OPSZ_4 */ {
addr_size = OPSZ_2;
reg_target = REG_XCX; /* caller uses movzx so size doesn't have to match */
}
# ifdef X64
if (mixed_mode_enabled()) {
/* While we don't support arbitrary segments, we do support
* mode changes using standard cs selector values (i#823).
* We save the selector into xbx.
*/
opnd_t sel = target;
opnd_set_disp(&sel, opnd_get_disp(target) + opnd_size_in_bytes(addr_size));
opnd_set_size(&sel, OPSZ_2);
/* all scratch space should be in TLS only */
ASSERT(TEST(FRAG_SHARED, flags) || DYNAMO_OPTION(private_ib_in_tls));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX, MANGLE_FAR_SPILL_SLOT,
XBX_OFFSET, REG_R10));
PRE(ilist, instr, INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX), sel));
if (instr_uses_reg(instr, REG_XBX)) {
/* instr can't be both riprel (uses xax slot for mangling) and use
* a register, so we spill to the riprel (== xax) slot
*/
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XBX, MANGLE_RIPREL_SPILL_SLOT,
XAX_OFFSET));
POST(ilist, instr,
instr_create_restore_from_tls(dcontext, REG_XBX,
MANGLE_RIPREL_SPILL_SLOT));
}
}
# endif
opnd_set_size(target_out, addr_size);
return reg_target;
}
instr_t *
mangle_indirect_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
opnd_t target;
ptr_uint_t retaddr;
opnd_t pushop = instr_get_dst(instr, 1);
opnd_size_t pushsz = stack_entry_size(instr, opnd_get_size(pushop));
reg_id_t reg_target = REG_XCX;
if (!mangle_calls)
return next_instr;
retaddr = get_call_return_address(dcontext, ilist, instr);
/* Convert near, indirect calls. The jump to the exit_stub that
* jumps to indirect_branch_lookup was already inserted into the
* instr list by interp EXCEPT for the case in which we're converting
* an indirect call to a direct call. In that case, mangle later
* inserts a direct exit stub.
*/
/* If this call is marked for conversion, do minimal processing.
* FIXME Just a note that converted calls are not subjected to any of
* the specialized builds' processing further down.
*/
if (TEST(INSTR_IND_CALL_DIRECT, instr->flags)) {
/* convert the call to a push of the return address */
insert_push_retaddr(dcontext, ilist, instr, retaddr, pushsz);
/* remove the call */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
/* put the push AFTER the instruction that calculates
* the target, b/c if target depends on xsp we must use
* the value of xsp prior to this call instruction!
* we insert before next_instr to accomplish this.
*/
if (instr_get_opcode(instr) == OP_call_far_ind) {
/* goes right before the push of the ret addr */
insert_push_cs(dcontext, ilist, next_instr, 0, pushsz);
/* see notes below -- we don't really support switching segments,
* though we do go ahead and push cs, we won't pop into cs
*/
}
insert_push_retaddr(dcontext, ilist, next_instr, retaddr, pushsz);
/* save away xcx so that we can use it */
    /* (it's restored in x86.s (indirect_branch_lookup)) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
# ifdef STEAL_REGISTER
/* Steal edi if call uses it, using original call instruction */
steal_reg(dcontext, instr, ilist);
if (ilist->flags)
restore_state(dcontext, next_instr, ilist);
/* It's impossible for our register stealing to use ecx
* because no call can simultaneously use 3 registers, right?
* Maximum is 2, in something like "call *(edi,ecx,4)"?
* If it is possible, need to make sure stealing's use of ecx
* doesn't conflict w/ our use
*/
# endif
/* change: call /2, Ev -> movl Ev, %xcx */
target = instr_get_src(instr, 0);
if (instr_get_opcode(instr) == OP_call_far_ind) {
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far indirect call");
STATS_INC(num_far_ind_calls);
reg_target = mangle_far_indirect_helper(dcontext, ilist, instr, next_instr, flags,
&target);
}
# ifdef UNIX
/* i#107, mangle the memory reference opnd that uses segment register. */
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(target)) {
/* FIXME: we use REG_XCX to store the segment base, which might be used
* in target and cause assertion failure in mangle_seg_ref_opnd.
*/
ASSERT_BUG_NUM(107, !opnd_uses_reg(target, REG_XCX));
target = mangle_seg_ref_opnd(dcontext, ilist, instr, target, REG_XCX);
}
# endif
/* cannot call instr_reset, it will kill prev & next ptrs */
instr_free(dcontext, instr);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_opcode(instr, opnd_get_size(target) == OPSZ_2 ? OP_movzx : OP_mov_ld);
instr_set_dst(instr, 0, opnd_create_reg(reg_target));
instr_set_src(instr, 0, target); /* src stays the same */
if (instrlist_get_translation_target(ilist) != NULL) {
/* make sure original raw bits are used for translation */
instr_set_translation(instr, instr_get_raw_bits(instr));
}
instr_set_our_mangling(instr, true);
# ifdef CHECK_RETURNS_SSE2
check_return_handle_call(dcontext, ilist, next_instr);
# endif
return next_instr;
}
/***************************************************************************
* RETURN
*/
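/* Rough sketch of the resulting code for a near "ret":
 *   <save xcx to spill slot>
 *   pop  xcx                  ; retaddr into xcx, replaces the ret
 *   <exit stub jmp to indirect_branch_lookup, added by interp>
 */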
# ifdef X64
/* Saves the selector from the top of the stack into xbx, after spilling xbx,
* for far_ibl.
*/
static void
mangle_far_return_save_selector(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
uint flags)
{
if (mixed_mode_enabled()) {
/* While we don't support arbitrary segments, we do support
* mode changes using standard cs selector values (i#823).
* We save the selector into xbx.
*/
/* We could do a pop but state xl8 is already set up to restore lea */
/* all scratch space should be in TLS only */
ASSERT(TEST(FRAG_SHARED, flags) || DYNAMO_OPTION(private_ib_in_tls));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX, MANGLE_FAR_SPILL_SLOT,
XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_MEM16(REG_XSP, 0)));
}
}
# endif
void
mangle_return(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
instr_t *pop;
opnd_t retaddr;
opnd_size_t retsz;
# ifdef CHECK_RETURNS_SSE2
check_return_handle_return(dcontext, ilist, next_instr);
/* now do the normal ret mangling */
# endif
/* Convert returns. If aggressive we could take advantage of the
* fact that xcx is dead at the return and not bother saving it?
* The jump to the exit_stub that jumps to indirect_branch_lookup
* was already inserted into the instr list by interp. */
/* save away xcx so that we can use it */
    /* (it's restored in x86.s (indirect_branch_lookup)) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
/* see if ret has an immed int operand, assumed to be 1st src */
if (instr_num_srcs(instr) > 0 && opnd_is_immed_int(instr_get_src(instr, 0))) {
/* if has an operand, return removes some stack space,
* AFTER the return address is popped
*/
int val = (int)opnd_get_immed_int(instr_get_src(instr, 0));
IF_X64(ASSERT_TRUNCATE(val, int, opnd_get_immed_int(instr_get_src(instr, 0))));
/* addl sizeof_param_area, %xsp
* except that clobbers the flags, so we use lea */
PRE(ilist, next_instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, val, OPSZ_lea)));
}
/* don't need to steal edi since return cannot use registers */
/* the retaddr operand is always the final source for all OP_ret* instrs */
retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
retsz = stack_entry_size(instr, opnd_get_size(retaddr));
if (X64_CACHE_MODE_DC(dcontext) && retsz == OPSZ_4) {
if (instr_get_opcode(instr) == OP_iret || instr_get_opcode(instr) == OP_ret_far) {
/* N.B.: For some unfathomable reason iret and ret_far default to operand
* size 4 in 64-bit mode (making them, along w/ call_far, the only stack
* operation instructions to do so). So if we see an iret or far ret with
* OPSZ_4 in 64-bit mode we need a 4-byte pop, but since we can't actually
* generate a 4-byte pop we have to emulate it here. */
SYSLOG_INTERNAL_WARNING_ONCE("Encountered iretd/lretd in 64-bit mode!");
}
/* Note moving into ecx automatically zero extends which is what we want. */
PRE(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_MEM32(REG_RSP, 0)));
/* iret could use add since going to pop the eflags, but not lret.
* lret could combine w/ segment lea below: but not perf-crit instr, and
* anticipating cs preservation PR 271317 I'm leaving separate. */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 4, OPSZ_lea)));
} else {
/* change RET into a POP, keeping the operand size */
opnd_t memop = retaddr;
pop = INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_XCX));
/* need per-entry size, not total size (double for far ret) */
opnd_set_size(&memop, retsz);
instr_set_src(pop, 1, memop);
if (retsz == OPSZ_2)
instr_set_dst(pop, 0, opnd_create_reg(REG_CX));
/* We can't do a 4-byte pop in 64-bit mode, but excepting iretd and lretd
* handled above we should never see one. */
ASSERT(!X64_MODE_DC(dcontext) || retsz != OPSZ_4);
PRE(ilist, instr, pop);
if (retsz == OPSZ_2) {
/* we need to zero out the top 2 bytes */
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_ECX),
opnd_create_reg(REG_CX)));
}
}
# ifdef CLIENT_INTERFACE
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags)) {
/* we put the value in the note field earlier */
ptr_uint_t val = (ptr_uint_t)instr->note;
insert_mov_ptr_uint_beyond_TOS(dcontext, ilist, instr, val, retsz);
}
# endif
if (instr_get_opcode(instr) == OP_ret_far) {
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*/
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far ret");
STATS_INC(num_far_rets);
# ifdef X64
mangle_far_return_save_selector(dcontext, ilist, instr, flags);
# endif
/* pop selector from stack, but not into cs, just junk it
* (the 16-bit selector is expanded to 32 bits on the push, unless data16)
*/
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
opnd_size_in_bytes(retsz), OPSZ_lea)));
}
if (instr_get_opcode(instr) == OP_iret) {
instr_t *popf;
/* Xref PR 215553 and PR 191977 - we actually see this on 64-bit Vista */
LOG(THREAD, LOG_INTERP, 2, "Encountered iret at " PFX " - mangling\n",
instr_get_translation(instr));
STATS_INC(num_irets);
/* In 32-bit mode this is a pop->EIP pop->CS pop->eflags.
* 64-bit mode (with either 32-bit or 64-bit operand size,
* despite the (wrong) Intel manual pseudocode: see i#833 and
* the win32.mixedmode test) extends
* the above and additionally adds pop->RSP pop->ss. N.B.: like OP_far_ret we
* ignore the CS (except mixed-mode WOW64) and SS segment changes
* (see the comments there).
*/
# ifdef X64
mangle_far_return_save_selector(dcontext, ilist, instr, flags);
# endif
/* Return address is already popped, next up is CS segment which we ignore
* (unless in mixed-mode, handled above) so
* adjust stack pointer. Note we can use an add here since the eflags will
* be written below. */
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT8(opnd_size_in_bytes(retsz))));
/* Next up is xflags, we use a popf. Popf should be setting the right flags
* (it's difficult to tell because in the docs iret lists the flags it does
* set while popf lists the flags it doesn't set). The docs aren't entirely
* clear, but any flag that we or a user mode program would care about should
* be right. */
popf = INSTR_CREATE_popf(dcontext);
if (X64_CACHE_MODE_DC(dcontext) && retsz == OPSZ_4) {
/* We can't actually create a 32-bit popf and there's no easy way to
* simulate one. For now we'll do a 64-bit popf and fixup the stack offset.
* If AMD/INTEL ever start using the top half of the rflags register then
* we could have problems here. We could also break stack transparency and
* do a mov, push, popf to zero extend the value. */
PRE(ilist, instr, popf);
/* flags are already set, must use lea to fix stack */
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -4, OPSZ_lea)));
} else {
/* get popf size right the same way we do it for the return address */
opnd_t memop = retaddr;
opnd_set_size(&memop, retsz);
DOCHECK(1, {
if (retsz == OPSZ_2)
ASSERT_NOT_TESTED();
});
instr_set_src(popf, 1, memop);
PRE(ilist, instr, popf);
}
/* Mangles single step exception after a popf. */
mangle_possible_single_step(dcontext, ilist, popf);
# ifdef X64
/* In x64 mode, iret additionally does pop->RSP and pop->ss. */
if (X64_MODE_DC(dcontext)) {
if (retsz == OPSZ_8)
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RSP)));
else if (retsz == OPSZ_4) {
PRE(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_ESP),
OPND_CREATE_MEM32(REG_RSP, 0)));
} else {
ASSERT_NOT_TESTED();
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_ESP),
OPND_CREATE_MEM16(REG_RSP, 0)));
}
/* We're ignoring the set of SS and since we just set RSP we don't need
* to do anything to adjust the stack for the pop (since the pop would have
* occurred with the old RSP). */
}
# endif
}
/* remove the ret */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
/***************************************************************************
* INDIRECT JUMP
*/
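/* Mangled like an indirect call minus the return-address push:
 *   <save xcx>; mov xcx, <target>; <exit stub jmp to indirect_branch_lookup>
 */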
instr_t *
mangle_indirect_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
opnd_t target;
reg_id_t reg_target = REG_XCX;
/* Convert indirect branches (that are not returns). Again, the
* jump to the exit_stub that jumps to indirect_branch_lookup
* was already inserted into the instr list by interp. */
/* save away xcx so that we can use it */
    /* (it's restored in x86.s (indirect_branch_lookup)) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
# ifdef STEAL_REGISTER
/* Steal edi if branch uses it, using original instruction */
steal_reg(dcontext, instr, ilist);
if (ilist->flags)
restore_state(dcontext, next_instr, ilist);
# endif
/* change: jmp /4, i_Ev -> movl i_Ev, %xcx */
target = instr_get_target(instr);
if (instr_get_opcode(instr) == OP_jmp_far_ind) {
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far indirect jump");
STATS_INC(num_far_ind_jmps);
reg_target = mangle_far_indirect_helper(dcontext, ilist, instr, next_instr, flags,
&target);
}
# ifdef UNIX
/* i#107, mangle the memory reference opnd that uses segment register. */
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(target)) {
/* FIXME: we use REG_XCX to store segment base, which might be used
* in target and cause assertion failure in mangle_seg_ref_opnd.
*/
ASSERT_BUG_NUM(107, !opnd_uses_reg(target, REG_XCX));
target = mangle_seg_ref_opnd(dcontext, ilist, instr, target, REG_XCX);
}
# endif
/* cannot call instr_reset, it will kill prev & next ptrs */
instr_free(dcontext, instr);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_opcode(instr, opnd_get_size(target) == OPSZ_2 ? OP_movzx : OP_mov_ld);
instr_set_dst(instr, 0, opnd_create_reg(reg_target));
instr_set_src(instr, 0, target); /* src stays the same */
if (instrlist_get_translation_target(ilist) != NULL) {
/* make sure original raw bits are used for translation */
instr_set_translation(instr, instr_get_raw_bits(instr));
}
instr_set_our_mangling(instr, true);
/* It's impossible for our register stealing to use ecx
* because no branch can simultaneously use 3 registers, right?
* Maximum is 2, in something like "jmp *(edi,ebx,4)"?
* If it is possible, need to make sure stealing's use of ecx
* doesn't conflict w/ our use = FIXME
*/
return next_instr;
}
/***************************************************************************
* FAR DIRECT JUMP
*/
void
mangle_far_direct_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far direct jmp");
STATS_INC(num_far_dir_jmps);
mangle_far_direct_helper(dcontext, ilist, instr, next_instr, flags);
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
/***************************************************************************
* SYSCALL
*/
# ifdef UNIX
/* Inserts code to handle clone into ilist.
* instr is the syscall instr itself.
* Assumes that instructions exist beyond instr in ilist.
*
* CAUTION: don't use a lot of stack in the generated code because
 * get_clone_record() assumes the stack usage stays under a page.
*/
void
mangle_insert_clone_code(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr _IF_X64(gencode_mode_t mode))
{
/* int 0x80
* xchg xax,xcx
* jecxz child
* jmp parent
* child:
* xchg xax,xcx
* # i#149/PR 403015: the child is on the dstack so no need to swap stacks
* jmp new_thread_dynamo_start
* parent:
* xchg xax,xcx
* <post system call, etc.>
*/
instr_t *in = instr_get_next(instr);
instr_t *child = INSTR_CREATE_label(dcontext);
instr_t *parent = INSTR_CREATE_label(dcontext);
ASSERT(in != NULL);
PRE(ilist, in,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XCX)));
PRE(ilist, in, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(child)));
PRE(ilist, in, INSTR_CREATE_jmp(dcontext, opnd_create_instr(parent)));
PRE(ilist, in, child);
PRE(ilist, in,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XCX)));
/* We used to insert this directly into fragments for inlined system
* calls, but not once we eliminated clean calls out of the DR cache
* for security purposes. Thus it can be a meta jmp, or an indirect jmp.
*/
insert_reachable_cti(dcontext, ilist, in, vmcode_get_start(),
(byte *)get_new_thread_start(dcontext _IF_X64(mode)),
true /*jmp*/, false /*!returns*/, false /*!precise*/,
DR_REG_NULL /*no scratch*/, NULL);
instr_set_meta(instr_get_prev(in));
PRE(ilist, in, parent);
PRE(ilist, in,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XCX)));
}
# endif /* UNIX */
# ifdef WINDOWS
/* Note that ignore syscalls processing for XP and 2003 is a two-phase operation.
* For this reason, mangle_syscall() might be called with a 'next_instr' that's
* not an original app instruction but one inserted by the earlier mangling phase.
*/
# endif
/* XXX: any extra code here can interfere with mangle_syscall_code()
* and interrupted_inlined_syscall() which have assumptions about the
* exact code around inlined system calls.
*/
void
mangle_syscall_arch(dcontext_t *dcontext, instrlist_t *ilist, uint flags, instr_t *instr,
instr_t *next_instr)
{
# ifdef UNIX
/* Shared routine already checked method, handled INSTR_NI_SYSCALL*,
* and inserted the signal barrier and non-auto-restart nop.
* If we get here, we're dealing with an ignorable syscall.
*/
# ifdef MACOS
if (instr_get_opcode(instr) == OP_sysenter) {
/* The kernel returns control to whatever user-mode places in edx.
* We get control back here and then go to the ret ibl (since normally
* there's a call to a shared routine that does "pop edx").
*/
instr_t *post_sysenter = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
instrlist_insert_mov_instr_addr(dcontext, post_sysenter, NULL /*in cache*/,
opnd_create_reg(REG_XDX), ilist, instr, NULL,
NULL);
/* sysenter goes here */
PRE(ilist, next_instr, post_sysenter);
PRE(ilist, next_instr,
RESTORE_FROM_DC_OR_TLS(dcontext, flags, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
PRE(ilist, next_instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, opnd_create_reg(REG_XCX),
opnd_create_reg(REG_XDX)));
} else if (TEST(INSTR_BRANCH_SPECIAL_EXIT, instr->flags)) {
int num = instr_get_interrupt_number(instr);
ASSERT(instr_get_opcode(instr) == OP_int);
if (num == 0x81 || num == 0x82) {
int reason = (num == 0x81) ? EXIT_REASON_NI_SYSCALL_INT_0x81
: EXIT_REASON_NI_SYSCALL_INT_0x82;
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true /*save_xdi*/);
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_dcontext_field_via_reg_sz(
dcontext, REG_NULL /*default*/, EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(reason)));
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, reason,
EXIT_REASON_OFFSET));
}
}
}
# endif
# ifdef STEAL_REGISTER
/* in linux, system calls get their parameters via registers.
* edi is the last one used, but there are system calls that
* use it, so we put the real value into edi. plus things
* like fork() should get the real register values.
* it's also a good idea to put the real edi into %edi for
* debugger interrupts (int3).
*/
/* the only way we can save and then restore our dc
* ptr is to use the stack!
* this should be fine, all interrupt instructions push
* both eflags and return address on stack, so esp must
     * be valid at this point. there could be an application
     * assuming only 2 slots on the stack will be used; our use of a 3rd
     * slot could mess up that app...but what can we do?
* also, if kernel examines user stack, we could have problems.
* push edi # push dcontext ptr
* restore edi # restore app edi
* <syscall>
* push ebx
* mov edi, ebx
* mov 4(esp), edi # get dcontext ptr
* save ebx to edi slot
* pop ebx
* add 4,esp # clean up push of dcontext ptr
*/
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EDI)));
PRE(ilist, instr, instr_create_restore_from_dcontext(dcontext, REG_EDI, XDI_OFFSET));
/* insert after in reverse order: */
POST(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_ESP), OPND_CREATE_INT8(4)));
POST(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_EBX)));
POST(ilist, instr, instr_create_save_to_dcontext(dcontext, REG_EBX, XDI_OFFSET));
POST(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_EDI),
OPND_CREATE_MEM32(REG_ESP, 4)));
POST(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_EBX),
opnd_create_reg(REG_EDI)));
POST(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EBX)));
# endif /* STEAL_REGISTER */
# else /* WINDOWS */
/* special handling of system calls is performed in shared_syscall or
* in do_syscall
*/
/* FIXME: for ignorable syscalls,
* do we need support for exiting mid-fragment prior to a syscall
* like we do on Linux, to bound time in cache?
*/
if (does_syscall_ret_to_callsite()) {
uint len = instr_length(dcontext, instr);
if (TEST(INSTR_SHARED_SYSCALL, instr->flags)) {
ASSERT(DYNAMO_OPTION(shared_syscalls));
/* this syscall will be performed by the shared_syscall code
* we just need to place a return address into the dcontext
* xsi slot or the mangle-next-tag tls slot
*/
if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
# ifdef X64
ASSERT(instr_raw_bits_valid(instr));
/* PR 244741: no 64-bit store-immed-to-mem
* FIXME: would be nice to move this to the stub and
* use the dead rbx register!
*/
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XCX, MANGLE_NEXT_TAG_SLOT));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR((instr->bytes + len))));
PRE(ilist, instr,
INSTR_CREATE_xchg(
dcontext,
opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT)),
opnd_create_reg(REG_XCX)));
# else
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT)),
OPND_CREATE_INTPTR((instr->bytes + len))));
# endif
} else {
PRE(ilist, instr,
instr_create_save_immed32_to_dcontext(
dcontext, (uint)(ptr_uint_t)(instr->bytes + len), XSI_OFFSET));
}
}
/* Handle ignorable syscall. non-ignorable system calls are
* destroyed and removed from the list at the end of this func.
*/
else if (!TEST(INSTR_NI_SYSCALL, instr->flags)) {
if (get_syscall_method() == SYSCALL_METHOD_INT && DYNAMO_OPTION(sygate_int)) {
/* for Sygate need to mangle into a call to int_syscall_addr
* is anyone going to get screwed up by this change
* (say flags change?) [-ignore_syscalls only]*/
ASSERT_NOT_TESTED();
instrlist_replace(ilist, instr, create_syscall_instr(dcontext));
instr_destroy(dcontext, instr);
} else if (get_syscall_method() == SYSCALL_METHOD_SYSCALL)
ASSERT_NOT_TESTED();
else if (get_syscall_method() == SYSCALL_METHOD_WOW64)
ASSERT_NOT_TESTED();
return;
}
} else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* on XP/2003 we have a choice between inserting a trampoline at the
* return pt of the sysenter, which is 0x7ffe0304 (except for
* SP2-patched XP), which is bad since it would clobber whatever's after
* the ret there (unless we used a 0xcc, like Visual Studio 2005 debugger
         * does), and replacing the ret addr on the stack -- we choose the
* latter as the lesser of two transparency evils. Note that the
* page at 0x7ffe0000 can't be made writable anyway, so hooking
* isn't possible.
*/
if (TEST(INSTR_SHARED_SYSCALL, instr->flags)) {
ASSERT(DYNAMO_OPTION(shared_syscalls));
}
/* Handle ignorable syscall. non-ignorable system calls are
* destroyed and removed from the list at the end of this func.
*/
else if (!TEST(INSTR_NI_SYSCALL, instr->flags)) {
instr_t *mov_imm;
/* even w/ ignorable syscall, need to make sure regain control */
ASSERT(next_instr != NULL);
ASSERT(DYNAMO_OPTION(indcall2direct));
/* for sygate hack need to basically duplicate what is done in
* shared_syscall, but here we could be shared so would need to
* grab dcontext first etc. */
ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter));
/* PR 253943: we don't support sysenter in x64 */
IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */
/* FIXME PR 303413: we won't properly translate a fault in our
* app stack reference here. It's marked as our own mangling
* so we'll at least return failure from our translate routine.
*/
mov_imm = INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 0),
opnd_create_instr(next_instr));
ASSERT(instr_is_mov_imm_to_tos(mov_imm));
PRE(ilist, instr, mov_imm);
/* do not let any encoding for length be cached!
* o/w will lose pc-relative opnd
*/
/* 'next_instr' is executed after the after-syscall vsyscall
* 'ret', which is executed natively. */
instr_set_meta(instr_get_prev(instr));
return; /* leave syscall instr alone */
}
} else {
SYSLOG_INTERNAL_ERROR("unsupported system call method");
LOG(THREAD, LOG_INTERP, 1, "don't know convention for this syscall method\n");
if (!TEST(INSTR_NI_SYSCALL, instr->flags))
return;
ASSERT_NOT_IMPLEMENTED(false);
}
/* destroy the syscall instruction */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
# endif /* WINDOWS */
}
/***************************************************************************
* NON-SYSCALL INTERRUPT
*/
void
mangle_interrupt(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
# ifdef WINDOWS
int num;
if (instr_get_opcode(instr) != OP_int)
return;
num = instr_get_interrupt_number(instr);
if (num == 0x2b) {
/* A callback finishes and returns to the interruption
* point of the thread with the instruction "int 2b".
* The interrupt ends the block; remove the instruction
* since we'll come back to dynamo to perform the
* interrupt.
*/
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
# endif /* WINDOWS */
}
/***************************************************************************
* Single step exceptions catching
*/
void
mangle_possible_single_step(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
    /* Simply inserts two nops so that the next instruction where a single step
* exception might occur is in the same basic block and so that the
* translation of a single step exception points back to the instruction
* which set the trap flag.
* The single step exception is a problem because
* the ExceptionAddress should be the next EIP.
*/
POST(ilist, instr, INSTR_CREATE_nop(dcontext));
/* Inserting two nops to get ExceptionAddress on the second one. */
POST(ilist, instr, INSTR_CREATE_nop(dcontext));
}
/***************************************************************************
* Single step exceptions generation
*/
void
mangle_single_step(dcontext_t *dcontext, instrlist_t *ilist, uint flags, instr_t *instr)
{
/* Sets exit reason dynamically. */
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true /*save_xdi*/);
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL /*default*/,
EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(EXIT_REASON_SINGLE_STEP)));
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, EXIT_REASON_SINGLE_STEP,
EXIT_REASON_OFFSET));
}
}
/***************************************************************************
* FLOATING POINT PC
*/
/* The offset of the last floating point PC in the saved state */
# define FNSAVE_PC_OFFS 12
# define FXSAVE_PC_OFFS 8
# define FXSAVE_SIZE 512
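/* In the 32-bit protected-mode fnsave layout the FPU instruction pointer is
 * at byte offset 12; in the fxsave/xsave layout it is at offset 8 (8 bytes
 * wide for the 64-bit forms).  The xsave header follows the 512-byte
 * fxsave area.
 */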
void
float_pc_update(dcontext_t *dcontext)
{
byte *state = *(byte **)(((byte *)dcontext->local_state) + FLOAT_PC_STATE_SLOT);
app_pc orig_pc, xl8_pc;
uint offs = 0;
LOG(THREAD, LOG_INTERP, 2, "%s: fp state " PFX "\n", __FUNCTION__, state);
    if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE ||
        dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64) {
        /* Check whether the FPU state was saved */
        uint64 header_bv = *(uint64 *)(state + FXSAVE_SIZE);
        if (!TEST(XCR0_FP, header_bv)) {
            LOG(THREAD, LOG_INTERP, 2, "%s: xsave did not save FP state => nop\n",
                __FUNCTION__);
            return;
        }
    }
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FNSAVE) {
offs = FNSAVE_PC_OFFS;
} else {
offs = FXSAVE_PC_OFFS;
}
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FXSAVE64 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64)
orig_pc = *(app_pc *)(state + offs);
else /* just bottom 32 bits of pc */
orig_pc = (app_pc)(ptr_uint_t) * (uint *)(state + offs);
if (orig_pc == NULL) {
/* no fp instr yet */
LOG(THREAD, LOG_INTERP, 2, "%s: pc is NULL\n", __FUNCTION__);
return;
}
/* i#1211-c#1: the orig_pc might be an app pc restored from fldenv */
if (!in_fcache(orig_pc) &&
/* XXX: i#698: there might be fp instr neither in fcache nor in app */
!(in_generated_routine(dcontext, orig_pc) || is_dynamo_address(orig_pc) ||
is_in_dynamo_dll(orig_pc) IF_CLIENT_INTERFACE(|| is_in_client_lib(orig_pc)))) {
bool no_xl8 = true;
# ifdef X64
if (dcontext->upcontext.upcontext.exit_reason != EXIT_REASON_FLOAT_PC_FXSAVE64 &&
dcontext->upcontext.upcontext.exit_reason != EXIT_REASON_FLOAT_PC_XSAVE64) {
/* i#1427: try to fill in the top 32 bits */
ptr_uint_t vmcode = (ptr_uint_t)vmcode_get_start();
if ((vmcode & 0xffffffff00000000) > 0) {
byte *orig_try =
(byte *)((vmcode & 0xffffffff00000000) | (ptr_uint_t)orig_pc);
if (in_fcache(orig_try)) {
LOG(THREAD, LOG_INTERP, 2,
"%s: speculating: pc " PFX " + top half of vmcode = " PFX "\n",
__FUNCTION__, orig_pc, orig_try);
orig_pc = orig_try;
no_xl8 = false;
}
}
}
# endif
if (no_xl8) {
LOG(THREAD, LOG_INTERP, 2, "%s: pc " PFX " is translated already\n",
__FUNCTION__, orig_pc);
return;
}
}
/* We must either grab thread_initexit_lock or be couldbelinking to translate */
mutex_lock(&thread_initexit_lock);
xl8_pc = recreate_app_pc(dcontext, orig_pc, NULL);
mutex_unlock(&thread_initexit_lock);
LOG(THREAD, LOG_INTERP, 2, "%s: translated " PFX " to " PFX "\n", __FUNCTION__,
orig_pc, xl8_pc);
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FXSAVE64 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64)
*(app_pc *)(state + offs) = xl8_pc;
else /* just bottom 32 bits of pc */
*(uint *)(state + offs) = (uint)(ptr_uint_t)xl8_pc;
}
void
mangle_float_pc(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint *flags INOUT)
{
/* If there is a prior non-control float instr, we can inline the pc update.
* Otherwise, we go back to dispatch. In the latter case we do not support
* building traces across the float pc save: we assume it's rare.
*/
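    /* Inline-update sketch: after e.g. "fnstenv [mem]" we overwrite the saved
     * FPU instruction pointer at [mem+12] with the translated app pc of the
     * prior floating-point instr.
     */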
app_pc prior_float = NULL;
bool exit_is_normal = false;
int op = instr_get_opcode(instr);
opnd_t memop = instr_get_dst(instr, 0);
ASSERT(opnd_is_memory_reference(memop));
/* To simplify the code here we don't support rip-rel for local handling.
* We also don't support xsave, as it optionally writes the fpstate.
*/
if (opnd_is_base_disp(memop) && op != OP_xsave32 && op != OP_xsaveopt32 &&
op != OP_xsave64 && op != OP_xsaveopt64 && op != OP_xsavec32 &&
op != OP_xsavec64) {
instr_t *prev;
for (prev = instr_get_prev_expanded(dcontext, ilist, instr); prev != NULL;
prev = instr_get_prev_expanded(dcontext, ilist, prev)) {
dr_fp_type_t type;
if (instr_is_app(prev) && instr_is_floating_ex(prev, &type)) {
bool control_instr = false;
if (type == DR_FP_STATE /* quick check */ &&
/* Check the list from Intel Vol 1 8.1.8 */
(op == OP_fnclex || op == OP_fldcw || op == OP_fnstcw ||
op == OP_fnstsw || op == OP_fnstenv || op == OP_fldenv ||
op == OP_fwait))
control_instr = true;
if (!control_instr) {
prior_float = get_app_instr_xl8(prev);
break;
}
}
}
}
if (prior_float != NULL) {
/* We can link this */
exit_is_normal = true;
STATS_INC(float_pc_from_cache);
/* Replace the stored code cache pc with the original app pc.
* If the app memory is unwritable, instr would have already crashed.
*/
if (op == OP_fnsave || op == OP_fnstenv) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FNSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_4);
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, memop,
OPND_CREATE_INT32((int)(ptr_int_t)prior_float)));
} else if (op == OP_fxsave32) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FXSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_4);
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, memop,
OPND_CREATE_INT32((int)(ptr_int_t)prior_float)));
} else if (op == OP_fxsave64) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FXSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_8);
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)prior_float, memop, ilist,
next_instr, NULL, NULL);
} else
ASSERT_NOT_REACHED();
} else if (!DYNAMO_OPTION(translate_fpu_pc)) {
/* We only support translating when inlined.
* XXX: we can't recover the loss of coarse-grained: we live with that.
*/
exit_is_normal = true;
ASSERT_CURIOSITY(!TEST(FRAG_CANNOT_BE_TRACE, *flags) ||
/* i#1562: it could be marked as no-trace for other reasons */
TEST(FRAG_SELFMOD_SANDBOXED, *flags));
} else {
int reason = 0;
CLIENT_ASSERT(!TEST(FRAG_IS_TRACE, *flags),
"removing an FPU instr in a trace with an FPU state save "
"is not supported");
switch (op) {
case OP_fnsave:
case OP_fnstenv: reason = EXIT_REASON_FLOAT_PC_FNSAVE; break;
case OP_fxsave32: reason = EXIT_REASON_FLOAT_PC_FXSAVE; break;
case OP_fxsave64: reason = EXIT_REASON_FLOAT_PC_FXSAVE64; break;
case OP_xsave32:
case OP_xsavec32:
case OP_xsaveopt32: reason = EXIT_REASON_FLOAT_PC_XSAVE; break;
case OP_xsave64:
case OP_xsavec64:
case OP_xsaveopt64: reason = EXIT_REASON_FLOAT_PC_XSAVE64; break;
default: ASSERT_NOT_REACHED();
}
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, *flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true /*save_xdi*/);
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL /*default*/,
EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(reason)));
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, reason,
EXIT_REASON_OFFSET));
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XDI, DCONTEXT_BASE_SPILL_SLOT));
}
/* At this point, xdi is spilled into DCONTEXT_BASE_SPILL_SLOT */
/* We pass the address in the xbx tls slot, which is untouched by fcache_return.
*
* XXX: handle far refs! Xref drutil_insert_get_mem_addr(), and sandbox_write()
* hitting this same issue.
*/
ASSERT_CURIOSITY(!opnd_is_far_memory_reference(memop));
if (opnd_is_base_disp(memop)) {
opnd_set_size(&memop, OPSZ_lea);
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XDI), memop));
} else {
ASSERT(opnd_is_abs_addr(memop) IF_X64(|| opnd_is_rel_addr(memop)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
OPND_CREATE_INTPTR(opnd_get_addr(memop))));
}
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XDI, FLOAT_PC_STATE_SLOT));
/* Restore app %xdi */
if (TEST(FRAG_SHARED, *flags))
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
else {
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, REG_XDI,
DCONTEXT_BASE_SPILL_SLOT));
}
}
if (exit_is_normal && DYNAMO_OPTION(translate_fpu_pc)) {
instr_t *exit_jmp = next_instr;
while (exit_jmp != NULL && !instr_is_exit_cti(exit_jmp))
            exit_jmp = instr_get_next(exit_jmp);
ASSERT(exit_jmp != NULL);
ASSERT(instr_branch_special_exit(exit_jmp));
instr_branch_set_special_exit(exit_jmp, false);
/* XXX: there could be some other reason this was marked
* cannot-be-trace that we're undoing here...
*/
if (TEST(FRAG_CANNOT_BE_TRACE, *flags))
*flags &= ~FRAG_CANNOT_BE_TRACE;
}
}
/***************************************************************************
* CPUID FOOLING
*/
# ifdef FOOL_CPUID
/* values returned by cpuid for Mobile Pentium MMX processor (family 5, model 8)
* minus mmx (==0x00800000 in CPUID_1_EDX)
* FIXME: change model number to a Pentium w/o MMX!
*/
# define CPUID_0_EAX 0x00000001
# define CPUID_0_EBX 0x756e6547
# define CPUID_0_ECX 0x6c65746e
# define CPUID_0_EDX 0x49656e69
/* extended family, extended model, type, family, model, stepping id: */
/* 20:27, 16:19, 12:13, 8:11, 4:7, 0:3 */
# define CPUID_1_EAX 0x00000581
# define CPUID_1_EBX 0x00000000
# define CPUID_1_ECX 0x00000000
# define CPUID_1_EDX 0x000001bf
static void
mangle_cpuid(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
/* assumption: input value is put in eax on prev instr, or
* on instr prior to that and prev is an inc instr.
* alternative is to insert conditional branch...and save eflags, etc.
*/
instr_t *prev = instr_get_prev(instr);
opnd_t op;
int input, out_eax, out_ebx, out_ecx, out_edx;
LOG(THREAD, LOG_INTERP, 1, "fooling cpuid instruction!\n");
ASSERT(prev != NULL);
prev = instr_get_prev_expanded(dcontext, ilist, instr);
instr_decode(dcontext, instr);
if (!instr_valid(instr))
goto cpuid_give_up;
loginst(dcontext, 2, prev, "prior to cpuid");
/* FIXME: maybe should insert code to dispatch on eax, rather than
* this hack, which is based on photoshop, which either does
* "xor eax,eax" or "xor eax,eax; inc eax"
*/
if (!instr_is_mov_constant(prev, &input)) {
/* we only allow inc here */
if (instr_get_opcode(prev) != OP_inc)
goto cpuid_give_up;
op = instr_get_dst(prev, 0);
if (!opnd_is_reg(op) || opnd_get_reg(op) != REG_EAX)
goto cpuid_give_up;
/* now check instr before inc */
prev = instr_get_prev(prev);
if (!instr_is_mov_constant(prev, &input) || input != 0)
goto cpuid_give_up;
input = 1;
/* now check that mov 0 is into eax */
}
if (instr_num_dsts(prev) == 0)
goto cpuid_give_up;
op = instr_get_dst(prev, 0);
if (!opnd_is_reg(op) || opnd_get_reg(op) != REG_EAX)
goto cpuid_give_up;
if (input == 0) {
out_eax = CPUID_0_EAX;
out_ebx = CPUID_0_EBX;
out_ecx = CPUID_0_ECX;
out_edx = CPUID_0_EDX;
} else {
/* 1 or anything higher all return same info */
out_eax = CPUID_1_EAX;
out_ebx = CPUID_1_EBX;
out_ecx = CPUID_1_ECX;
out_edx = CPUID_1_EDX;
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX),
OPND_CREATE_INT32(out_eax)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_INT32(out_ebx)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32(out_ecx)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EDX),
OPND_CREATE_INT32(out_edx)));
/* destroy the cpuid instruction */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return;
cpuid_give_up:
LOG(THREAD, LOG_INTERP, 1, "\tcpuid fool: giving up\n");
return;
}
# endif /* FOOL_CPUID */
void
mangle_exit_cti_prefixes(dcontext_t *dcontext, instr_t *instr)
{
uint prefixes = instr_get_prefixes(instr);
if (prefixes != 0) {
bool remove = false;
/* Case 8738: while for transparency it would be best to maintain all
* prefixes, our patching and other routines make assumptions about
* the length of exit ctis. Plus our elision removes the whole
* instr in any case.
*/
if (instr_is_cbr(instr)) {
if (TESTANY(~(PREFIX_JCC_TAKEN | PREFIX_JCC_NOT_TAKEN), prefixes)) {
remove = true;
prefixes &= (PREFIX_JCC_TAKEN | PREFIX_JCC_NOT_TAKEN);
}
} else {
/* prefixes on ubr or mbr should be nops and for ubr will mess up
* our size assumptions so drop them (i#435)
*/
remove = true;
prefixes = 0;
}
if (remove) {
LOG(THREAD, LOG_INTERP, 4,
"\tremoving unknown prefixes " PFX " from " PFX "\n", prefixes,
instr_get_raw_bits(instr));
ASSERT(instr_operands_valid(instr)); /* ensure will encode w/o raw bits */
instr_set_prefixes(instr, prefixes);
}
} else if ((instr_get_opcode(instr) == OP_jmp &&
instr_length(dcontext, instr) > JMP_LONG_LENGTH) ||
(instr_is_cbr(instr) && instr_length(dcontext, instr) > CBR_LONG_LENGTH)) {
/* i#1988: remove MPX prefixes as they mess up our nop padding.
* i#1312 covers marking as actual prefixes, and we should keep them.
*/
LOG(THREAD, LOG_INTERP, 4, "\tremoving unknown jmp prefixes from " PFX "\n",
instr_get_raw_bits(instr));
instr_set_raw_bits_valid(instr, false);
}
}
# ifdef X64
/* PR 215397: re-relativize rip-relative data addresses */
/* Should return NULL if it destroys "instr". We don't support both destroying
* (done only for x86: i#393) and changing next_instr (done only for ARM).
*/
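/* Rough sketch for an unreachable rip-rel load such as "mov ebx, [rip+disp]":
 * the destination register is dead here, so it doubles as the scratch:
 *   mov  rbx, <absolute app address>
 *   mov  ebx, [rbx]
 * Other cases spill a scratch reg (xax by default) around the instr.
 */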
instr_t *
mangle_rel_addr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
uint opc = instr_get_opcode(instr);
app_pc tgt;
opnd_t dst, src;
ASSERT(instr_has_rel_addr_reference(instr));
instr_get_rel_addr_target(instr, &tgt);
STATS_INC(rip_rel_instrs);
# ifdef RCT_IND_BRANCH
if (TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_call)) ||
TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_jump))) {
/* PR 215408: record addresses taken via rip-relative instrs */
rct_add_rip_rel_addr(dcontext, tgt _IF_DEBUG(instr_get_translation(instr)));
}
# endif
if (opc == OP_lea) {
/* segment overrides are ignored on lea */
opnd_t immed;
dst = instr_get_dst(instr, 0);
src = instr_get_src(instr, 0);
ASSERT(opnd_is_reg(dst));
ASSERT(opnd_is_rel_addr(src));
ASSERT(opnd_get_addr(src) == tgt);
/* Replace w/ an absolute immed of the target app address, following Intel
* Table 3-59 "64-bit Mode LEA Operation with Address and Operand Size
* Attributes" */
/* FIXME PR 253446: optimization: we could leave this as rip-rel if it
* still reaches from the code cache. */
if (reg_get_size(opnd_get_reg(dst)) == OPSZ_8) {
/* PR 253327: there is no explicit addr32 marker; we assume
* that decode or the user already zeroed out the top bits
* if there was an addr32 prefix byte or the user wants
* that effect */
immed = OPND_CREATE_INTPTR((ptr_int_t)tgt);
} else if (reg_get_size(opnd_get_reg(dst)) == OPSZ_4)
immed = OPND_CREATE_INT32((int)(ptr_int_t)tgt);
else {
ASSERT(reg_get_size(opnd_get_reg(dst)) == OPSZ_2);
immed = OPND_CREATE_INT16((short)(ptr_int_t)tgt);
}
PRE(ilist, instr, INSTR_CREATE_mov_imm(dcontext, dst, immed));
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
STATS_INC(rip_rel_lea);
return NULL; /* == destroyed instr */
} else {
/* PR 251479 will automatically re-relativize if it reaches,
* but if it doesn't we need to handle that here (since that
* involves an encoding length change, which complicates many
* use cases if done at instr encode time).
* We don't yet know exactly where we're going to encode this bb,
* so we're conservative and check for all reachability from our
* heap (assumed to be a single heap: xref PR 215395, and xref
     * potential secondary code caches PR 253446).
*/
if (!rel32_reachable_from_vmcode(tgt)) {
int si = -1, di = -1;
opnd_t relop, newop;
bool spill = true;
/* FIXME PR 253446: for mbr, should share the xcx spill */
reg_id_t scratch_reg = REG_XAX;
si = instr_get_rel_addr_src_idx(instr);
di = instr_get_rel_addr_dst_idx(instr);
if (si >= 0) {
relop = instr_get_src(instr, si);
ASSERT(di < 0 || opnd_same(relop, instr_get_dst(instr, di)));
/* If it's a load (OP_mov_ld, or OP_movzx, etc.), use dead reg */
if (instr_num_srcs(instr) == 1 && /* src is the rip-rel opnd */
instr_num_dsts(instr) == 1 && /* only one dest: a register */
opnd_is_reg(instr_get_dst(instr, 0)) && !instr_is_predicated(instr)) {
opnd_size_t sz = opnd_get_size(instr_get_dst(instr, 0));
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
/* if target is 16 or 8 bit sub-register the whole reg is not dead
* (for 32-bit, top 32 bits are cleared) */
if (reg_is_gpr(reg) && (reg_is_32bit(reg) || reg_is_64bit(reg))) {
spill = false;
scratch_reg = opnd_get_reg(instr_get_dst(instr, 0));
if (sz == OPSZ_4)
scratch_reg = reg_32_to_64(scratch_reg);
/* we checked all opnds: should not read reg */
ASSERT(
!instr_reads_from_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
STATS_INC(rip_rel_unreachable_nospill);
}
}
} else {
relop = instr_get_dst(instr, di);
}
/* PR 263369: we can't just look for instr_reads_from_reg here since
* our no-spill optimization above may miss some writes.
*/
if (spill && instr_uses_reg(instr, scratch_reg)) {
/* mbr (for which we'll use xcx once we optimize) should not
* get here: can't use registers (except xsp) */
ASSERT(scratch_reg == REG_XAX);
do {
scratch_reg++;
ASSERT(scratch_reg <= REG_STOP_64);
} while (instr_uses_reg(instr, scratch_reg));
}
ASSERT(!instr_reads_from_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
ASSERT(!spill || !instr_writes_to_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
/* XXX PR 253446: Optimize by looking ahead for dead registers, and
* sharing single spill across whole bb, or possibly building local code
* cache to avoid unreachability: all depending on how many rip-rel
* instrs we see. We'll watch the stats.
*/
if (spill) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, 0, scratch_reg, MANGLE_RIPREL_SPILL_SLOT,
XAX_OFFSET));
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(scratch_reg),
OPND_CREATE_INTPTR((ptr_int_t)tgt)));
newop = opnd_create_far_base_disp(opnd_get_segment(relop), scratch_reg,
REG_NULL, 0, 0, opnd_get_size(relop));
if (si >= 0)
instr_set_src(instr, si, newop);
if (di >= 0)
instr_set_dst(instr, di, newop);
/* we need the whole spill...restore region to all be marked mangle */
instr_set_our_mangling(instr, true);
if (spill) {
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch_reg,
MANGLE_RIPREL_SPILL_SLOT));
}
STATS_INC(rip_rel_unreachable);
}
}
return next_instr;
}
# endif
/***************************************************************************
* Reference with segment register (fs/gs)
*/
# ifdef UNIX
static int
instr_get_seg_ref_dst_idx(instr_t *instr)
{
int i;
opnd_t opnd;
if (!instr_valid(instr))
return -1;
/* must go to level 3 operands */
for (i = 0; i < instr_num_dsts(instr); i++) {
opnd = instr_get_dst(instr, i);
if (opnd_is_far_base_disp(opnd) &&
(opnd_get_segment(opnd) == SEG_GS || opnd_get_segment(opnd) == SEG_FS))
return i;
}
return -1;
}
static int
instr_get_seg_ref_src_idx(instr_t *instr)
{
int i;
opnd_t opnd;
if (!instr_valid(instr))
return -1;
/* must go to level 3 operands */
for (i = 0; i < instr_num_srcs(instr); i++) {
opnd = instr_get_src(instr, i);
if (opnd_is_far_base_disp(opnd) &&
(opnd_get_segment(opnd) == SEG_GS || opnd_get_segment(opnd) == SEG_FS))
return i;
}
return -1;
}
static ushort tls_slots[4] = { TLS_XAX_SLOT, TLS_XCX_SLOT, TLS_XDX_SLOT, TLS_XBX_SLOT };
/* mangle the instruction OP_mov_seg, i.e. the instruction that
* read/update the segment register.
*/
void
mangle_mov_seg(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
reg_id_t seg;
opnd_t opnd, dst;
opnd_size_t dst_sz;
ASSERT(instr_get_opcode(instr) == OP_mov_seg);
ASSERT(instr_num_srcs(instr) == 1);
ASSERT(instr_num_dsts(instr) == 1);
STATS_INC(app_mov_seg_mangled);
/* for update, we simply change it to a nop because we will
     * update it when dynamorio enters the code cache to execute
* this basic block.
*/
dst = instr_get_dst(instr, 0);
if (opnd_is_reg(dst) && reg_is_segment(opnd_get_reg(dst))) {
app_pc xl8;
seg = opnd_get_reg(dst);
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
# endif
/* must use the original instr, which might be used by caller */
xl8 = get_app_instr_xl8(instr);
instr_reuse(dcontext, instr);
instr_set_opcode(instr, OP_nop);
instr_set_num_opnds(dcontext, instr, 0, 0);
instr_set_translation(instr, xl8);
return;
}
/* for read seg, we mangle it */
opnd = instr_get_src(instr, 0);
ASSERT(opnd_is_reg(opnd));
seg = opnd_get_reg(opnd);
ASSERT(reg_is_segment(seg));
if (seg != SEG_FS && seg != SEG_GS)
return;
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
# endif
/* There are two possible mov_seg instructions:
* 8C/r MOV r/m16,Sreg Move segment register to r/m16
* REX.W + 8C/r MOV r/m64,Sreg Move zero extended 16-bit segment
* register to r/m64
     * Note: in 32-bit mode, the assembler may insert the 16-bit operand-size
* prefix with this instruction.
*/
/* we cannot replace the instruction but only change it. */
dst = instr_get_dst(instr, 0);
dst_sz = opnd_get_size(dst);
opnd =
opnd_create_sized_tls_slot(os_tls_offset(os_get_app_tls_reg_offset(seg)), OPSZ_2);
if (opnd_is_reg(dst)) { /* dst is a register */
/* mov %gs:off => reg */
instr_set_src(instr, 0, opnd);
instr_set_opcode(instr, OP_mov_ld);
if (dst_sz != OPSZ_2)
instr_set_opcode(instr, OP_movzx);
    } else { /* dst is memory, need to steal a register. */
reg_id_t reg;
instr_t *ti;
for (reg = REG_XAX; reg < REG_XBX; reg++) {
if (!instr_uses_reg(instr, reg))
break;
}
        /* We need to save the register to the corresponding slot for a correct
         * restore, so we only use the first four registers.
*/
ASSERT(reg <= REG_XBX);
/* save reg */
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, reg, tls_slots[reg - REG_XAX]));
/* restore reg */
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, reg, tls_slots[reg - REG_XAX]));
switch (dst_sz) {
case OPSZ_8: IF_NOT_X64(ASSERT(false);) break;
case OPSZ_4: IF_X64(reg = reg_64_to_32(reg);) break;
case OPSZ_2:
IF_X64(reg = reg_64_to_32(reg);)
reg = reg_32_to_16(reg);
break;
default: ASSERT(false);
}
/* mov %gs:off => reg */
ti = INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(reg), opnd);
if (dst_sz != OPSZ_2)
instr_set_opcode(ti, OP_movzx);
PRE(ilist, instr, ti);
/* change mov_seg to mov_st: mov reg => [mem] */
instr_set_src(instr, 0, opnd_create_reg(reg));
instr_set_opcode(instr, OP_mov_st);
}
}
/* mangle the instruction that reference memory via segment register */
void
mangle_seg_ref(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
int si = -1, di = -1;
opnd_t segop, newop;
bool spill = true;
reg_id_t scratch_reg = REG_XAX, seg = REG_NULL;
/* exit cti won't be seg ref */
if (instr_is_exit_cti(instr))
return;
    /* mbr will be handled separately */
if (instr_is_mbr(instr))
return;
if (instr_get_opcode(instr) == OP_lea)
return;
/* XXX: maybe using decode_cti and then a check on prefix could be
     * more efficient as it only examines a few bytes and avoids fully decoding
     * the instruction. For simplicity, we examine every operand instead.
*/
/* 1. get ref opnd */
si = instr_get_seg_ref_src_idx(instr);
di = instr_get_seg_ref_dst_idx(instr);
if (si < 0 && di < 0)
return;
if (si >= 0) {
segop = instr_get_src(instr, si);
ASSERT(di < 0 || opnd_same(segop, instr_get_dst(instr, di)));
} else {
segop = instr_get_dst(instr, di);
}
seg = opnd_get_segment(segop);
if (seg != SEG_GS && seg != SEG_FS)
return;
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
# endif
STATS_INC(app_seg_refs_mangled);
DOLOG(3, LOG_INTERP,
{ loginst(dcontext, 3, instr, "reference with fs/gs segment"); });
/* 2. decide the scratch reg */
/* Opt: if it's a load (OP_mov_ld, or OP_movzx, etc.), use dead reg */
if (si >= 0 && instr_num_srcs(instr) == 1 && /* src is the seg ref opnd */
instr_num_dsts(instr) == 1 && /* only one dest: a register */
opnd_is_reg(instr_get_dst(instr, 0)) && !instr_is_predicated(instr)) {
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
/* if target is 16 or 8 bit sub-register the whole reg is not dead
* (for 32-bit, top 32 bits are cleared) */
if (reg_is_gpr(reg) && (reg_is_32bit(reg) || reg_is_64bit(reg)) &&
/* mov [%fs:%xax] => %xax */
!instr_reads_from_reg(instr, reg, DR_QUERY_DEFAULT)) {
spill = false;
scratch_reg = reg;
# ifdef X64
if (opnd_get_size(instr_get_dst(instr, 0)) == OPSZ_4)
scratch_reg = reg_32_to_64(reg);
# endif
}
}
if (spill) {
/* we pick a scratch register from XAX, XBX, XCX, or XDX
* that has direct TLS slots.
*/
for (scratch_reg = REG_XAX; scratch_reg <= REG_XBX; scratch_reg++) {
/* the register must not be used by the instr, either read or write,
* because we will mangle it when executing the instr (no read from),
* and restore it after that instr (no write to).
*/
if (!instr_uses_reg(instr, scratch_reg))
break;
}
ASSERT(scratch_reg <= REG_XBX);
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, scratch_reg,
tls_slots[scratch_reg - REG_XAX]));
}
newop = mangle_seg_ref_opnd(dcontext, ilist, instr, segop, scratch_reg);
if (si >= 0)
instr_set_src(instr, si, newop);
if (di >= 0)
instr_set_dst(instr, di, newop);
/* we need the whole spill...restore region to all be marked mangle */
instr_set_our_mangling(instr, true);
/* FIXME: i#107 we should check the bound and raise signal if out of bound. */
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "re-wrote app tls reference"); });
if (spill) {
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch_reg,
tls_slots[scratch_reg - REG_XAX]));
}
}
# endif /* UNIX */
# ifdef ANNOTATIONS
/***************************************************************************
* DR and Valgrind annotations
*/
void
mangle_annotation_helper(dcontext_t *dcontext, instr_t *label, instrlist_t *ilist)
{
dr_instr_label_data_t *label_data = instr_get_label_data_area(label);
dr_annotation_handler_t *handler = GET_ANNOTATION_HANDLER(label_data);
dr_annotation_receiver_t *receiver = handler->receiver_list;
opnd_t *args = NULL;
ASSERT(handler->type == DR_ANNOTATION_HANDLER_CALL);
while (receiver != NULL) {
if (handler->num_args != 0) {
args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, handler->num_args, ACCT_CLEANCALL,
UNPROTECTED);
memcpy(args, handler->args, sizeof(opnd_t) * handler->num_args);
}
dr_insert_clean_call_ex_varg(dcontext, ilist, label,
receiver->instrumentation.callback,
receiver->save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0,
handler->num_args, args);
if (handler->num_args != 0) {
HEAP_ARRAY_FREE(dcontext, args, opnd_t, handler->num_args, ACCT_CLEANCALL,
UNPROTECTED);
}
receiver = receiver->next;
}
}
# endif
/* END OF CONTROL-FLOW MANGLING ROUTINES
*###########################################################################
*###########################################################################
*/
/* SELF-MODIFYING-CODE SANDBOXING
*
* When we detect it, we take an exit that targets our own routine
* fragment_self_write. Dispatch checks for that target and if it finds it,
* it calls that routine, so don't worry about building a bb for it.
* Returns false if the bb has invalid instrs in the middle and it should
* be rebuilt from scratch.
*/
# undef SAVE_TO_DC_OR_TLS
# undef RESTORE_FROM_DC_OR_TLS
/* PR 244737: x64 uses tls to avoid reachability issues w/ absolute addresses */
# ifdef X64
# define SAVE_TO_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_save_to_tls((dc), (reg), (tls_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_restore_from_tls((dc), (reg), (tls_offs))
# else
# define SAVE_TO_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_save_to_dcontext((dc), (reg), (dc_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_restore_from_dcontext((dc), (reg), (dc_offs))
# endif
static void
sandbox_rep_instr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr, instr_t *next,
app_pc start_pc, app_pc end_pc /* end is open */)
{
/* put checks before instr, set some reg as a flag, act on it
* after instr (even if overwrite self will execute rep to completion)
* want to read DF to find direction (0=inc xsi/xdi, 1=dec),
* but only way to read is to do a pushf!
* Solution: if cld or std right before rep instr, use that info,
* otherwise check for BOTH directions!
* xcx is a pre-check, xsi/xdi are inc/dec after memory op, so
* xdi+xcx*opndsize == instr of NEXT write, so open-ended there:
* if DF==0:
* if (xdi < end_pc && xdi+xcx*opndsize > start_pc) => self-write
* if DF==1:
     *   if (xdi > start_pc && xdi-xcx*opndsize < end_pc) => self-write
* both:
* if (xdi-xcx*opndsize < end_pc && xdi+xcx*opndsize > start_pc) => self-write
* opndsize is 1,2, or 4 => use lea for mul
* lea (xdi,xcx,opndsize),xcx
*
* save flags and xax
* save xbx
* lea (xdi,xcx,opndsize),xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): save xdx
* if x64 && start_pc > 4GB: mov start_pc, xdx
* cmp xbx, IF_X64_>4GB_ELSE(xdx, start_pc)
* mov $0,xbx # for if ok
* jle ok # open b/c address of next rep write
* lea (,xcx,opndsize),xbx
* neg xbx # sub does dst - src
* add xdi,xbx
* if x64 && end_pc > 4GB: mov end_pc, xdx
* cmp xbx, IF_X64_>4GB_ELSE(xdx, end_pc)
* mov $0,xbx # for if ok
* jge ok # end is open
* mov $1,xbx
* ok:
* restore flags and xax (xax used by stos)
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xdx
* <rep instr> # doesn't use xbx
* (PR 267764/i#398: we special-case restore xbx on cxt xl8 if this instr faults)
* mov xbx,xcx # we can use xcx, it's dead since 0 after rep
* restore xbx
* jecxz ok2 # if xbx was 1 we'll fall through and exit
* mov $0,xcx
* jmp <instr after write, flag as INSTR_BRANCH_SPECIAL_EXIT>
* ok2:
* <label> # ok2 can't == next, b/c next may be ind br -> mangled w/ instrs
* # inserted before it, so jecxz would target too far
*/
instr_t *ok = INSTR_CREATE_label(dcontext);
instr_t *ok2 = INSTR_CREATE_label(dcontext);
instr_t *jmp;
app_pc after_write;
uint opndsize = opnd_size_in_bytes(opnd_get_size(instr_get_dst(instr, 0)));
uint flags =
instr_eflags_to_fragment_eflags(forward_eflags_analysis(dcontext, ilist, next));
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
instr_t *next_app = next;
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "writes memory"); });
    ASSERT(!instr_is_call_indirect(instr)); /* FIXME: can you have REP on CALL's? */
/* skip meta instrs to find next app instr (xref PR 472190) */
while (next_app != NULL && instr_is_meta(next_app))
next_app = instr_get_next(next_app);
if (next_app != NULL) {
/* client may have inserted non-meta instrs, so use translation first
* (xref PR 472190)
*/
if (instr_get_app_pc(next_app) != NULL)
after_write = instr_get_app_pc(next_app);
else if (!instr_raw_bits_valid(next_app)) {
/* next must be the final jmp! */
ASSERT(instr_is_ubr(next_app) && instr_get_next(next_app) == NULL);
after_write = opnd_get_pc(instr_get_target(next_app));
} else
after_write = instr_get_raw_bits(next_app);
} else {
after_write = end_pc;
}
insert_save_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_XDI, REG_XCX, opndsize, 0, OPSZ_lea)));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
}
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDX)));
} else {
# endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
# ifdef X64
}
# endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_jle, opnd_create_instr(ok)));
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_NULL, REG_XCX, opndsize, 0, OPSZ_lea)));
PRE(ilist, instr, INSTR_CREATE_neg(dcontext, opnd_create_reg(REG_XBX)));
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XBX), opnd_create_reg(REG_XDI)));
# ifdef X64
if ((ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX),
OPND_CREATE_INTPTR(end_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDX)));
} else {
# endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)end_pc)));
# ifdef X64
}
# endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(ok)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(1)));
PRE(ilist, instr, ok);
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
}
# endif
/* instr goes here */
PRE(ilist, next,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX),
opnd_create_reg(REG_XBX)));
PRE(ilist, next, RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
PRE(ilist, next, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(ok2)));
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INT32(0))); /* on x64 top 32 bits zeroed */
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(after_write));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, next, jmp);
PRE(ilist, next, ok2);
}
static void
sandbox_write(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr, instr_t *next,
opnd_t op, app_pc start_pc, app_pc end_pc /* end is open */)
{
/* can only test for equality w/o modifying flags, so save them
* if (addr < end_pc && addr+opndsize > start_pc) => self-write
* <write memory>
* save xbx
* lea memory,xbx
* save flags and xax # after lea of memory in case memory includes xax
* if x64 && (start_pc > 4GB || end_pc > 4GB): save xcx
* if x64 && end_pc > 4GB: mov end_pc, xcx
* cmp xbx, IF_X64_>4GB_ELSE(xcx, end_pc)
* jge ok # end is open
* lea opndsize(xbx),xbx
* if x64 && start_pc > 4GB: mov start_pc, xcx
* cmp xbx, IF_X64_>4GB_ELSE(xcx, start_pc)
* jle ok # open since added size
* restore flags (using xbx) and xax
* restore xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xcx
* jmp <instr after write, flag as INSTR_BRANCH_SPECIAL_EXIT>
* ok:
* restore flags and xax
* restore xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xcx
*/
instr_t *ok = INSTR_CREATE_label(dcontext), *jmp;
app_pc after_write = NULL;
uint opndsize = opnd_size_in_bytes(opnd_get_size(op));
uint flags =
instr_eflags_to_fragment_eflags(forward_eflags_analysis(dcontext, ilist, next));
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
instr_t *next_app = next;
instr_t *get_addr_at = next;
int opcode = instr_get_opcode(instr);
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "writes memory"); });
/* skip meta instrs to find next app instr (xref PR 472190) */
while (next_app != NULL && instr_is_meta(next_app))
next_app = instr_get_next(next_app);
if (next_app != NULL) {
/* client may have inserted non-meta instrs, so use translation first
* (xref PR 472190)
*/
if (instr_get_app_pc(next_app) != NULL)
after_write = instr_get_app_pc(next_app);
else if (!instr_raw_bits_valid(next_app)) {
/* next must be the final artificially added jmp! */
ASSERT(instr_is_ubr(next_app) && instr_get_next(next_app) == NULL);
/* for sure this is the last jmp out, but it
* doesn't have to be a direct jmp but instead
             * it could be the exit branch we add
             * for an indirect call - which is the only ind branch
* that writes to memory
* CALL* already means that we're leaving the block and it cannot be a selfmod
* instruction even though it writes to memory
*/
DOLOG(4, LOG_INTERP, { loginst(dcontext, 4, next_app, "next app instr"); });
after_write = opnd_get_pc(instr_get_target(next_app));
LOG(THREAD, LOG_INTERP, 4, "after_write = " PFX " next should be final jmp\n",
after_write);
} else
after_write = instr_get_raw_bits(next_app);
} else {
ASSERT_NOT_TESTED();
after_write = end_pc;
}
if (opcode == OP_ins || opcode == OP_movs || opcode == OP_stos) {
/* These instrs modify their own addressing register so we must
* get the address pre-write. None of them touch xbx.
*/
get_addr_at = instr;
ASSERT(!instr_writes_to_reg(instr, REG_XBX, DR_QUERY_DEFAULT) &&
!instr_reads_from_reg(instr, REG_XBX, DR_QUERY_DEFAULT));
}
PRE(ilist, get_addr_at,
SAVE_TO_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
/* XXX: Basically reimplementing drutil_insert_get_mem_addr(). */
/* FIXME i#986: Sandbox far writes. Not a hypothetical problem! NaCl uses
* segments for its x86 sandbox, although they are 0 based with a limit.
* qq.exe has them in sandboxed code.
*/
ASSERT_CURIOSITY(!opnd_is_far_memory_reference(op) ||
/* Standard far refs */
opcode == OP_ins || opcode == OP_movs || opcode == OP_stos);
if (opnd_is_base_disp(op)) {
/* change to OPSZ_lea for lea */
opnd_set_size(&op, OPSZ_lea);
PRE(ilist, get_addr_at, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX), op));
if ((opcode == OP_push && opnd_is_base_disp(op) &&
opnd_get_index(op) == DR_REG_NULL &&
reg_to_pointer_sized(opnd_get_base(op)) == DR_REG_XSP) ||
opcode == OP_push_imm || opcode == OP_pushf || opcode == OP_pusha ||
opcode == OP_pop /* pop into stack slot */ || opcode == OP_call ||
opcode == OP_call_ind || opcode == OP_call_far || opcode == OP_call_far_ind) {
/* Undo xsp adjustment made by the instruction itself.
* We could use get_addr_at to acquire the address pre-instruction
* for some of these, but some can read or write ebx.
*/
PRE(ilist, next,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_NULL, REG_XBX, 1,
-opnd_get_disp(op), OPSZ_lea)));
}
} else {
/* handle abs addr pointing within fragment */
/* XXX: Can optimize this by doing address comparison at translation
* time. Might happen frequently if a JIT stores data on the same page
* as its code. For now we hook into existing sandboxing code.
*/
app_pc abs_addr;
ASSERT(opnd_is_abs_addr(op) IF_X64(|| opnd_is_rel_addr(op)));
abs_addr = opnd_get_addr(op);
PRE(ilist, get_addr_at,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INTPTR(abs_addr)));
}
insert_save_eflags(dcontext, ilist, next, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next, SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
if ((ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(end_pc)));
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XCX)));
} else {
# endif
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)end_pc)));
# ifdef X64
}
# endif
PRE(ilist, next, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(ok)));
PRE(ilist, next,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_XBX, REG_NULL, 0, opndsize, OPSZ_lea)));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XCX)));
} else {
# endif
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
# ifdef X64
}
# endif
PRE(ilist, next, INSTR_CREATE_jcc(dcontext, OP_jle, opnd_create_instr(ok)));
insert_restore_eflags(dcontext, ilist, next, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, next, RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
# endif
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(after_write));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, next, jmp);
PRE(ilist, next, ok);
insert_restore_eflags(dcontext, ilist, next, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, next, RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
# endif
}
static bool
sandbox_top_of_bb_check_s2ro(dcontext_t *dcontext, app_pc start_pc)
{
return (DYNAMO_OPTION(sandbox2ro_threshold) > 0 &&
/* we can't make stack regions ro so don't put in the instrumentation */
!is_address_on_stack(dcontext, start_pc) &&
/* case 9098 we don't want to ever make RO untrackable driver areas */
!is_driver_address(start_pc));
}
static void
sandbox_top_of_bb(dcontext_t *dcontext, instrlist_t *ilist, bool s2ro, uint flags,
app_pc start_pc, app_pc end_pc, /* end is open */
bool for_cache,
/* for obtaining the two patch locations: */
patch_list_t *patchlist, cache_pc *copy_start_loc,
cache_pc *copy_end_loc)
{
/* add check at top of ilist that compares actual app instructions versus
* copy we saved, stored in cache right after fragment itself. leave its
* start address blank here, will be touched up after emitting this ilist.
*
* FIXME case 8165/PR 212600: optimize this: move reg restores to
* custom fcache_return, use cmpsd instead of cmpsb, etc.
*
* if eflags live entering this bb:
* save xax
* lahf
* seto %al
* endif
* if (-sandbox2ro_threshold > 0)
* if x64: save xcx
* incl &vm_area_t->exec_count (for x64, via xcx)
* cmp sandbox2ro_threshold, vm_area_t->exec_count (for x64, via xcx)
* if eflags live entering this bb, or x64:
* jl past_threshold
* if x64: restore xcx
* if eflags live entering this bb:
* jmp restore_eflags_and_exit
* else
* jmp start_pc marked as selfmod exit
* endif
* past_threshold:
* else
* jge start_pc marked as selfmod exit
* endif
* endif
     * if (-sandbox2ro_threshold == 0 || !x64)
* save xcx
* endif
* save xsi
* save xdi
* if stats:
* inc num_sandbox_execs stat (for x64, via xsi)
* endif
* mov start_pc,xsi
* mov copy_start_pc,xdi # 1 opcode byte, then offset
* # => patch point 1
* cmpsb
* if copy_size > 1 # not an opt: for correctness: if "repe cmpsb" has xcx==0, it
* # doesn't touch eflags and we treat cmp results as cmpsb results
* jne check_results
* if x64 && start_pc > 4GB
* mov start_pc, xcx
* cmp xsi, xcx
* else
* cmp xsi, start_pc
* endif
* mov copy_size-1, xcx # -1 b/c we already checked 1st byte
* jge forward
* mov copy_end_pc - 1, xdi # -1 b/c it is the end of this basic block
* # => patch point 2
* mov end_pc - 1, xsi
* forward:
* repe cmpsb
* endif # copy_size > 1
* check_results:
* restore xcx
* restore xsi
* restore xdi
* if eflags live:
* je start_bb
* restore_eflags_and_exit:
* add $0x7f,%al
* sahf
* restore xax
* jmp start_pc marked as selfmod exit
* else
* jne start_pc marked as selfmod exit
* endif
* start_bb:
* if eflags live:
* add $0x7f,%al
* sahf
* restore xax
* endif
*/
instr_t *instr, *jmp;
instr_t *restore_eflags_and_exit = NULL;
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
bool saved_xcx = false;
instr_t *check_results = INSTR_CREATE_label(dcontext);
instr = instrlist_first_expanded(dcontext, ilist);
insert_save_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
if (s2ro) {
/* It's difficult to use lea/jecxz here as we want to use a shared
* counter but no lock, and thus need a relative comparison, while
* lea/jecxz can only do an exact comparison. We could be exact by
* having a separate counter per (private) fragment but by spilling
* eflags we can inc memory, making the scheme here not inefficient.
*/
uint thresh = DYNAMO_OPTION(sandbox2ro_threshold);
uint *counter;
if (for_cache)
counter = get_selfmod_exec_counter(start_pc);
else {
/* Won't find exec area since not a real fragment (probably
* a recreation post-flush). Won't execute, so NULL is fine.
*/
counter = NULL;
}
# ifdef X64
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
saved_xcx = true;
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(counter)));
PRE(ilist, instr, INSTR_CREATE_inc(dcontext, OPND_CREATE_MEM32(REG_XCX, 0)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, OPND_CREATE_MEM32(REG_XCX, 0),
OPND_CREATE_INT_32OR8((int)thresh)));
# else
PRE(ilist, instr,
INSTR_CREATE_inc(dcontext, OPND_CREATE_ABSMEM(counter, OPSZ_4)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, OPND_CREATE_ABSMEM(counter, OPSZ_4),
OPND_CREATE_INT_32OR8(thresh)));
# endif
if (TEST(FRAG_WRITES_EFLAGS_6, flags) IF_X64(&&false)) {
jmp = INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
} else {
instr_t *past_threshold = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc_short(dcontext, OP_jl_short,
opnd_create_instr(past_threshold)));
# ifdef X64
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
# endif
if (!TEST(FRAG_WRITES_EFLAGS_6, flags)) {
ASSERT(restore_eflags_and_exit == NULL);
restore_eflags_and_exit = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jmp(dcontext,
opnd_create_instr(restore_eflags_and_exit)));
}
# ifdef X64
else {
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
}
# endif
PRE(ilist, instr, past_threshold);
}
}
if (!saved_xcx) {
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XSI, TLS_XBX_SLOT, XSI_OFFSET));
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XDI, TLS_XDX_SLOT, XDI_OFFSET));
DOSTATS({
if (GLOBAL_STATS_ON()) {
/* We only do global inc, not bothering w/ thread-private stats.
* We don't care about races: ballpark figure is good enough.
* We could do a direct inc of memory for 32-bit.
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(
dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(GLOBAL_STAT_ADDR(num_sandbox_execs))));
PRE(ilist, instr,
INSTR_CREATE_inc(
dcontext,
opnd_create_base_disp(REG_XSI, REG_NULL, 0, 0, OPSZ_STATS)));
}
});
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
/* will become copy start */
OPND_CREATE_INTPTR(start_pc)));
if (patchlist != NULL) {
ASSERT(copy_start_loc != NULL);
add_patch_marker(patchlist, instr_get_prev(instr), PATCH_ASSEMBLE_ABSOLUTE,
-(short)sizeof(cache_pc), (ptr_uint_t *)copy_start_loc);
}
PRE(ilist, instr, INSTR_CREATE_cmps_1(dcontext));
/* For a 1-byte copy size we cannot use "repe cmpsb" as it won't
* touch eflags and we'll treat the cmp results as cmpsb results, which
* doesn't work (cmp will never be equal)
*/
if (end_pc - start_pc > 1) {
instr_t *forward = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_jne, opnd_create_instr(check_results)));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSI),
opnd_create_reg(REG_XCX)));
} else {
# endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
# ifdef X64
}
# endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(end_pc - (start_pc + 1))));
        /* i#2155: In the case where the direction flag is set, xsi will be less
         * than start_pc after cmps, and the jump branch will not be taken.
*/
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(forward)));
/* i#2155: The immediate value is only psychological
* since it will be modified in finalize_selfmod_sandbox.
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
/* will become copy end */
OPND_CREATE_INTPTR(end_pc - 1)));
if (patchlist != NULL) {
ASSERT(copy_end_loc != NULL);
add_patch_marker(patchlist, instr_get_prev(instr), PATCH_ASSEMBLE_ABSOLUTE,
-(short)sizeof(cache_pc), (ptr_uint_t *)copy_end_loc);
}
/* i#2155: The next rep cmps comparison will be done backward,
* and thus it should be started at end_pc - 1
* because current basic block is [start_pc:end_pc-1].
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(end_pc - 1)));
PRE(ilist, instr, forward);
PRE(ilist, instr, INSTR_CREATE_rep_cmps_1(dcontext));
}
PRE(ilist, instr, check_results);
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XSI, TLS_XBX_SLOT, XSI_OFFSET));
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XDI, TLS_XDX_SLOT, XDI_OFFSET));
if (!TEST(FRAG_WRITES_EFLAGS_6, flags)) {
instr_t *start_bb = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_je, opnd_create_instr(start_bb)));
if (restore_eflags_and_exit != NULL) /* somebody needs this label */
PRE(ilist, instr, restore_eflags_and_exit);
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) &&
x86_to_x64_ibl_opt));
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
PRE(ilist, instr, start_bb);
} else {
jmp = INSTR_CREATE_jcc(dcontext, OP_jne, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
}
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
/* fall-through to bb start */
}
/* returns false if failed to add sandboxing b/c of a problematic ilist --
* invalid instrs, elided ctis, etc.
*/
bool
insert_selfmod_sandbox(dcontext_t *dcontext, instrlist_t *ilist, uint flags,
app_pc start_pc, app_pc end_pc, /* end is open */
bool record_translation, bool for_cache)
{
instr_t *instr, *next;
if (!INTERNAL_OPTION(hw_cache_consistency))
return true; /* nothing to do */
/* this code assumes bb covers single, contiguous region */
ASSERT((flags & FRAG_HAS_DIRECT_CTI) == 0);
/* store first instr so loop below will skip top check */
instr = instrlist_first_expanded(dcontext, ilist);
instrlist_set_our_mangling(ilist, true); /* PR 267260 */
if (record_translation) {
/* skip client instrumentation, if any, as is done below */
while (instr != NULL && instr_is_meta(instr))
instr = instr_get_next_expanded(dcontext, ilist, instr);
/* make sure inserted instrs translate to the proper original instr */
ASSERT(instr != NULL && instr_get_translation(instr) != NULL);
instrlist_set_translation_target(ilist, instr_get_translation(instr));
}
sandbox_top_of_bb(dcontext, ilist, sandbox_top_of_bb_check_s2ro(dcontext, start_pc),
flags, start_pc, end_pc, for_cache, NULL, NULL, NULL);
if (INTERNAL_OPTION(sandbox_writes)) {
for (; instr != NULL; instr = next) {
int i, opcode;
opnd_t op;
opcode = instr_get_opcode(instr);
if (!instr_valid(instr)) {
/* invalid instr -- best to truncate block here, easiest way
* to do that and get all flags right is to re-build it,
* but this time we'll use full decode so we'll avoid the discrepancy
* between fast and full decode on invalid instr detection.
*/
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
return false;
}
/* don't mangle anything that mangle inserts! */
next = instr_get_next_expanded(dcontext, ilist, instr);
if (instr_is_meta(instr))
continue;
if (record_translation) {
/* make sure inserted instrs translate to the proper original instr */
ASSERT(instr_get_translation(instr) != NULL);
instrlist_set_translation_target(ilist, instr_get_translation(instr));
}
if (opcode == OP_rep_ins || opcode == OP_rep_movs || opcode == OP_rep_stos) {
sandbox_rep_instr(dcontext, ilist, instr, next, start_pc, end_pc);
continue;
}
/* FIXME case 8165: optimize for multiple push/pop */
for (i = 0; i < instr_num_dsts(instr); i++) {
op = instr_get_dst(instr, i);
if (opnd_is_memory_reference(op)) {
/* ignore CALL* since last anyways */
if (instr_is_call_indirect(instr)) {
ASSERT(next != NULL && !instr_raw_bits_valid(next));
/* FIXME case 8165: why do we ever care about the last
* instruction modifying anything?
*/
/* conversion of IAT calls (but not elision)
* transforms this into a direct CALL,
* in that case 'next' is a direct jmp
* fall through, so has no exit flags
*/
ASSERT(EXIT_IS_CALL(instr_exit_branch_type(next)) ||
(DYNAMO_OPTION(IAT_convert) &&
TEST(INSTR_IND_CALL_DIRECT, instr->flags)));
LOG(THREAD, LOG_INTERP, 3,
" ignoring CALL* at end of fragment\n");
/* This test could be done outside of this loop on
* destinations, but since it is rare it is faster
* to do it here. Using continue instead of break in case
* it gets moved out.
*/
continue;
}
if (opnd_is_abs_addr(op) IF_X64(|| opnd_is_rel_addr(op))) {
app_pc abs_addr = opnd_get_addr(op);
uint size = opnd_size_in_bytes(opnd_get_size(op));
if (!POINTER_OVERFLOW_ON_ADD(abs_addr, size) &&
(abs_addr + size < start_pc || abs_addr >= end_pc)) {
/* This is an absolute memory reference that points
* outside the current basic block and doesn't need
* sandboxing.
*/
continue;
}
}
sandbox_write(dcontext, ilist, instr, next, op, start_pc, end_pc);
}
}
}
}
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
return true;
}
/* Offsets within selfmod sandbox top-of-bb code that we patch once
* the code is emitted, as the values depend on the emitted address.
* These vary by whether sandbox_top_of_bb_check_s2ro() and whether
* eflags are not written, all written, or just OF is written.
* For the copy_size == 1 variation, we simply ignore the 2nd patch point.
*/
static bool selfmod_s2ro[] = { false, true };
static uint selfmod_eflags[] = { FRAG_WRITES_EFLAGS_6, FRAG_WRITES_EFLAGS_OF, 0 };
# define SELFMOD_NUM_S2RO (sizeof(selfmod_s2ro) / sizeof(selfmod_s2ro[0]))
# define SELFMOD_NUM_EFLAGS (sizeof(selfmod_eflags) / sizeof(selfmod_eflags[0]))
# ifdef X64 /* additional complexity: start_pc > 4GB? */
static app_pc selfmod_gt4G[] = { NULL, (app_pc)(POINTER_MAX - 2) /*so end can be +2*/ };
# define SELFMOD_NUM_GT4G (sizeof(selfmod_gt4G) / sizeof(selfmod_gt4G[0]))
# endif
uint selfmod_copy_start_offs[SELFMOD_NUM_S2RO][SELFMOD_NUM_EFLAGS] IF_X64([
SELFMOD_NUM_GT4G]);
uint selfmod_copy_end_offs[SELFMOD_NUM_S2RO][SELFMOD_NUM_EFLAGS] IF_X64([
SELFMOD_NUM_GT4G]);
void
set_selfmod_sandbox_offsets(dcontext_t *dcontext)
{
int i, j;
# ifdef X64
int k;
# endif
instrlist_t ilist;
patch_list_t patch;
static byte buf[256];
uint len;
/* We assume this is called at init, when .data is +w and we need no
* synch on accessing buf */
ASSERT(!dynamo_initialized);
for (i = 0; i < SELFMOD_NUM_S2RO; i++) {
for (j = 0; j < SELFMOD_NUM_EFLAGS; j++) {
# ifdef X64
for (k = 0; k < SELFMOD_NUM_GT4G; k++) {
# endif
cache_pc start_pc, end_pc;
app_pc app_start;
instr_t *inst;
instrlist_init(&ilist);
/* sandbox_top_of_bb assumes there's an instr there */
instrlist_append(&ilist, INSTR_CREATE_label(dcontext));
init_patch_list(&patch, PATCH_TYPE_ABSOLUTE);
app_start = IF_X64_ELSE(selfmod_gt4G[k], NULL);
sandbox_top_of_bb(dcontext, &ilist, selfmod_s2ro[i], selfmod_eflags[j],
/* we must have a >1-byte region to get
* both patch points */
app_start, app_start + 2, false, &patch, &start_pc,
&end_pc);
/* The exit cti's may not reachably encode (normally
* they'd be mangled away) so we munge them first
*/
for (inst = instrlist_first(&ilist); inst != NULL;
inst = instr_get_next(inst)) {
if (instr_is_exit_cti(inst)) {
instr_set_target(inst, opnd_create_pc(buf));
}
}
len = encode_with_patch_list(dcontext, &patch, &ilist, buf);
ASSERT(len < BUFFER_SIZE_BYTES(buf));
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(start_pc - buf)));
selfmod_copy_start_offs[i][j] IF_X64([k]) = (uint)(start_pc - buf);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(end_pc - buf)));
selfmod_copy_end_offs[i][j] IF_X64([k]) = (uint)(end_pc - buf);
LOG(THREAD, LOG_EMIT, 3, "selfmod offs %d %d" IF_X64(" %d") ": %u %u\n",
i, j, IF_X64_(k) selfmod_copy_start_offs[i][j] IF_X64([k]),
selfmod_copy_end_offs[i][j] IF_X64([k]));
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
# ifdef X64
}
# endif
}
}
}
void
finalize_selfmod_sandbox(dcontext_t *dcontext, fragment_t *f)
{
cache_pc copy_pc = FRAGMENT_SELFMOD_COPY_PC(f);
byte *pc;
int i, j;
# ifdef X64
int k = ((ptr_uint_t)f->tag) > UINT_MAX ? 1 : 0;
# endif
i = (sandbox_top_of_bb_check_s2ro(dcontext, f->tag)) ? 1 : 0;
j = (TEST(FRAG_WRITES_EFLAGS_6, f->flags)
? 0
: (TEST(FRAG_WRITES_EFLAGS_OF, f->flags) ? 1 : 2));
pc = FCACHE_ENTRY_PC(f) + selfmod_copy_start_offs[i][j] IF_X64([k]);
/* The copy start gets updated after sandbox_top_of_bb. */
*((cache_pc *)pc) = copy_pc;
if (FRAGMENT_SELFMOD_COPY_CODE_SIZE(f) > 1) {
pc = FCACHE_ENTRY_PC(f) + selfmod_copy_end_offs[i][j] IF_X64([k]);
/* i#2155: The copy end gets updated.
* This value will be used in the case where the direction flag is set.
* It will then be the starting point for the backward repe cmps.
*/
*((cache_pc *)pc) = (copy_pc + FRAGMENT_SELFMOD_COPY_CODE_SIZE(f) - 1);
} /* else, no 2nd patch point */
}
#endif /* !STANDALONE_DECODER */
/***************************************************************************/
| 1 | 15,145 | This lea should match instr_check_xsp_mangling and be undone. I don't think i#3307 applies here. | DynamoRIO-dynamorio | c |
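
A quick model of the self-write predicate derived in sandbox_rep_instr's long comment above, useful for checking the open/closed bounds: the function name, the uintptr types, and the ignored pointer wraparound are all illustrative assumptions, not DynamoRIO code (which emits lea/cmp sequences at runtime instead).

package main

import "fmt"

// repMayWriteRegion restates the "both directions" check from
// sandbox_rep_instr: without knowing DF, a rep string write of xcx
// elements of size opndsize starting at xdi may hit [startPC, endPC)
// iff the widest possible write range overlaps it. endPC is open.
func repMayWriteRegion(xdi, xcx, opndsize, startPC, endPC uintptr) bool {
	low := xdi - xcx*opndsize  // lowest byte a DF=1 copy could reach
	high := xdi + xcx*opndsize // one past the last byte a DF=0 copy could reach
	return low < endPC && high > startPC
}

func main() {
	// A 16-byte rep write landing inside a [0x1000, 0x1040) code region
	// triggers the check; one safely past the region does not.
	fmt.Println(repMayWriteRegion(0x1010, 16, 1, 0x1000, 0x1040)) // true
	fmt.Println(repMayWriteRegion(0x2000, 16, 1, 0x1000, 0x1040)) // false
}
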
@@ -87,6 +87,11 @@ type Store struct {
tipIndex *TipIndex
}
+type ipldStateThing interface {
+ Get(ctx context.Context, c cid.Cid, out interface{}) error
+ Put(ctx context.Context, v interface{}) (cid.Cid, error)
+}
+
// NewStore constructs a new default store.
func NewStore(ds repo.Datastore, cst state.IpldStore, stl state.TreeLoader, genesisCid cid.Cid) *Store {
return &Store{ | 1 | package chain
import (
"context"
"runtime/debug"
"sync"
"github.com/cskr/pubsub"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"github.com/filecoin-project/go-filecoin/actor/builtin"
"github.com/filecoin-project/go-filecoin/metrics/tracing"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
)
// NewHeadTopic is the topic used to publish new heads.
const NewHeadTopic = "new-head"
// GenesisKey is the key at which the genesis Cid is written in the datastore.
var GenesisKey = datastore.NewKey("/consensus/genesisCid")
var logStore = logging.Logger("chain.store")
var headKey = datastore.NewKey("/chain/heaviestTipSet")
type ipldSource struct {
	// cst is a store allowing access, (un)marshalling,
	// and interop with go-ipld-hamt.
cborStore state.IpldStore
}
func newSource(cst state.IpldStore) *ipldSource {
return &ipldSource{
cborStore: cst,
}
}
// GetBlock retrieves a filecoin block by cid from the IPLD store.
func (source *ipldSource) GetBlock(ctx context.Context, c cid.Cid) (*types.Block, error) {
var block types.Block
err := source.cborStore.Get(ctx, c, &block)
if err != nil {
return nil, errors.Wrapf(err, "failed to get block %s", c.String())
}
return &block, nil
}
// Store is a generic implementation of the Store interface.
// It works(tm) for now.
type Store struct {
// ipldSource is a wrapper around ipld storage. It is used
// for reading filecoin block and state objects kept by the node.
stateAndBlockSource *ipldSource
	// stateTreeLoader is used for loading the state tree from a
// CborIPLDStore
stateTreeLoader state.TreeLoader
// ds is the datastore for the chain's private metadata which consists
// of the tipset key to state root cid mapping, and the heaviest tipset
// key.
ds repo.Datastore
// genesis is the CID of the genesis block.
genesis cid.Cid
// head is the tipset at the head of the best known chain.
head types.TipSet
// Protects head and genesisCid.
mu sync.RWMutex
// headEvents is a pubsub channel that publishes an event every time the head changes.
// We operate under the assumption that tipsets published to this channel
// will always be queued and delivered to subscribers in the order discovered.
// Successive published tipsets may be supersets of previously published tipsets.
// TODO: rename to notifications. Also, reconsider ordering assumption depending
// on decisions made around the FC node notification system.
headEvents *pubsub.PubSub
// Tracks tipsets by height/parentset for use by expected consensus.
tipIndex *TipIndex
}
// NewStore constructs a new default store.
func NewStore(ds repo.Datastore, cst state.IpldStore, stl state.TreeLoader, genesisCid cid.Cid) *Store {
return &Store{
stateAndBlockSource: newSource(cst),
stateTreeLoader: stl,
ds: ds,
headEvents: pubsub.New(128),
tipIndex: NewTipIndex(),
genesis: genesisCid,
}
}
// Load rebuilds the Store's caches by traversing backwards from the
// most recent best head as stored in its datastore. Because Load uses a
// content addressed datastore it guarantees that parent blocks are correctly
// resolved from the datastore. Furthermore Load ensures that all tipset
// references correctly have the same parent height, weight and parent set.
// However, Load DOES NOT validate state transitions; it assumes that the
// tipsets were only Put to the Store after checking for valid transitions.
//
// Furthermore Load trusts that the Store's backing datastore correctly
// preserves the cids of the heaviest tipset under the "headKey" datastore key.
// If the headKey cids are tampered with and invalid blocks added to the datastore
// then Load could be tricked into loading an invalid chain. Load will error if the
// head does not link back to the expected genesis block, or the Store's
// datastore does not store a link in the chain. In case of error the caller
// should not consider the chain usable and should propagate the error.
func (store *Store) Load(ctx context.Context) (err error) {
ctx, span := trace.StartSpan(ctx, "Store.Load")
defer tracing.AddErrorEndSpan(ctx, span, &err)
// Clear the tipset index.
store.tipIndex = NewTipIndex()
headTsKey, err := store.loadHead()
if err != nil {
return err
}
headTs, err := LoadTipSetBlocks(ctx, store.stateAndBlockSource, headTsKey)
if err != nil {
return errors.Wrap(err, "error loading head tipset")
}
startHeight := headTs.At(0).Height
logStore.Infof("start loading chain at tipset: %s, height: %d", headTsKey.String(), startHeight)
// Ensure we only produce 10 log messages regardless of the chain height.
logStatusEvery := uint64(startHeight / 10)
var genesii types.TipSet
// Provide tipsets directly from the block store, not from the tipset index which is
// being rebuilt by this traversal.
tipsetProvider := TipSetProviderFromBlocks(ctx, store.stateAndBlockSource)
for iterator := IterAncestors(ctx, tipsetProvider, headTs); !iterator.Complete(); err = iterator.Next() {
if err != nil {
return err
}
height, err := iterator.Value().Height()
if err != nil {
return err
}
if logStatusEvery != 0 && (height%logStatusEvery) == 0 {
logStore.Infof("load tipset: %s, height: %v", iterator.Value().String(), height)
}
stateRoot, err := store.loadStateRoot(iterator.Value())
if err != nil {
return err
}
err = store.PutTipSetAndState(ctx, &TipSetAndState{
TipSet: iterator.Value(),
TipSetStateRoot: stateRoot,
})
if err != nil {
return err
}
genesii = iterator.Value()
}
// Check genesis here.
if genesii.Len() != 1 {
return errors.Errorf("load terminated with tipset of %d blocks, expected genesis with exactly 1", genesii.Len())
}
loadCid := genesii.At(0).Cid()
if !loadCid.Equals(store.genesis) {
return errors.Errorf("expected genesis cid: %s, loaded genesis cid: %s", store.genesis, loadCid)
}
logStore.Infof("finished loading %d tipsets from %s", startHeight, headTs.String())
// Set actual head.
return store.SetHead(ctx, headTs)
}
// loadHead loads the latest known head from disk.
func (store *Store) loadHead() (types.TipSetKey, error) {
var emptyCidSet types.TipSetKey
bb, err := store.ds.Get(headKey)
if err != nil {
return emptyCidSet, errors.Wrap(err, "failed to read headKey")
}
var cids types.TipSetKey
err = cbor.DecodeInto(bb, &cids)
if err != nil {
return emptyCidSet, errors.Wrap(err, "failed to cast headCids")
}
return cids, nil
}
func (store *Store) loadStateRoot(ts types.TipSet) (cid.Cid, error) {
h, err := ts.Height()
if err != nil {
return cid.Undef, err
}
key := datastore.NewKey(makeKey(ts.String(), h))
bb, err := store.ds.Get(key)
if err != nil {
return cid.Undef, errors.Wrapf(err, "failed to read tipset key %s", ts.String())
}
var stateRoot cid.Cid
err = cbor.DecodeInto(bb, &stateRoot)
if err != nil {
return cid.Undef, errors.Wrapf(err, "failed to cast state root of tipset %s", ts.String())
}
return stateRoot, nil
}
// PutTipSetAndState persists the blocks of a tipset and the tipset index.
func (store *Store) PutTipSetAndState(ctx context.Context, tsas *TipSetAndState) error {
// Update tipindex.
err := store.tipIndex.Put(tsas)
if err != nil {
return err
}
// Persist the state mapping.
if err = store.writeTipSetAndState(tsas); err != nil {
return err
}
return nil
}
// GetTipSet returns the tipset identified by `key`.
func (store *Store) GetTipSet(key types.TipSetKey) (types.TipSet, error) {
return store.tipIndex.GetTipSet(key.String())
}
// GetTipSetState returns the aggregate state of the tipset identified by `key`.
func (store *Store) GetTipSetState(ctx context.Context, key types.TipSetKey) (state.Tree, error) {
stateCid, err := store.tipIndex.GetTipSetStateRoot(key.String())
if err != nil {
return nil, err
}
return store.stateTreeLoader.LoadStateTree(ctx, store.stateAndBlockSource.cborStore, stateCid, builtin.Actors)
}
// GetTipSetStateRoot returns the aggregate state root CID of the tipset identified by `key`.
func (store *Store) GetTipSetStateRoot(key types.TipSetKey) (cid.Cid, error) {
return store.tipIndex.GetTipSetStateRoot(key.String())
}
// HasTipSetAndState returns true iff the default store's tipindex is indexing
// the tipset identified by `key`.
func (store *Store) HasTipSetAndState(ctx context.Context, key string) bool {
return store.tipIndex.Has(key)
}
// GetTipSetAndStatesByParentsAndHeight returns the tipsets and states tracked by
// the default store's tipIndex that have parents identified by `parentKey` at height `h`.
func (store *Store) GetTipSetAndStatesByParentsAndHeight(parentKey string, h uint64) ([]*TipSetAndState, error) {
return store.tipIndex.GetByParentsAndHeight(parentKey, h)
}
// HasTipSetAndStatesWithParentsAndHeight returns true if the default store's tipindex
// contains any tipset with parents identified by `parentKey` at height `h`.
func (store *Store) HasTipSetAndStatesWithParentsAndHeight(parentKey string, h uint64) bool {
return store.tipIndex.HasByParentsAndHeight(parentKey, h)
}
// HeadEvents returns a pubsub interface that pushes events each time the
// default store's head is reset.
func (store *Store) HeadEvents() *pubsub.PubSub {
return store.headEvents
}
// SetHead sets the passed in tipset as the new head of this chain.
func (store *Store) SetHead(ctx context.Context, ts types.TipSet) error {
logStore.Debugf("SetHead %s", ts.String())
// Add logging to debug sporadic test failure.
if !ts.Defined() {
logStore.Error("publishing empty tipset")
logStore.Error(debug.Stack())
}
if err := store.setHeadPersistent(ctx, ts); err != nil {
return err
}
// Publish an event that we have a new head.
store.HeadEvents().Pub(ts, NewHeadTopic)
return nil
}
func (store *Store) setHeadPersistent(ctx context.Context, ts types.TipSet) error {
store.mu.Lock()
defer store.mu.Unlock()
// Ensure consistency by storing this new head on disk.
if errInner := store.writeHead(ctx, ts.Key()); errInner != nil {
return errors.Wrap(errInner, "failed to write new Head to datastore")
}
store.head = ts
return nil
}
// writeHead writes the given cid set as head to disk.
func (store *Store) writeHead(ctx context.Context, cids types.TipSetKey) error {
logStore.Debugf("WriteHead %s", cids.String())
val, err := cbor.DumpObject(cids)
if err != nil {
return err
}
return store.ds.Put(headKey, val)
}
// writeTipSetAndState writes the tipset key and the state root id to the
// datastore.
func (store *Store) writeTipSetAndState(tsas *TipSetAndState) error {
if tsas.TipSetStateRoot == cid.Undef {
return errors.New("attempting to write state root cid.Undef")
}
val, err := cbor.DumpObject(tsas.TipSetStateRoot)
if err != nil {
return err
}
// datastore keeps key:stateRoot (k,v) pairs.
h, err := tsas.TipSet.Height()
if err != nil {
return err
}
key := datastore.NewKey(makeKey(tsas.TipSet.String(), h))
return store.ds.Put(key, val)
}
// GetHead returns the current head tipset cids.
func (store *Store) GetHead() types.TipSetKey {
store.mu.RLock()
defer store.mu.RUnlock()
if !store.head.Defined() {
return types.TipSetKey{}
}
return store.head.Key()
}
// BlockHeight returns the chain height of the head tipset.
func (store *Store) BlockHeight() (uint64, error) {
store.mu.RLock()
defer store.mu.RUnlock()
return store.head.Height()
}
// GenesisCid returns the genesis cid of the chain tracked by the default store.
func (store *Store) GenesisCid() cid.Cid {
store.mu.Lock()
defer store.mu.Unlock()
return store.genesis
}
// Stop stops all activities and cleans up.
func (store *Store) Stop() {
store.headEvents.Shutdown()
}
| 1 | 20,264 | I like the idea of this being an interface as it makes the test setup a bit easier, thoughts? | filecoin-project-venus | go |
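
To make the reviewer's point concrete — that depending on the small ipldStateThing interface from this patch eases test setup — here is a minimal in-memory fake that satisfies it. This is a sketch under stated assumptions: fakeIpldStore and newFakeIpldStore are invented names, not claimed to exist in the repository, though the sketch reuses cbor.DumpObject/DecodeInto, which Store itself already calls. Since ipldStateThing is unexported, this would live in an internal test file (a hypothetical store_test.go in package chain).

package chain

import (
	"context"
	"fmt"
	"sync"

	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	mh "github.com/ipfs/go-multihash"
)

// Compile-time check that the fake satisfies the proposed interface.
var _ ipldStateThing = (*fakeIpldStore)(nil)

// fakeIpldStore is an in-memory stand-in for a datastore-backed IPLD store.
type fakeIpldStore struct {
	mu   sync.Mutex
	data map[cid.Cid][]byte
}

func newFakeIpldStore() *fakeIpldStore {
	return &fakeIpldStore{data: make(map[cid.Cid][]byte)}
}

// Put cbor-encodes v, derives a real content id for the bytes, and stores them.
func (f *fakeIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
	raw, err := cbor.DumpObject(v)
	if err != nil {
		return cid.Undef, err
	}
	hash, err := mh.Sum(raw, mh.SHA2_256, -1)
	if err != nil {
		return cid.Undef, err
	}
	c := cid.NewCidV1(cid.DagCBOR, hash)
	f.mu.Lock()
	defer f.mu.Unlock()
	f.data[c] = raw
	return c, nil
}

// Get decodes previously stored bytes into out, or errors if c is unknown.
func (f *fakeIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
	f.mu.Lock()
	raw, ok := f.data[c]
	f.mu.Unlock()
	if !ok {
		return fmt.Errorf("fakeIpldStore: no object for cid %s", c)
	}
	return cbor.DecodeInto(raw, out)
}

A test could then build the Store against this fake and drive PutTipSetAndState/GetTipSetState without standing up a real repo-backed IPLD store.
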
@@ -608,7 +608,8 @@ class ColdcardPlugin(HW_PluginBase):
pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
xfp_paths = []
- for pubkey in pubkey_deriv_info:
+ for pubkey_hex in pubkey_hexes:
+ pubkey = bytes.fromhex(pubkey_hex)
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
xfp_int = xfp_int_from_xfp_bytes(fp_bytes) | 1 | #
# Coldcard Electrum plugin main code.
#
#
import os, time, io
import traceback
from typing import TYPE_CHECKING, Optional
import struct
from electrum import bip32
from electrum.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum.i18n import _
from electrum.plugin import Device, hook
from electrum.keystore import Hardware_KeyStore, KeyStoreWithMPK
from electrum.transaction import PartialTransaction
from electrum.wallet import Standard_Wallet, Multisig_Wallet, Abstract_Wallet
from electrum.util import bfh, bh2u, versiontuple, UserFacingException
from electrum.base_wizard import ScriptTypeNotSupported
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import LibraryFoundButUnusable, only_hook_if_libraries_available
_logger = get_logger(__name__)
try:
import hid
from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH
requirements_ok = True
class ElectrumColdcardDevice(ColdcardDevice):
# avoid use of pycoin for MiTM message signature test
def mitm_verify(self, sig, expect_xpub):
# verify a signature (65 bytes) over the session key, using the master bip32 node
# - customized to use specific EC library of Electrum.
pubkey = BIP32Node.from_xkey(expect_xpub).eckey
try:
pubkey.verify_message_hash(sig[1:65], self.session_key)
return True
except:
return False
except ImportError:
requirements_ok = False
COINKITE_VID = 0xd13e
CKCC_PID = 0xcc10
CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
class CKCCClient(HardwareClientBase):
def __init__(self, plugin, handler, dev_path, *, is_simulator=False):
HardwareClientBase.__init__(self, plugin=plugin)
self.device = plugin.device
self.handler = handler
# if we know what the (xfp, xpub) "should be" then track it here
self._expected_device = None
if is_simulator:
self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
else:
# open the real HID device
with self.device_manager().hid_lock:
hd = hid.device(path=dev_path)
hd.open_path(dev_path)
self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)
# NOTE: MiTM test is delayed until we have a hint as to what XPUB we
# should expect. It's also kinda slow.
def __repr__(self):
return '<CKCCClient: xfp=%s label=%r>' % (xfp2str(self.dev.master_fingerprint),
self.label())
def verify_connection(self, expected_xfp: int, expected_xpub=None):
ex = (expected_xfp, expected_xpub)
if self._expected_device == ex:
# all is as expected
return
if expected_xpub is None:
expected_xpub = self.dev.master_xpub
if ( (self._expected_device is not None)
or (self.dev.master_fingerprint != expected_xfp)
or (self.dev.master_xpub != expected_xpub)):
# probably indicating programing error, not hacking
_logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
f"stored in file: {expected_xpub}")
raise RuntimeError("Expecting %s but that's not what's connected?!" %
xfp2str(expected_xfp))
# check signature over session key
# - mitm might have lied about xfp and xpub up to here
# - important that we use value capture at wallet creation time, not some value
# we read over USB today
self.dev.check_mitm(expected_xpub=expected_xpub)
self._expected_device = ex
if not getattr(self, 'ckcc_xpub', None):
self.ckcc_xpub = expected_xpub
_logger.info("Successfully verified against MiTM")
def is_pairable(self):
# can't do anything w/ devices that aren't setup (this code not normally reachable)
return bool(self.dev.master_xpub)
def timeout(self, cutoff):
# nothing to do?
pass
def close(self):
# close the HID device (so can be reused)
with self.device_manager().hid_lock:
self.dev.close()
self.dev = None
def is_initialized(self):
return bool(self.dev.master_xpub)
def label(self):
# 'label' of this Coldcard. Warning: gets saved into wallet file, which might
# not be encrypted, so better for privacy if based on xpub/fingerprint rather than
# USB serial number.
if self.dev.is_simulator:
lab = 'Coldcard Simulator ' + xfp2str(self.dev.master_fingerprint)
elif not self.dev.master_fingerprint:
# failback; not expected
lab = 'Coldcard #' + self.dev.serial
else:
lab = 'Coldcard ' + xfp2str(self.dev.master_fingerprint)
# Hack zone: during initial setup I need the xfp and master xpub but
# very few objects are passed between the various steps of base_wizard.
# Solution: return a string with some hidden metadata
# - see <https://stackoverflow.com/questions/7172772/abc-for-string>
# - needs to work w/ deepcopy
class LabelStr(str):
def __new__(cls, s, xfp=None, xpub=None):
self = super().__new__(cls, str(s))
self.xfp = getattr(s, 'xfp', xfp)
self.xpub = getattr(s, 'xpub', xpub)
return self
return LabelStr(lab, self.dev.master_fingerprint, self.dev.master_xpub)
def has_usable_connection_with_device(self):
# Do end-to-end ping test
try:
self.ping_check()
return True
except:
return False
def get_xpub(self, bip32_path, xtype):
assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
_logger.info('Derive xtype = %r' % xtype)
xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
# TODO handle timeout?
# change type of xpub to the requested type
try:
node = BIP32Node.from_xkey(xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
.format(self.device)) from None
if xtype != 'standard':
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
def ping_check(self):
# check connection is working
assert self.dev.session_key, 'not encrypted?'
req = b'1234 Electrum Plugin 4321' # free up to 59 bytes
try:
echo = self.dev.send_recv(CCProtocolPacker.ping(req))
assert echo == req
except:
raise RuntimeError("Communication trouble with Coldcard")
def show_address(self, path, addr_fmt):
# prompt user w/ address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)
def show_p2sh_address(self, *args, **kws):
# prompt user w/ p2sh address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_p2sh_address(*args, **kws), timeout=None)
def get_version(self):
# gives list of strings
return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')
def sign_message_start(self, path, msg):
# this starts the UX experience.
self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)
def sign_message_poll(self):
# poll device... if user has approved, will get tuple: (addr, sig) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)
def sign_transaction_start(self, raw_psbt: bytes, *, finalize: bool = False):
# Multiple steps to sign:
# - upload binary
# - start signing UX
# - wait for coldcard to complete process, or have it refused.
# - download resulting txn
assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
dlen, chk = self.dev.upload_file(raw_psbt)
resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
timeout=None)
if resp != None:
raise ValueError(resp)
def sign_transaction_poll(self):
# poll device... if user has approved, will get tuple: (legnth, checksum) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)
def download_file(self, length, checksum, file_number=1):
# get a file
return self.dev.download_file(length, checksum, file_number=file_number)
class Coldcard_KeyStore(Hardware_KeyStore):
hw_type = 'coldcard'
device = 'Coldcard'
plugin: 'ColdcardPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.ux_busy = False
# we need to know at least the fingerprint of the master xpub to verify against MiTM
# - device reports these value during encryption setup process
# - full xpub value now optional
lab = d['label']
self.ckcc_xpub = getattr(lab, 'xpub', None) or d.get('ckcc_xpub', None)
def dump(self):
# our additions to the stored data about keystore -- only during creation?
d = Hardware_KeyStore.dump(self)
d['ckcc_xpub'] = self.ckcc_xpub
return d
def get_xfp_int(self) -> int:
xfp = self.get_root_fingerprint()
assert xfp is not None
return xfp_int_from_xfp_bytes(bfh(xfp))
def get_client(self):
# called when user tries to do something like view address, sign somthing.
# - not called during probing/setup
# - will fail if indicated device can't produce the xpub (at derivation) expected
rv = self.plugin.get_client(self)
if rv:
xfp_int = self.get_xfp_int()
rv.verify_connection(xfp_int, self.ckcc_xpub)
return rv
def give_error(self, message, clear_client=False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def wrap_busy(func):
# decorator: function takes over the UX on the device.
def wrapper(self, *args, **kwargs):
try:
self.ux_busy = True
return func(self, *args, **kwargs)
finally:
self.ux_busy = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@wrap_busy
def sign_message(self, sequence, message, password):
# Sign a message on device. Since we have big screen, of course we
# have to show the message unabiguously there first!
try:
msg = message.encode('ascii', errors='strict')
assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
except (UnicodeError, AssertionError):
# there are other restrictions on message content,
# but let the device enforce and report those
self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
% MSG_SIGNING_MAX_LENGTH)
return b''
client = self.get_client()
path = self.get_derivation_prefix() + ("/%d/%d" % sequence)
try:
cl = self.get_client()
try:
self.handler.show_message("Signing message (using %s)..." % path)
cl.sign_message_start(path, msg)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = cl.sign_message_poll()
if resp is not None:
break
finally:
self.handler.finished()
assert len(resp) == 2
addr, raw_sig = resp
# already encoded in Bitcoin fashion, binary.
assert 40 < len(raw_sig) <= 65
return raw_sig
except (CCUserRefused, CCBusyError) as exc:
self.handler.show_error(str(exc))
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except Exception as e:
self.give_error(e, True)
# give empty bytes for error cases; it seems to clear the old signature box
return b''
@wrap_busy
def sign_transaction(self, tx, password):
# Upload PSBT for signing.
# - we can also work offline (without paired device present)
if tx.is_complete():
return
client = self.get_client()
assert client.dev.master_fingerprint == self.get_xfp_int()
raw_psbt = tx.serialize_as_bytes()
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction_start(raw_psbt)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = client.sign_transaction_poll()
if resp is not None:
break
rlen, rsha = resp
# download the resulting txn.
raw_resp = client.download_file(rlen, rsha)
finally:
self.handler.finished()
except (CCUserRefused, CCBusyError) as exc:
self.logger.info(f'Did not sign: {exc}')
self.handler.show_error(str(exc))
return
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
return
tx2 = PartialTransaction.from_raw_psbt(raw_resp)
# apply partial signatures back into txn
tx.combine_with_other_psbt(tx2)
# caller's logic looks at tx now and if it's sufficiently signed,
# will send it if that's the user's intent.
@staticmethod
def _encode_txin_type(txin_type):
# Map from Electrum code names to our code numbers.
return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
'p2sh': AF_P2SH,
'p2wpkh-p2sh': AF_P2WPKH_P2SH,
'p2wpkh': AF_P2WPKH,
'p2wsh-p2sh': AF_P2WSH_P2SH,
'p2wsh': AF_P2WSH,
}[txin_type]
@wrap_busy
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
@wrap_busy
def show_p2sh_address(self, M, script, xfp_paths, txin_type):
client = self.get_client()
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_p2sh_address(M, xfp_paths, script, addr_fmt=addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}.\n{}\n\n{}'.format(
_('Error showing address'),
_('Make sure you have imported the correct wallet description '
'file on the device for this multisig wallet.'),
str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
keystore_class = Coldcard_KeyStore
minimum_library = (0, 7, 7)
DEVICE_IDS = [
(COINKITE_VID, CKCC_PID),
(COINKITE_VID, CKCC_SIMULATED_PID)
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
self.device_manager().register_enumerate_func(self.detect_simulator)
def get_library_version(self):
import ckcc
try:
version = ckcc.__version__
except AttributeError:
version = 'unknown'
if requirements_ok:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def detect_simulator(self):
# if there is a simulator running on this machine,
# return details about it so it's offered as a pairing choice
fn = CKCC_SIMULATOR_PATH
if os.path.exists(fn):
return [Device(path=fn,
interface_number=-1,
id_=fn,
product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
usage_page=0,
transport_ui_string='simulator')]
return []
def create_client(self, device, handler):
if handler:
self.handler = handler
# We are given a HID device, or at least some details about it.
# Not sure why not we aren't just given a HID library handle, but
# the 'path' is unabiguous, so we'll use that.
try:
rv = CKCCClient(self, handler, device.path,
is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
return rv
except Exception as e:
self.logger.exception('late failure connecting to device?')
return None
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
# this seems to be part of the pairing process only, not during normal ops?
# base_wizard:on_hw_derivation
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
client.ping_check()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['CKCCClient']:
# Acquire a connection to the hardware device (via USB)
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
if client is not None:
client.ping_check()
return client
@staticmethod
def export_ms_wallet(wallet: Multisig_Wallet, fp, name):
# Build the text file Coldcard needs to understand the multisig wallet
# it is participating in. All involved Coldcards can share same file.
assert isinstance(wallet, Multisig_Wallet)
print('# Exported from Electrum', file=fp)
print(f'Name: {name:.20s}', file=fp)
print(f'Policy: {wallet.m} of {wallet.n}', file=fp)
print(f'Format: {wallet.txin_type.upper()}' , file=fp)
xpubs = []
derivs = set()
for xpub, ks in zip(wallet.get_master_public_keys(), wallet.get_keystores()): # type: str, KeyStoreWithMPK
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
fp_hex = fp_bytes.hex().upper()
der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full)
xpubs.append( (fp_hex, xpub, der_prefix_str) )
derivs.add(der_prefix_str)
# Derivation doesn't matter too much to the Coldcard, since it
# uses key path data from PSBT or USB request as needed. However,
# if there is a clear value, provide it.
if len(derivs) == 1:
print("Derivation: " + derivs.pop(), file=fp)
print('', file=fp)
assert len(xpubs) == wallet.n
for xfp, xpub, der_prefix in xpubs:
if derivs:
# show as a comment if unclear
print(f'# derivation: {der_prefix}', file=fp)
print(f'{xfp}: {xpub}\n', file=fp)
def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore' = None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
txin_type = wallet.get_txin_type(address)
# Standard_Wallet => not multisig, must be bip32
if type(wallet) is Standard_Wallet:
sequence = wallet.get_address_index(address)
keystore.show_address(sequence, txin_type)
elif type(wallet) is Multisig_Wallet:
assert isinstance(wallet, Multisig_Wallet) # only here for type-hints in IDE
# More involved for P2SH/P2WSH addresses: need M, and all public keys, and their
# derivation paths. Must construct script, and track fingerprints+paths for
# all those keys
pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
xfp_paths = []
for pubkey in pubkey_deriv_info:
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
xfp_int = xfp_int_from_xfp_bytes(fp_bytes)
xfp_paths.append([xfp_int] + list(der_full))
script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes))
keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type)
else:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
def xfp_int_from_xfp_bytes(fp_bytes: bytes) -> int:
return int.from_bytes(fp_bytes, byteorder="little", signed=False)
def xfp2str(xfp: int) -> str:
# Standardized way to show an xpub's fingerprint... it's a 4-byte string
# and not really an integer. Used to show as '0x%08x' but that's wrong endian.
return struct.pack('<I', xfp).hex().lower()
# EOF
| 1 | 13,776 | Nit: I believe the electrum convention is to use `bfh` instead of `bytes.fromhex`. Great catch on this bug! | spesmilo-electrum | py |
@@ -1910,7 +1910,10 @@ def _getPredictedField(options):
predictedFieldInfo = info
break
- assert predictedFieldInfo
+ if predictedFieldInfo is None:
+ raise Exception(
+ "Predicted field '%s' does not exist in included fields." % predictedField
+ )
predictedFieldType = predictedFieldInfo['fieldType']
return predictedField, predictedFieldType | 1 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This utility can generate an OPF experiment and permutation script based on
a data file and other optional arguments.
"""
import os
import types
import json
import re
import sys
import math
from datetime import timedelta
import copy
import shutil
import itertools as iter
import pprint
import string
import collections
import traceback
import validictory
import tempfile
from optparse import (OptionParser, OptionGroup)
from pkg_resources import resource_stream
from nupic.data.dictutils import DictObj
from nupic.frameworks.opf import jsonschema
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.support.configuration import Configuration
from nupic.support.enum import Enum
#############################################################################
# Global constants
# Space characters representing one level of indent in our generated python
# data structures
_INDENT_STEP = 2
_ONE_INDENT = ' ' * _INDENT_STEP
_ILLEGAL_FIELDNAME_CHARACTERS = "\\"
METRIC_WINDOW = int(Configuration.get("nupic.opf.metricWindow"))
#############################################################################
# Enum to characterize potential generation environments
OpfEnvironment = Enum(Grok='grok',
Experiment='opfExperiment')
#############################################################################
class _ExpGeneratorException(Exception):
""" Base class for all ExpGenerator-specific exceptions
"""
pass
#############################################################################
class _CreateDirectoryException(_ExpGeneratorException):
""" Raised on error creating the experiment directory
Attributes:
reason: the reason the exception was raised (usually an exception)
"""
def __init__(self, dirPath, reason):
"""
dirPath: the path that we attempted to create for experiment files
reason: any object that can be converted to a string that explains
the reason (may be an exception)
"""
super(_CreateDirectoryException, self).__init__(
("Error creating directory " + \
"<%s>: %s.") % (str(dirPath), str(reason)))
self.reason = reason
#############################################################################
class _InvalidFunctionArgException(_ExpGeneratorException):
"""
This exception may be raised in response to invalid or incompatible function
arguments.
"""
pass
#############################################################################
class _InvalidCommandArgException(_ExpGeneratorException):
"""
This exception may be raised in response to invalid or incompatible command
arguments/values. When the program is executed from command-line, the handler
is expected to report the error (_outputErrorReport()) and exit the program
with error status=1.
"""
pass
#############################################################################
class _ErrorReportingException(_ExpGeneratorException):
"""
This exception may be raised by our error result reporting code. When
this exception is handled, there is no point in calling the error result
reporting code again. The typical response should be to re-raise this
exception.
"""
def __init__(self, problem, precursor):
"""
problem: a string-convertible object that describes the problem
               experienced by the error-reporting function.
precursor: a string-convertible object that explains
the original error that the error-reporting function
was attempting to report when it encountered its own failure.
"""
super(_ErrorReportingException, self).__init__(
("Encountered error: '%s' while reporting " + \
"error: '%s'.") \
% (problem, precursor))
#############################################################################
class FieldTypeError(_ExpGeneratorException):
pass
#############################################################################
def _makeUsageErrorStr(errorString, usageString):
""" Combines an error string and usage string into a regular format, so they
all look consistent.
"""
return "ERROR: %s (%s)" % (errorString, usageString)
#############################################################################
def _handleShowSchemaOption():
""" Displays command schema to stdout and exit program
"""
print "\n============== BEGIN INPUT SCHEMA for --description =========>>"
print(json.dumps(_getExperimentDescriptionSchema(), indent=_INDENT_STEP*2))
print "\n<<============== END OF INPUT SCHEMA for --description ========"
return
#############################################################################
def _handleDescriptionOption(cmdArgStr, outDir, usageStr, hsVersion,
claDescriptionTemplateFile):
"""
Parses and validates the --description option args and executes the
request
Parameters:
-----------------------------------------------------------------------
cmdArgStr: JSON string compatible with _gExperimentDescriptionSchema
outDir: where to place generated experiment files
usageStr: program usage string
hsVersion: which version of hypersearch permutations file to generate, can
be 'v1' or 'v2'
claDescriptionTemplateFile: Filename containing the template description
retval: nothing
"""
# convert --description arg from JSON string to dict
try:
args = json.loads(cmdArgStr)
except Exception, e:
raise _InvalidCommandArgException(
_makeUsageErrorStr(
("JSON arg parsing failed for --description: %s\n" + \
"ARG=<%s>") % (str(e), cmdArgStr), usageStr))
#print "PARSED JSON ARGS=\n%s" % (json.dumps(args, indent=4))
filesDescription = _generateExperiment(args, outDir, hsVersion=hsVersion,
claDescriptionTemplateFile = claDescriptionTemplateFile)
pprint.pprint(filesDescription)
return
#############################################################################
def _handleDescriptionFromFileOption(filename, outDir, usageStr, hsVersion,
claDescriptionTemplateFile):
"""
Parses and validates the --descriptionFromFile option and executes the
request
Parameters:
-----------------------------------------------------------------------
filename: File from which we'll extract description JSON
outDir: where to place generated experiment files
usageStr: program usage string
hsVersion: which version of hypersearch permutations file to generate, can
be 'v1' or 'v2'
claDescriptionTemplateFile: Filename containing the template description
retval: nothing
"""
try:
fileHandle = open(filename, 'r')
JSONStringFromFile = fileHandle.read().splitlines()
JSONStringFromFile = ''.join(JSONStringFromFile)
except Exception, e:
raise _InvalidCommandArgException(
_makeUsageErrorStr(
("File open failed for --descriptionFromFile: %s\n" + \
"ARG=<%s>") % (str(e), filename), usageStr))
_handleDescriptionOption(JSONStringFromFile, outDir, usageStr,
hsVersion=hsVersion,
claDescriptionTemplateFile = claDescriptionTemplateFile)
return
#############################################################################
def _isInt(x, precision = 0.0001):
"""
Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer
"""
xInt = int(round(x))
  # abs(x) keeps the tolerance positive for negative inputs, and <= admits
  # an exact zero.
  return (abs(x - xInt) <= precision * abs(x), xInt)
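# Illustrative sketch (doctest-style, hypothetical inputs) of how _isInt
# behaves with the default precision:
#
#   >>> _isInt(3.00001)
#   (True, 3)
#   >>> _isInt(3.2)
#   (False, 3)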
#############################################################################
def _isString(obj):
"""
returns whether or not the object is a string
"""
return type(obj) in types.StringTypes
#############################################################################
def _quoteAndEscape(string):
"""
string: input string (ascii or unicode)
Returns: a quoted string with characters that are represented in python via
escape sequences converted to those escape sequences
"""
assert _isString(string)
return pprint.pformat(string)
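# Illustrative sketch (doctest-style, hypothetical inputs):
#
#   >>> _quoteAndEscape('consumption')
#   "'consumption'"
#   >>> _quoteAndEscape('line1\nline2')
#   "'line1\\nline2'"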
#############################################################################
def _indentLines(str, indentLevels = 1, indentFirstLine=True):
""" Indent all lines in the given string
str: input string
indentLevels: number of levels of indentation to apply
indentFirstLine: if False, the 1st line will not be indented
Returns: The result string with all lines indented
"""
indent = _ONE_INDENT * indentLevels
lines = str.splitlines(True)
result = ''
if len(lines) > 0 and not indentFirstLine:
first = 1
result += lines[0]
else:
first = 0
for line in lines[first:]:
result += indent + line
return result
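# Illustrative sketch (doctest-style; _INDENT_STEP is 2, so each level is
# two spaces):
#
#   >>> _indentLines("a\nb\n", indentLevels=2, indentFirstLine=False)
#   'a\n    b\n'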
#############################################################################
def _isCategory(fieldType):
  """Helper function for determining whether a field is a categorical
  variable or a scalar variable. Returns True for categorical types, False
  for scalar types, and None otherwise. Mainly used for determining the
  appropriate metrics.
  """
  if fieldType == 'string':
    return True

  if fieldType == 'int' or fieldType == 'float':
    return False
#############################################################################
def _generateMetricSpecString(inferenceElement, metric,
params=None, field=None,
returnLabel=False):
""" Generates the string representation of a MetricSpec object, and returns
the metric key associated with the metric.
Parameters:
-----------------------------------------------------------------------
inferenceElement:
An InferenceElement value that indicates which part of the inference this
metric is computed on
metric:
The type of the metric being computed (e.g. aae, avg_error)
params:
A dictionary of parameters for the metric. The keys are the parameter names
and the values should be the parameter values (e.g. window=200)
field:
The name of the field for which this metric is being computed
returnLabel:
If True, returns the label of the MetricSpec that was generated
"""
metricSpecArgs = dict(metric=metric,
field=field,
params=params,
inferenceElement=inferenceElement)
metricSpecAsString = "MetricSpec(%s)" % \
', '.join(['%s=%r' % (item[0],item[1])
for item in metricSpecArgs.iteritems()])
if not returnLabel:
return metricSpecAsString
spec = MetricSpec(**metricSpecArgs)
metricLabel = spec.getLabel()
return metricSpecAsString, metricLabel
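# Illustrative sketch (hypothetical arguments; the keyword order inside the
# returned string depends on dict iteration order, so the output is shown
# abridged):
#
#   >>> _generateMetricSpecString(InferenceElement.prediction, 'aae',
#   ...                           params={'window': 200}, field='consumption')
#   "MetricSpec(... metric='aae', field='consumption', ...)"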
#############################################################################
def _generateFileFromTemplates(templateFileNames, outputFilePath,
replacementDict):
""" Generates a file by applying token replacements to the given template
file
  templateFileNames:
        A list of template file names; these files are assumed to be in
        the same directory as the running ExpGenerator.py script.
        ExpGenerator will perform the substitution and concatenate
        the files in the order they are specified
outputFilePath: Absolute path of the output file
replacementDict:
A dictionary of token/replacement pairs
"""
# Find out where we're running from so we know where to find templates
installPath = os.path.dirname(__file__)
outputFile = open(outputFilePath, "w")
outputLines = []
inputLines = []
firstFile = True
for templateFileName in templateFileNames:
# Separate lines from each file by two blank lines.
if not firstFile:
inputLines.extend(['\n']*2)
firstFile = False
inputFilePath = os.path.join(installPath, templateFileName)
inputFile = open(inputFilePath)
inputLines.extend(inputFile.readlines())
inputFile.close()
print "Writing ", len(inputLines), "lines..."
for line in inputLines:
tempLine = line
# Enumerate through each key in replacementDict and replace with value
for k, v in replacementDict.iteritems():
if v is None:
v = "None"
tempLine = re.sub(k, v, tempLine)
outputFile.write(tempLine)
outputFile.close()
#############################################################################
def _generateEncoderChoicesV1(fieldInfo):
""" Return a list of possible encoder parameter combinations for the given
field and the default aggregation function to use. Each parameter combination
is a dict defining the parameters for the encoder. Here is an example
return value for the encoderChoicesList:
[
None,
      {'fieldname':'timestamp',
       'name': 'timestamp_dayOfWeek',
       'type':'DateEncoder',
       'dayOfWeek': (7,1)
      },
      {'fieldname':'timestamp',
       'name': 'timestamp_dayOfWeek',
       'type':'DateEncoder',
       'dayOfWeek': (7,3)
      },
],
Parameters:
--------------------------------------------------
fieldInfo: item from the 'includedFields' section of the
description JSON object
retval: (encoderChoicesList, aggFunction)
encoderChoicesList: a list of encoder choice lists for this field.
Most fields will generate just 1 encoder choice list.
DateTime fields can generate 2 or more encoder choice lists,
one for dayOfWeek, one for timeOfDay, etc.
aggFunction: name of aggregation function to use for this
field type
"""
width = 7
fieldName = fieldInfo['fieldName']
fieldType = fieldInfo['fieldType']
encoderChoicesList = []
# Scalar?
if fieldType in ['float', 'int']:
aggFunction = 'mean'
encoders = [None]
for n in (13, 50, 150, 500):
encoder = dict(type='ScalarSpaceEncoder', name=fieldName, fieldname=fieldName,
n=n, w=width, clipInput=True,space="absolute")
if 'minValue' in fieldInfo:
encoder['minval'] = fieldInfo['minValue']
if 'maxValue' in fieldInfo:
encoder['maxval'] = fieldInfo['maxValue']
encoders.append(encoder)
encoderChoicesList.append(encoders)
# String?
elif fieldType == 'string':
aggFunction = 'first'
encoders = [None]
encoder = dict(type='SDRCategoryEncoder', name=fieldName,
fieldname=fieldName, n=100, w=width)
encoders.append(encoder)
encoderChoicesList.append(encoders)
# Datetime?
elif fieldType == 'datetime':
aggFunction = 'first'
# First, the time of day representation
encoders = [None]
for radius in (1, 8):
encoder = dict(type='DateEncoder', name='%s_timeOfDay' % (fieldName),
fieldname=fieldName, timeOfDay=(width, radius))
encoders.append(encoder)
encoderChoicesList.append(encoders)
# Now, the day of week representation
encoders = [None]
for radius in (1, 3):
encoder = dict(type='DateEncoder', name='%s_dayOfWeek' % (fieldName),
fieldname=fieldName, dayOfWeek=(width, radius))
encoders.append(encoder)
encoderChoicesList.append(encoders)
else:
raise RuntimeError("Unsupported field type '%s'" % (fieldType))
# Return results
return (encoderChoicesList, aggFunction)
#############################################################################
def _generateEncoderStringsV1(includedFields):
""" Generate and return the following encoder related substitution variables:
encoderSpecsStr:
For the base description file, this string defines the default
encoding dicts for each encoder. For example:
'__gym_encoder' : { 'fieldname': 'gym',
'n': 13,
'name': 'gym',
'type': 'SDRCategoryEncoder',
'w': 7},
'__address_encoder' : { 'fieldname': 'address',
'n': 13,
'name': 'address',
'type': 'SDRCategoryEncoder',
'w': 7}
encoderSchemaStr:
For the base description file, this is a list containing a
DeferredDictLookup entry for each encoder. For example:
[DeferredDictLookup('__gym_encoder'),
DeferredDictLookup('__address_encoder'),
DeferredDictLookup('__timestamp_timeOfDay_encoder'),
DeferredDictLookup('__timestamp_dayOfWeek_encoder'),
DeferredDictLookup('__consumption_encoder')],
permEncoderChoicesStr:
For the permutations file, this defines the possible
encoder dicts for each encoder. For example:
'__timestamp_dayOfWeek_encoder': [
None,
              {'fieldname':'timestamp',
               'name': 'timestamp_dayOfWeek',
               'type':'DateEncoder',
               'dayOfWeek': (7,1)
               },
              {'fieldname':'timestamp',
               'name': 'timestamp_dayOfWeek',
               'type':'DateEncoder',
               'dayOfWeek': (7,3)
               },
],
'__field_consumption_encoder': [
None,
{'fieldname':'consumption',
'name': 'consumption',
'type':'AdaptiveScalarEncoder',
'n': 13,
'w': 7,
}
]
Parameters:
--------------------------------------------------
includedFields: item from the 'includedFields' section of the
description JSON object. This is a list of dicts, each
dict defining the field name, type, and optional min
and max values.
retval: (encoderSpecsStr, encoderSchemaStr permEncoderChoicesStr)
"""
# ------------------------------------------------------------------------
# First accumulate the possible choices for each encoder
encoderChoicesList = []
for fieldInfo in includedFields:
fieldName = fieldInfo['fieldName']
# Get the list of encoder choices for this field
(choicesList, aggFunction) = _generateEncoderChoicesV1(fieldInfo)
encoderChoicesList.extend(choicesList)
# ------------------------------------------------------------------------
# Generate the string containing the encoder specs and encoder schema. See
# the function comments for an example of the encoderSpecsStr and
# encoderSchemaStr
#
encoderSpecsList = []
for encoderChoices in encoderChoicesList:
# Use the last choice as the default in the base file because the 1st is
# often None
encoder = encoderChoices[-1]
# Check for bad characters
for c in _ILLEGAL_FIELDNAME_CHARACTERS:
if encoder['name'].find(c) >= 0:
raise _ExpGeneratorException("Illegal character in field: %r (%r)" % (
c, encoder['name']))
encoderSpecsList.append("%s: \n%s%s" % (
_quoteAndEscape(encoder['name']),
2*_ONE_INDENT,
pprint.pformat(encoder, indent=2*_INDENT_STEP)))
encoderSpecsStr = ',\n '.join(encoderSpecsList)
# ------------------------------------------------------------------------
# Generate the string containing the permutation encoder choices. See the
# function comments above for an example of the permEncoderChoicesStr
permEncoderChoicesList = []
for encoderChoices in encoderChoicesList:
permEncoderChoicesList.append("%s: %s," % (
_quoteAndEscape(encoderChoices[-1]['name']),
pprint.pformat(encoderChoices, indent=2*_INDENT_STEP)))
permEncoderChoicesStr = '\n'.join(permEncoderChoicesList)
permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, 1,
indentFirstLine=False)
# Return results
return (encoderSpecsStr, permEncoderChoicesStr)
#############################################################################
def _generatePermEncoderStr(options, encoderDict):
""" Generate the string that defines the permutations to apply for a given
encoder.
Parameters:
-----------------------------------------------------------------------
options: experiment params
encoderDict: the encoder dict, which gets placed into the description.py
For example, if the encoderDict contains:
'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
The return string will contain:
"PermuteEncoder(fieldName='consumption',
encoderClass='AdaptiveScalarEncoder',
w=21,
                        n=PermuteInt(22, 521),
clipInput=True)"
"""
permStr = ""
# If it's the encoder for the classifier input, then it's always present so
# put it in as a dict in the permutations.py file instead of a
# PermuteEncoder().
if encoderDict.get('classifierOnly', False):
permStr = "dict("
for key, value in encoderDict.items():
if key == "name":
continue
if key == 'n' and encoderDict['type'] != 'SDRCategoryEncoder':
permStr += "n=PermuteInt(%d, %d), " % (encoderDict["w"] + 7,
encoderDict["w"] + 500)
else:
if issubclass(type(value), basestring):
permStr += "%s='%s', " % (key, value)
else:
permStr += "%s=%s, " % (key, value)
permStr += ")"
else:
# Scalar encoders
if encoderDict["type"] in ["ScalarSpaceEncoder", "AdaptiveScalarEncoder",
"ScalarEncoder", "LogEncoder"]:
permStr = "PermuteEncoder("
for key, value in encoderDict.items():
if key == "fieldname":
key = "fieldName"
elif key == "type":
key = "encoderClass"
elif key == "name":
continue
if key == "n":
permStr += "n=PermuteInt(%d, %d), " % (encoderDict["w"] + 1,
encoderDict["w"] + 500)
elif key == "runDelta":
if value and not "space" in encoderDict:
permStr += "space=PermuteChoices([%s,%s]), " \
% (_quoteAndEscape("delta"), _quoteAndEscape("absolute"))
encoderDict.pop("runDelta")
else:
if issubclass(type(value), basestring):
permStr += "%s='%s', " % (key, value)
else:
permStr += "%s=%s, " % (key, value)
permStr += ")"
# Category encoder
elif encoderDict["type"] in ["SDRCategoryEncoder"]:
permStr = "PermuteEncoder("
for key, value in encoderDict.items():
if key == "fieldname":
key = "fieldName"
elif key == "type":
key = "encoderClass"
elif key == "name":
continue
if issubclass(type(value), basestring):
permStr += "%s='%s', " % (key, value)
else:
permStr += "%s=%s, " % (key, value)
permStr += ")"
# Datetime encoder
elif encoderDict["type"] in ["DateEncoder"]:
permStr = "PermuteEncoder("
for key, value in encoderDict.items():
if key == "fieldname":
key = "fieldName"
elif key == "type":
continue
elif key == "name":
continue
if key == "timeOfDay":
permStr += "encoderClass='%s.timeOfDay', " % (encoderDict["type"])
permStr += "radius=PermuteFloat(0.5, 12), "
permStr += "w=%d, " % (value[0])
elif key == "dayOfWeek":
permStr += "encoderClass='%s.dayOfWeek', " % (encoderDict["type"])
permStr += "radius=PermuteFloat(1, 6), "
permStr += "w=%d, " % (value[0])
elif key == "weekend":
permStr += "encoderClass='%s.weekend', " % (encoderDict["type"])
permStr += "radius=PermuteChoices([1]), "
permStr += "w=%d, " % (value)
else:
if issubclass(type(value), basestring):
permStr += "%s='%s', " % (key, value)
else:
permStr += "%s=%s, " % (key, value)
permStr += ")"
else:
raise RuntimeError("Unsupported encoder type '%s'" % \
(encoderDict["type"]))
return permStr
#############################################################################
def _generateEncoderStringsV2(includedFields, options):
""" Generate and return the following encoder related substitution variables:
encoderSpecsStr:
For the base description file, this string defines the default
encoding dicts for each encoder. For example:
__gym_encoder = { 'fieldname': 'gym',
'n': 13,
'name': 'gym',
'type': 'SDRCategoryEncoder',
'w': 7},
__address_encoder = { 'fieldname': 'address',
'n': 13,
'name': 'address',
'type': 'SDRCategoryEncoder',
'w': 7}
permEncoderChoicesStr:
For the permutations file, this defines the possible
encoder dicts for each encoder. For example:
'__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,
n=100),
'__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',
w=7, n=100),
'__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',
'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),
'__consumption_encoder': PermuteEncoder('consumption', 'AdaptiveScalarEncoder',
w=7, n=PermuteInt(13, 500, 20), minval=0,
maxval=PermuteInt(100, 300, 25)),
Parameters:
--------------------------------------------------
includedFields: item from the 'includedFields' section of the
description JSON object. This is a list of dicts, each
dict defining the field name, type, and optional min
and max values.
retval: (encoderSpecsStr permEncoderChoicesStr)
"""
width = 21
encoderDictsList = []
# If this is a NontemporalClassification experiment, then the
# the "predicted" field (the classification value) should be marked to ONLY
# go to the classifier
if options['inferenceType'] in ["NontemporalClassification",
"NontemporalMultiStep",
"TemporalMultiStep",
"MultiStep"]:
classifierOnlyField = options['inferenceArgs']['predictedField']
else:
classifierOnlyField = None
# ==========================================================================
# For each field, generate the default encoding dict and PermuteEncoder
# constructor arguments
for fieldInfo in includedFields:
fieldName = fieldInfo['fieldName']
fieldType = fieldInfo['fieldType']
# ---------
# Scalar?
if fieldType in ['float', 'int']:
      # n=100 is a reasonable hardcoded value for n when used by description.py
# The swarming will use PermuteEncoder below, where n is variable and
# depends on w
runDelta = fieldInfo.get("runDelta", False)
if runDelta or "space" in fieldInfo:
encoderDict = dict(type='ScalarSpaceEncoder', name=fieldName,
fieldname=fieldName, n=100, w=width, clipInput=True)
if runDelta:
encoderDict["runDelta"] = True
else:
encoderDict = dict(type='AdaptiveScalarEncoder', name=fieldName,
fieldname=fieldName, n=100, w=width, clipInput=True)
if 'minValue' in fieldInfo:
encoderDict['minval'] = fieldInfo['minValue']
if 'maxValue' in fieldInfo:
encoderDict['maxval'] = fieldInfo['maxValue']
# If both min and max were specified, use a non-adaptive encoder
if ('minValue' in fieldInfo and 'maxValue' in fieldInfo) \
and (encoderDict['type'] == 'AdaptiveScalarEncoder'):
encoderDict['type'] = 'ScalarEncoder'
# Defaults may have been over-ridden by specifying an encoder type
if 'encoderType' in fieldInfo:
encoderDict['type'] = fieldInfo['encoderType']
if 'space' in fieldInfo:
encoderDict['space'] = fieldInfo['space']
encoderDictsList.append(encoderDict)
# ---------
# String?
elif fieldType == 'string':
encoderDict = dict(type='SDRCategoryEncoder', name=fieldName,
fieldname=fieldName, n=100+width, w=width)
if 'encoderType' in fieldInfo:
encoderDict['type'] = fieldInfo['encoderType']
encoderDictsList.append(encoderDict)
# ---------
# Datetime?
elif fieldType == 'datetime':
# First, the time of day representation
encoderDict = dict(type='DateEncoder', name='%s_timeOfDay' % (fieldName),
fieldname=fieldName, timeOfDay=(width, 1))
if 'encoderType' in fieldInfo:
encoderDict['type'] = fieldInfo['encoderType']
encoderDictsList.append(encoderDict)
# Now, the day of week representation
encoderDict = dict(type='DateEncoder', name='%s_dayOfWeek' % (fieldName),
fieldname=fieldName, dayOfWeek=(width, 1))
if 'encoderType' in fieldInfo:
encoderDict['type'] = fieldInfo['encoderType']
encoderDictsList.append(encoderDict)
      # Finally, the weekend representation
encoderDict = dict(type='DateEncoder', name='%s_weekend' % (fieldName),
fieldname=fieldName, weekend=(width))
if 'encoderType' in fieldInfo:
encoderDict['type'] = fieldInfo['encoderType']
encoderDictsList.append(encoderDict)
else:
raise RuntimeError("Unsupported field type '%s'" % (fieldType))
# -----------------------------------------------------------------------
# If this was the predicted field, insert another encoder that sends it
# to the classifier only
if fieldName == classifierOnlyField:
clEncoderDict = dict(encoderDict)
clEncoderDict['classifierOnly'] = True
clEncoderDict['name'] = '_classifierInput'
encoderDictsList.append(clEncoderDict)
# If the predicted field needs to be excluded, take it out of the encoder
# lists
if options["inferenceArgs"]["inputPredictedField"] == "no":
encoderDictsList.remove(encoderDict)
# Remove any encoders not in fixedFields
if options.get('fixedFields') is not None:
tempList=[]
for encoderDict in encoderDictsList:
if encoderDict['name'] in options['fixedFields']:
tempList.append(encoderDict)
encoderDictsList = tempList
# ==========================================================================
# Now generate the encoderSpecsStr and permEncoderChoicesStr strings from
# encoderDictsList and constructorStringList
encoderSpecsList = []
permEncoderChoicesList = []
for encoderDict in encoderDictsList:
if encoderDict['name'].find('\\') >= 0:
raise _ExpGeneratorException("Illegal character in field: '\\'")
# Check for bad characters
for c in _ILLEGAL_FIELDNAME_CHARACTERS:
if encoderDict['name'].find(c) >= 0:
raise _ExpGeneratorException("Illegal character %s in field %r" %(c, encoderDict['name']))
constructorStr = _generatePermEncoderStr(options, encoderDict)
encoderKey = _quoteAndEscape(encoderDict['name'])
encoderSpecsList.append("%s: %s%s" % (
encoderKey,
2*_ONE_INDENT,
pprint.pformat(encoderDict, indent=2*_INDENT_STEP)))
# Each permEncoderChoicesStr is of the form:
# PermuteEncoder('gym', 'SDRCategoryEncoder',
# w=7, n=100),
permEncoderChoicesList.append("%s: %s," % (encoderKey, constructorStr))
# Join into strings
encoderSpecsStr = ',\n '.join(encoderSpecsList)
permEncoderChoicesStr = '\n'.join(permEncoderChoicesList)
permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, 1,
indentFirstLine=True)
# Return results
return (encoderSpecsStr, permEncoderChoicesStr)
#############################################################################
def _handleJAVAParameters(options):
""" Handle legacy options (TEMPORARY) """
# Find the correct InferenceType for the Model
if 'inferenceType' not in options:
prediction = options.get('prediction', {InferenceType.TemporalNextStep:
{'optimize':True}})
inferenceType = None
for infType, value in prediction.iteritems():
if value['optimize']:
inferenceType = infType
break
if inferenceType == 'temporal':
inferenceType = InferenceType.TemporalNextStep
if inferenceType != InferenceType.TemporalNextStep:
raise _ExpGeneratorException("Unsupported inference type %s" % \
(inferenceType))
options['inferenceType'] = inferenceType
# Find the best value for the predicted field
if 'predictionField' in options:
if 'inferenceArgs' not in options:
options['inferenceArgs'] = {'predictedField': options['predictionField']}
elif 'predictedField' not in options['inferenceArgs']:
options['inferenceArgs']['predictedField'] = options['predictionField']
#############################################################################
def _getPropertyValue(schema, propertyName, options):
"""Checks to see if property is specified in 'options'. If not, reads the
default value from the schema"""
if propertyName not in options:
paramsSchema = schema['properties'][propertyName]
if 'default' in paramsSchema:
options[propertyName] = paramsSchema['default']
else:
options[propertyName] = None
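# Illustrative sketch (hypothetical schema fragment): a missing option is
# filled in from the schema's default:
#
#   >>> schema = {'properties': {'swarmSize': {'default': 'medium'}}}
#   >>> opts = {}
#   >>> _getPropertyValue(schema, 'swarmSize', opts)
#   >>> opts
#   {'swarmSize': 'medium'}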
#############################################################################
def _getExperimentDescriptionSchema():
"""
Returns the experiment description schema. This implementation loads it in
from file experimentDescriptionSchema.json.
Parameters:
--------------------------------------------------------------------------
Returns: returns a dict representing the experiment description schema.
"""
installPath = os.path.dirname(os.path.abspath(__file__))
schemaFilePath = os.path.join(installPath, "experimentDescriptionSchema.json")
return json.loads(open(schemaFilePath, 'r').read())
#############################################################################
def _generateExperiment(options, outputDirPath, hsVersion,
claDescriptionTemplateFile):
""" Executes the --description option, which includes:
1. Perform provider compatibility checks
2. Preprocess the training and testing datasets (filter, join providers)
3. If test dataset omitted, split the training dataset into training
and testing datasets.
4. Gather statistics about the training and testing datasets.
     5. Generate experiment scripts (description.py, permutations.py)
Parameters:
--------------------------------------------------------------------------
options: dictionary that matches the schema defined by the return value of
_getExperimentDescriptionSchema(); NOTE: this arg may be modified
by this function.
outputDirPath: where to place generated files
hsVersion: which version of hypersearch permutations file to generate, can
be 'v1' or 'v2'
claDescriptionTemplateFile: Filename containing the template description
Returns: on success, returns a dictionary per _experimentResultsJSONSchema;
raises exception on error
  Assumption 1: input train and test files have identical field metadata
"""
_gExperimentDescriptionSchema = _getExperimentDescriptionSchema()
# Validate JSON arg using JSON schema validator
try:
validictory.validate(options, _gExperimentDescriptionSchema)
except Exception, e:
raise _InvalidCommandArgException(
("JSON arg validation failed for option --description: " + \
"%s\nOPTION ARG=%s") % (str(e), pprint.pformat(options)))
# Validate the streamDef
streamSchema = json.load(resource_stream(jsonschema.__name__,
'stream_def.json'))
try:
validictory.validate(options['streamDef'], streamSchema)
except Exception, e:
raise _InvalidCommandArgException(
("JSON arg validation failed for streamDef " + \
"%s\nOPTION ARG=%s") % (str(e), json.dumps(options)))
# -----------------------------------------------------------------------
# Handle legacy parameters from JAVA API server
# TODO: remove this!
_handleJAVAParameters(options)
# -----------------------------------------------------------------------
# Get default values
for propertyName in _gExperimentDescriptionSchema['properties']:
_getPropertyValue(_gExperimentDescriptionSchema, propertyName, options)
if options['inferenceArgs'] is not None:
infArgs = _gExperimentDescriptionSchema['properties']['inferenceArgs']
for schema in infArgs['type']:
if isinstance(schema, dict):
for propertyName in schema['properties']:
_getPropertyValue(schema, propertyName, options['inferenceArgs'])
if options['anomalyParams'] is not None:
anomalyArgs = _gExperimentDescriptionSchema['properties']['anomalyParams']
for schema in anomalyArgs['type']:
if isinstance(schema, dict):
for propertyName in schema['properties']:
_getPropertyValue(schema, propertyName, options['anomalyParams'])
# If the user specified nonTemporalClassification, make sure prediction
# steps is 0
predictionSteps = options['inferenceArgs'].get('predictionSteps', None)
if options['inferenceType'] == InferenceType.NontemporalClassification:
if predictionSteps is not None and predictionSteps != [0]:
raise RuntimeError("When NontemporalClassification is used, prediction"
" steps must be [0]")
# -------------------------------------------------------------------------
# If the user asked for 0 steps of prediction, then make this a spatial
# classification experiment
if predictionSteps == [0] \
and options['inferenceType'] in ['NontemporalMultiStep',
'TemporalMultiStep',
'MultiStep']:
options['inferenceType'] = InferenceType.NontemporalClassification
# If NontemporalClassification was chosen as the inferenceType, then the
# predicted field can NOT be used as an input
if options["inferenceType"] == InferenceType.NontemporalClassification:
if options["inferenceArgs"]["inputPredictedField"] == "yes" \
or options["inferenceArgs"]["inputPredictedField"] == "auto":
raise RuntimeError("When the inference type is NontemporalClassification"
" inputPredictedField must be set to 'no'")
options["inferenceArgs"]["inputPredictedField"] = "no"
# -----------------------------------------------------------------------
# Process the swarmSize setting, if provided
swarmSize = options['swarmSize']
if swarmSize is None:
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "auto"
elif swarmSize == 'small':
if options['minParticlesPerSwarm'] is None:
options['minParticlesPerSwarm'] = 3
if options['iterationCount'] is None:
options['iterationCount'] = 100
if options['maxModels'] is None:
options['maxModels'] = 1
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "yes"
elif swarmSize == 'medium':
if options['minParticlesPerSwarm'] is None:
options['minParticlesPerSwarm'] = 5
if options['iterationCount'] is None:
options['iterationCount'] = 4000
if options['maxModels'] is None:
options['maxModels'] = 200
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "auto"
elif swarmSize == 'large':
if options['minParticlesPerSwarm'] is None:
options['minParticlesPerSwarm'] = 15
#options['killUselessSwarms'] = False
#options['minFieldContribution'] = -1000
#options['maxFieldBranching'] = 10
#options['tryAll3FieldCombinations'] = True
options['tryAll3FieldCombinationsWTimestamps'] = True
if options["inferenceArgs"]["inputPredictedField"] is None:
options["inferenceArgs"]["inputPredictedField"] = "auto"
else:
raise RuntimeError("Unsupported swarm size: %s" % (swarmSize))
# -----------------------------------------------------------------------
# Get token replacements
tokenReplacements = dict()
#--------------------------------------------------------------------------
# Generate the encoder related substitution strings
includedFields = options['includedFields']
if hsVersion == 'v1':
(encoderSpecsStr, permEncoderChoicesStr) = \
_generateEncoderStringsV1(includedFields)
elif hsVersion in ['v2', 'ensemble']:
(encoderSpecsStr, permEncoderChoicesStr) = \
_generateEncoderStringsV2(includedFields, options)
else:
raise RuntimeError("Unsupported hsVersion of %s" % (hsVersion))
#--------------------------------------------------------------------------
# Generate the string containing the sensor auto-reset dict.
if options['resetPeriod'] is not None:
sensorAutoResetStr = pprint.pformat(options['resetPeriod'],
indent=2*_INDENT_STEP)
else:
sensorAutoResetStr = 'None'
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in options['streamDef']:
for key in aggregationPeriod.keys():
if key in options['streamDef']['aggregation']:
aggregationPeriod[key] = options['streamDef']['aggregation'][key]
if 'fields' in options['streamDef']['aggregation']:
for (fieldName, func) in options['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
# Form the aggregation strings
aggregationInfoStr = "%s" % (pprint.pformat(aggregationInfo,
indent=2*_INDENT_STEP))
# -----------------------------------------------------------------------
# Generate the string defining the dataset. This is basically the
# streamDef, but referencing the aggregation we already pulled out into the
# config dict (which enables permuting over it)
datasetSpec = options['streamDef']
if 'aggregation' in datasetSpec:
datasetSpec.pop('aggregation')
if hasAggregation:
datasetSpec['aggregation'] = '$SUBSTITUTE'
datasetSpecStr = pprint.pformat(datasetSpec, indent=2*_INDENT_STEP)
datasetSpecStr = datasetSpecStr.replace(
"'$SUBSTITUTE'", "config['aggregationInfo']")
datasetSpecStr = _indentLines(datasetSpecStr, 2, indentFirstLine=False)
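  # For illustration: pprint renders the placeholder as the quoted string
  # "'$SUBSTITUTE'", and the replace() above rewrites it to the unquoted
  # expression config['aggregationInfo'], so the generated description file
  # references the (permutable) aggregation settings rather than a literal.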
# -----------------------------------------------------------------------
# Was computeInterval specified with Multistep prediction? If so, this swarm
# should permute over different aggregations
computeInterval = options['computeInterval']
if computeInterval is not None \
and options['inferenceType'] in ['NontemporalMultiStep',
'TemporalMultiStep',
'MultiStep']:
# Compute the predictAheadTime based on the minAggregation (specified in
# the stream definition) and the number of prediction steps
predictionSteps = options['inferenceArgs'].get('predictionSteps', [1])
if len(predictionSteps) > 1:
raise _InvalidCommandArgException("Invalid predictionSteps: %s. " \
"When computeInterval is specified, there can only be one " \
"stepSize in predictionSteps." % predictionSteps)
if max(aggregationInfo.values()) == 0:
raise _InvalidCommandArgException("Missing or nil stream aggregation: "
"When computeInterval is specified, then the stream aggregation "
"interval must be non-zero.")
# Compute the predictAheadTime
numSteps = predictionSteps[0]
predictAheadTime = dict(aggregationPeriod)
for key in predictAheadTime.iterkeys():
predictAheadTime[key] *= numSteps
predictAheadTimeStr = pprint.pformat(predictAheadTime,
indent=2*_INDENT_STEP)
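    # e.g. an aggregationPeriod of {'minutes': 15, ...} with
    # predictionSteps == [4] yields a predictAheadTime of {'minutes': 60, ...}.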
# This tells us to plug in a wildcard string for the prediction steps that
# we use in other parts of the description file (metrics, inferenceArgs,
# etc.)
options['dynamicPredictionSteps'] = True
else:
options['dynamicPredictionSteps'] = False
predictAheadTimeStr = "None"
# -----------------------------------------------------------------------
# Save environment-common token substitutions
tokenReplacements['\$EXP_GENERATOR_PROGRAM_PATH'] = \
_quoteAndEscape(os.path.abspath(__file__))
# If the "uber" metric 'MultiStep' was specified, then plug in TemporalMultiStep
# by default
inferenceType = options['inferenceType']
if inferenceType == 'MultiStep':
inferenceType = InferenceType.TemporalMultiStep
tokenReplacements['\$INFERENCE_TYPE'] = "'%s'" % inferenceType
  # Nontemporal classification uses only the encoder and classifier
if inferenceType == InferenceType.NontemporalClassification:
tokenReplacements['\$SP_ENABLE'] = "False"
tokenReplacements['\$TP_ENABLE'] = "False"
else:
tokenReplacements['\$SP_ENABLE'] = "True"
tokenReplacements['\$TP_ENABLE'] = "True"
tokenReplacements['\$CLA_CLASSIFIER_IMPL'] = ""
tokenReplacements['\$ANOMALY_PARAMS'] = pprint.pformat(
options['anomalyParams'], indent=2*_INDENT_STEP)
tokenReplacements['\$ENCODER_SPECS'] = encoderSpecsStr
tokenReplacements['\$SENSOR_AUTO_RESET'] = sensorAutoResetStr
tokenReplacements['\$AGGREGATION_INFO'] = aggregationInfoStr
tokenReplacements['\$DATASET_SPEC'] = datasetSpecStr
if options['iterationCount'] is None:
options['iterationCount'] = -1
tokenReplacements['\$ITERATION_COUNT'] \
= str(options['iterationCount'])
tokenReplacements['\$SP_POOL_PCT'] \
= str(options['spCoincInputPoolPct'])
tokenReplacements['\$HS_MIN_PARTICLES'] \
= str(options['minParticlesPerSwarm'])
tokenReplacements['\$SP_PERM_CONNECTED'] \
= str(options['spSynPermConnected'])
tokenReplacements['\$FIELD_PERMUTATION_LIMIT'] \
= str(options['fieldPermutationLimit'])
tokenReplacements['\$PERM_ENCODER_CHOICES'] \
= permEncoderChoicesStr
predictionSteps = options['inferenceArgs'].get('predictionSteps', [1])
predictionStepsStr = ','.join([str(x) for x in predictionSteps])
tokenReplacements['\$PREDICTION_STEPS'] = "'%s'" % (predictionStepsStr)
tokenReplacements['\$PREDICT_AHEAD_TIME'] = predictAheadTimeStr
# Option permuting over SP synapse decrement value
tokenReplacements['\$PERM_SP_CHOICES'] = ""
if options['spPermuteDecrement'] \
and options['inferenceType'] != 'NontemporalClassification':
tokenReplacements['\$PERM_SP_CHOICES'] = \
_ONE_INDENT +"'synPermInactiveDec': PermuteFloat(0.0003, 0.1),\n"
# The TP permutation parameters are not required for non-temporal networks
if options['inferenceType'] in ['NontemporalMultiStep',
'NontemporalClassification']:
tokenReplacements['\$PERM_TP_CHOICES'] = ""
else:
tokenReplacements['\$PERM_TP_CHOICES'] = \
" 'activationThreshold': PermuteInt(12, 16),\n" \
+ " 'minThreshold': PermuteInt(9, 12),\n" \
+ " 'pamLength': PermuteInt(1, 5),\n"
# If the inference type is just the generic 'MultiStep', then permute over
# temporal/nonTemporal multistep
if options['inferenceType'] == 'MultiStep':
tokenReplacements['\$PERM_INFERENCE_TYPE_CHOICES'] = \
" 'inferenceType': PermuteChoices(['NontemporalMultiStep', " \
+ "'TemporalMultiStep']),"
else:
tokenReplacements['\$PERM_INFERENCE_TYPE_CHOICES'] = ""
# The Classifier permutation parameters are only required for
# Multi-step inference types
if options['inferenceType'] in ['NontemporalMultiStep', 'TemporalMultiStep',
'MultiStep', 'TemporalAnomaly',
'NontemporalClassification']:
tokenReplacements['\$PERM_CL_CHOICES'] = \
" 'alpha': PermuteFloat(0.0001, 0.1),\n"
else:
tokenReplacements['\$PERM_CL_CHOICES'] = ""
# The Permutations alwaysIncludePredictedField setting.
# * When the experiment description has 'inputPredictedField' set to 'no', we
# simply do not put in an encoder for the predicted field.
# * When 'inputPredictedField' is set to 'auto', we include an encoder for the
# predicted field and swarming tries it out just like all the other fields.
# * When 'inputPredictedField' is set to 'yes', we include this setting in
# the permutations file which informs swarming to always use the
# predicted field (the first swarm will be the predicted field only)
tokenReplacements['\$PERM_ALWAYS_INCLUDE_PREDICTED_FIELD'] = \
"inputPredictedField = '%s'" % \
(options["inferenceArgs"]["inputPredictedField"])
# The Permutations minFieldContribution setting
if options.get('minFieldContribution', None) is not None:
tokenReplacements['\$PERM_MIN_FIELD_CONTRIBUTION'] = \
"minFieldContribution = %d" % (options['minFieldContribution'])
else:
tokenReplacements['\$PERM_MIN_FIELD_CONTRIBUTION'] = ""
# The Permutations killUselessSwarms setting
if options.get('killUselessSwarms', None) is not None:
tokenReplacements['\$PERM_KILL_USELESS_SWARMS'] = \
"killUselessSwarms = %r" % (options['killUselessSwarms'])
else:
tokenReplacements['\$PERM_KILL_USELESS_SWARMS'] = ""
# The Permutations maxFieldBranching setting
if options.get('maxFieldBranching', None) is not None:
tokenReplacements['\$PERM_MAX_FIELD_BRANCHING'] = \
"maxFieldBranching = %r" % (options['maxFieldBranching'])
else:
tokenReplacements['\$PERM_MAX_FIELD_BRANCHING'] = ""
# The Permutations tryAll3FieldCombinations setting
if options.get('tryAll3FieldCombinations', None) is not None:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS'] = \
"tryAll3FieldCombinations = %r" % (options['tryAll3FieldCombinations'])
else:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS'] = ""
# The Permutations tryAll3FieldCombinationsWTimestamps setting
if options.get('tryAll3FieldCombinationsWTimestamps', None) is not None:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS_W_TIMESTAMPS'] = \
"tryAll3FieldCombinationsWTimestamps = %r" % \
(options['tryAll3FieldCombinationsWTimestamps'])
else:
tokenReplacements['\$PERM_TRY_ALL_3_FIELD_COMBINATIONS_W_TIMESTAMPS'] = ""
# The Permutations fieldFields setting
if options.get('fixedFields', None) is not None:
tokenReplacements['\$PERM_FIXED_FIELDS'] = \
"fixedFields = %r" % (options['fixedFields'])
else:
tokenReplacements['\$PERM_FIXED_FIELDS'] = ""
# The Permutations fastSwarmModelParams setting
if options.get('fastSwarmModelParams', None) is not None:
tokenReplacements['\$PERM_FAST_SWARM_MODEL_PARAMS'] = \
"fastSwarmModelParams = %r" % (options['fastSwarmModelParams'])
else:
tokenReplacements['\$PERM_FAST_SWARM_MODEL_PARAMS'] = ""
# The Permutations maxModels setting
if options.get('maxModels', None) is not None:
tokenReplacements['\$PERM_MAX_MODELS'] = \
"maxModels = %r" % (options['maxModels'])
else:
tokenReplacements['\$PERM_MAX_MODELS'] = ""
# --------------------------------------------------------------------------
# The Aggregation choices have to be determined when we are permuting over
# aggregations.
if options['dynamicPredictionSteps']:
debugAgg = True
    # First, we need to error check to ensure that computeInterval is an integer
    # multiple of minAggregation (aggregationPeriod)
quotient = aggregationDivide(computeInterval, aggregationPeriod)
(isInt, multiple) = _isInt(quotient)
if not isInt or multiple < 1:
raise _InvalidCommandArgException("Invalid computeInterval: %s. "
"computeInterval must be an integer multiple of the stream "
"aggregation (%s)." % (computeInterval, aggregationPeriod))
# The valid aggregation choices are governed by the following constraint,
# 1.) (minAggregation * N) * M = predictAheadTime
# (minAggregation * N) * M = maxPredictionSteps * minAggregation
# N * M = maxPredictionSteps
#
# 2.) computeInterval = K * aggregation
# computeInterval = K * (minAggregation * N)
#
# where: aggregation = minAggregation * N
# K, M and N are integers >= 1
# N = aggregation / minAggregation
# M = predictionSteps, for a particular aggregation
# K = number of predictions within each compute interval
#
    # Let's build up a list of the possible N's that satisfy the
    # N * M = maxPredictionSteps constraint
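    # Worked example (illustrative numbers): with maxPredictionSteps = 24 and
    # computeInterval = 4 * minAggregation, N must divide 24 (from N * M = 24)
    # and K = 4 / N must be a positive integer (from constraint 2.), leaving
    # N in {1, 2, 4}, i.e. aggregations of 1x, 2x and 4x minAggregation.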
mTimesN = float(predictionSteps[0])
possibleNs = []
for n in xrange(1, int(mTimesN)+1):
m = mTimesN / n
mInt = int(round(m))
if mInt < 1:
break
if abs(m - mInt) > 0.0001 * m:
continue
possibleNs.append(n)
if debugAgg:
print "All integer factors of %d are: %s" % (mTimesN, possibleNs)
# Now go through and throw out any N's that don't satisfy the constraint:
# computeInterval = K * (minAggregation * N)
aggChoices = []
for n in possibleNs:
# Compute minAggregation * N
agg = dict(aggregationPeriod)
for key in agg.iterkeys():
agg[key] *= n
# Make sure computeInterval is an integer multiple of the aggregation
# period
quotient = aggregationDivide(computeInterval, agg)
#print computeInterval, agg
#print quotient
#import sys; sys.exit()
(isInt, multiple) = _isInt(quotient)
if not isInt or multiple < 1:
continue
aggChoices.append(agg)
    # Only evaluate up to 5 different aggregations
aggChoices = aggChoices[-5:]
if debugAgg:
print "Aggregation choices that will be evaluted during swarming:"
for agg in aggChoices:
print " ==>", agg
print
tokenReplacements['\$PERM_AGGREGATION_CHOICES'] = (
"PermuteChoices(%s)" % (
pprint.pformat(aggChoices, indent=2*_INDENT_STEP)))
else:
tokenReplacements['\$PERM_AGGREGATION_CHOICES'] = aggregationInfoStr
# Generate the inferenceArgs replacement tokens
_generateInferenceArgs(options, tokenReplacements)
# Generate the metric replacement tokens
_generateMetricsSubstitutions(options, tokenReplacements)
# -----------------------------------------------------------------------
# Generate Control dictionary
environment = options['environment']
if environment == OpfEnvironment.Grok:
tokenReplacements['\$ENVIRONMENT'] = "'%s'"%OpfEnvironment.Grok
controlTemplate = "grokEnvironmentTemplate.tpl"
elif environment == OpfEnvironment.Experiment:
tokenReplacements['\$ENVIRONMENT'] = "'%s'"%OpfEnvironment.Experiment
controlTemplate = "opfExperimentTemplate.tpl"
else:
raise _InvalidCommandArgException("Invalid environment type %s"% environment)
# -----------------------------------------------------------------------
if outputDirPath is None:
outputDirPath = tempfile.mkdtemp()
if not os.path.exists(outputDirPath):
os.makedirs(outputDirPath)
print "Generating experiment files in directory: %s..." % (outputDirPath)
descriptionPyPath = os.path.join(outputDirPath, "description.py")
_generateFileFromTemplates([claDescriptionTemplateFile, controlTemplate],
descriptionPyPath,
tokenReplacements)
permutationsPyPath = os.path.join(outputDirPath, "permutations.py")
if hsVersion == 'v1':
_generateFileFromTemplates(['permutationsTemplateV1.tpl'],permutationsPyPath,
tokenReplacements)
elif hsVersion == 'ensemble':
_generateFileFromTemplates(['permutationsTemplateEnsemble.tpl'],permutationsPyPath,
tokenReplacements)
elif hsVersion == 'v2':
_generateFileFromTemplates(['permutationsTemplateV2.tpl'],permutationsPyPath,
tokenReplacements)
else:
raise(ValueError("This permutation version is not supported yet: %s" %
hsVersion))
print "done."
#############################################################################
def _generateMetricsSubstitutions(options, tokenReplacements):
"""Generate the token substitution for metrics related fields.
This includes:
\$METRICS
\$LOGGED_METRICS
\$PERM_OPTIMIZE_SETTING
"""
# -----------------------------------------------------------------------
#
options['loggedMetrics'] = [".*"]
# -----------------------------------------------------------------------
# Generate the required metrics
metricList, optimizeMetricLabel = _generateMetricSpecs(options)
metricListString = ",\n".join(metricList)
metricListString = _indentLines(metricListString, 2, indentFirstLine=False)
permOptimizeSettingStr = 'minimize = "%s"' % optimizeMetricLabel
# -----------------------------------------------------------------------
# Specify which metrics should be logged
loggedMetricsListAsStr = "[%s]" % (", ".join(["'%s'"% ptrn
for ptrn in options['loggedMetrics']]))
tokenReplacements['\$LOGGED_METRICS'] \
= loggedMetricsListAsStr
tokenReplacements['\$METRICS'] = metricListString
tokenReplacements['\$PERM_OPTIMIZE_SETTING'] \
= permOptimizeSettingStr
#############################################################################
def _generateMetricSpecs(options):
""" Generates the Metrics for a given InferenceType
Parameters:
-------------------------------------------------------------------------
options: ExpGenerator options
retval: (metricsList, optimizeMetricLabel)
metricsList: list of metric string names
optimizeMetricLabel: Name of the metric which to optimize over
"""
inferenceType = options['inferenceType']
inferenceArgs = options['inferenceArgs']
predictionSteps = inferenceArgs['predictionSteps']
metricWindow = options['metricWindow']
if metricWindow is None:
metricWindow = int(Configuration.get("nupic.opf.metricWindow"))
metricSpecStrings = []
optimizeMetricLabel = ""
# -----------------------------------------------------------------------
  # Generate the metrics specified by the expGenerator parameters
metricSpecStrings.extend(_generateExtraMetricSpecs(options))
# -----------------------------------------------------------------------
optimizeMetricSpec = None
# If using a dynamically computed prediction steps (i.e. when swarming
# over aggregation is requested), then we will plug in the variable
# predictionSteps in place of the statically provided predictionSteps
# from the JSON description.
if options['dynamicPredictionSteps']:
assert len(predictionSteps) == 1
predictionSteps = ['$REPLACE_ME']
# -----------------------------------------------------------------------
# Metrics for temporal prediction
if inferenceType in (InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
'MultiStep'):
predictedFieldName, predictedFieldType = _getPredictedField(options)
isCategory = _isCategory(predictedFieldType)
metricNames = ('avg_err',) if isCategory else ('aae', 'altMAPE')
trivialErrorMetric = 'avg_err' if isCategory else 'altMAPE'
oneGramErrorMetric = 'avg_err' if isCategory else 'altMAPE'
movingAverageBaselineName = 'moving_mode' if isCategory else 'moving_mean'
# Multi-step metrics
for metricName in metricNames:
metricSpec, metricLabel = \
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.multiStepBestPredictions,
metric='multiStep',
params={'errorMetric': metricName,
'window':metricWindow,
'steps': predictionSteps},
returnLabel=True)
metricSpecStrings.append(metricSpec)
# If the custom error metric was specified, add that
if options["customErrorMetric"] is not None :
metricParams = dict(options["customErrorMetric"])
metricParams['errorMetric'] = 'custom_error_metric'
metricParams['steps'] = predictionSteps
# If errorWindow is not specified, make it equal to the default window
if not "errorWindow" in metricParams:
metricParams["errorWindow"] = metricWindow
metricSpec, metricLabel =_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.multiStepPredictions,
metric="multiStep",
params=metricParams,
returnLabel=True)
metricSpecStrings.append(metricSpec)
# If this is the first specified step size, optimize for it. Be sure to
# escape special characters since this is a regular expression
optimizeMetricSpec = metricSpec
metricLabel = metricLabel.replace('[', '\\[')
metricLabel = metricLabel.replace(']', '\\]')
optimizeMetricLabel = metricLabel
if options["customErrorMetric"] is not None :
optimizeMetricLabel = ".*custom_error_metric.*"
# Add in the trivial metrics
if options["runBaselines"] \
and inferenceType != InferenceType.NontemporalClassification:
for steps in predictionSteps:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric="trivial",
params={'window':metricWindow,
"errorMetric":trivialErrorMetric,
'steps': steps})
)
##Add in the One-Gram baseline error metric
#metricSpecStrings.append(
# _generateMetricSpecString(field=predictedFieldName,
# inferenceElement=InferenceElement.encodings,
# metric="two_gram",
# params={'window':metricWindow,
# "errorMetric":oneGramErrorMetric,
# 'predictionField':predictedFieldName,
# 'steps': steps})
# )
#
#Include the baseline moving mean/mode metric
if isCategory:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric=movingAverageBaselineName,
                                    params={'window': metricWindow,
                                            'errorMetric': 'avg_err',
                                            'mode_window': 200,
                                            'steps': steps})
)
      else:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric=movingAverageBaselineName,
                                    params={'window': metricWindow,
                                            'errorMetric': 'altMAPE',
                                            'mean_window': 200,
                                            'steps': steps})
)
# -----------------------------------------------------------------------
# Metrics for classification
  elif inferenceType in (InferenceType.TemporalClassification,):
metricName = 'avg_err'
trivialErrorMetric = 'avg_err'
oneGramErrorMetric = 'avg_err'
movingAverageBaselineName = 'moving_mode'
optimizeMetricSpec, optimizeMetricLabel = \
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric=metricName,
params={'window':metricWindow},
returnLabel=True)
metricSpecStrings.append(optimizeMetricSpec)
if options["runBaselines"]:
# If temporal, generate the trivial predictor metric
if inferenceType == InferenceType.TemporalClassification:
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric="trivial",
params={'window':metricWindow,
"errorMetric":trivialErrorMetric})
)
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric="two_gram",
params={'window':metricWindow,
"errorMetric":oneGramErrorMetric})
)
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric=movingAverageBaselineName,
                                params={'window': metricWindow,
                                        'errorMetric': 'avg_err',
                                        'mode_window': 200})
)
# Custom Error Metric
if not options["customErrorMetric"] == None :
#If errorWindow is not specified, make it equal to the default window
if not "errorWindow" in options["customErrorMetric"]:
options["customErrorMetric"]["errorWindow"] = metricWindow
optimizeMetricSpec = _generateMetricSpecString(
inferenceElement=InferenceElement.classification,
metric="custom",
params=options["customErrorMetric"])
optimizeMetricLabel = ".*custom_error_metric.*"
metricSpecStrings.append(optimizeMetricSpec)
# -----------------------------------------------------------------------
  # Plug in the predictionSteps variable for any dynamically generated
  # prediction steps
if options['dynamicPredictionSteps']:
for i in range(len(metricSpecStrings)):
metricSpecStrings[i] = metricSpecStrings[i].replace(
"'$REPLACE_ME'", "predictionSteps")
optimizeMetricLabel = optimizeMetricLabel.replace(
"'$REPLACE_ME'", ".*")
return metricSpecStrings, optimizeMetricLabel
#############################################################################
def _generateExtraMetricSpecs(options):
"""Generates the non-default metrics specified by the expGenerator params """
global _metricSpecSchema
results = []
for metric in options['metrics']:
for propertyName in _metricSpecSchema['properties'].keys():
_getPropertyValue(_metricSpecSchema, propertyName, metric)
specString, label = _generateMetricSpecString(
field=metric['field'],
metric=metric['metric'],
params=metric['params'],
inferenceElement=\
metric['inferenceElement'],
returnLabel=True)
if metric['logged']:
options['loggedMetrics'].append(label)
results.append(specString)
return results
#############################################################################
def _getPredictedField(options):
""" Gets the predicted field and it's datatype from the options dictionary
Returns: (predictedFieldName, predictedFieldType)
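  Example (hypothetical values): with includedFields containing
  {'fieldName': 'consumption', 'fieldType': 'float'} and
  inferenceArgs['predictedField'] == 'consumption', this returns
  ('consumption', 'float').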
"""
if not options['inferenceArgs'] or \
not options['inferenceArgs']['predictedField']:
return None, None
predictedField = options['inferenceArgs']['predictedField']
predictedFieldInfo = None
includedFields = options['includedFields']
for info in includedFields:
if info['fieldName'] == predictedField:
predictedFieldInfo = info
break
assert predictedFieldInfo
predictedFieldType = predictedFieldInfo['fieldType']
return predictedField, predictedFieldType
#############################################################################
def _generateInferenceArgs(options, tokenReplacements):
""" Generates the token substitutions related to the predicted field
and the supplemental arguments for prediction
"""
inferenceType = options['inferenceType']
optionInferenceArgs = options.get('inferenceArgs', None)
resultInferenceArgs = {}
predictedField = _getPredictedField(options)[0]
if inferenceType in (InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly):
assert predictedField, "Inference Type '%s' needs a predictedField "\
"specified in the inferenceArgs dictionary"\
% inferenceType
if optionInferenceArgs:
# If we will be using a dynamically created predictionSteps, plug in that
# variable name in place of the constant scalar value
if options['dynamicPredictionSteps']:
altOptionInferenceArgs = copy.deepcopy(optionInferenceArgs)
altOptionInferenceArgs['predictionSteps'] = '$REPLACE_ME'
resultInferenceArgs = pprint.pformat(altOptionInferenceArgs)
resultInferenceArgs = resultInferenceArgs.replace("'$REPLACE_ME'",
'[predictionSteps]')
else:
resultInferenceArgs = pprint.pformat(optionInferenceArgs)
tokenReplacements['\$INFERENCE_ARGS'] = resultInferenceArgs
tokenReplacements['\$PREDICTION_FIELD'] = predictedField
#############################################################################
def expGenerator(args):
""" Parses, validates, and executes command-line options;
On success: Performs requested operation and exits program normally
On Error: Dumps exception/error info in JSON format to stdout and exits the
program with non-zero status.
"""
# -----------------------------------------------------------------
# Parse command line options
#
parser = OptionParser()
parser.set_usage("%prog [options] --description='{json object with args}'\n" + \
"%prog [options] --descriptionFromFile='{filename}'\n" + \
"%prog [options] --showSchema")
parser.add_option("--description", dest = "description",
help = "Tells ExpGenerator to generate an experiment description.py and " \
"permutations.py file using the given JSON formatted experiment "\
"description string.")
parser.add_option("--descriptionFromFile", dest = 'descriptionFromFile',
help = "Tells ExpGenerator to open the given filename and use it's " \
"contents as the JSON formatted experiment description.")
parser.add_option("--claDescriptionTemplateFile",
dest = 'claDescriptionTemplateFile',
default = 'claDescriptionTemplate.tpl',
help = "The file containing the template description file for " \
" ExpGenerator [default: %default]")
parser.add_option("--showSchema",
action="store_true", dest="showSchema",
help="Prints the JSON schemas for the --description arg.")
parser.add_option("--version", dest = 'version', default='v2',
help = "Generate the permutations file for this version of hypersearch."
" Possible choices are 'v1' and 'v2' [default: %default].")
parser.add_option("--outDir",
dest = "outDir", default=None,
help = "Where to generate experiment. If not specified, " \
"then a temp directory will be created"
)
(options, remainingArgs) = parser.parse_args(args)
#print("OPTIONS=%s" % (str(options)))
# -----------------------------------------------------------------
# Check for unprocessed args
#
if len(remainingArgs) > 0:
raise _InvalidCommandArgException(
_makeUsageErrorStr("Unexpected command-line args: <%s>" % \
(' '.join(remainingArgs),), parser.get_usage()))
# -----------------------------------------------------------------
# Check for use of mutually-exclusive options
#
  activeOptions = filter(lambda x: getattr(options, x) is not None,
                         ('description', 'showSchema'))
if len(activeOptions) > 1:
raise _InvalidCommandArgException(
_makeUsageErrorStr(("The specified command options are " + \
"mutually-exclusive: %s") % (activeOptions,),
parser.get_usage()))
# -----------------------------------------------------------------
# Process requests
#
if options.showSchema:
_handleShowSchemaOption()
elif options.description:
_handleDescriptionOption(options.description, options.outDir,
parser.get_usage(), hsVersion=options.version,
claDescriptionTemplateFile = options.claDescriptionTemplateFile)
elif options.descriptionFromFile:
_handleDescriptionFromFileOption(options.descriptionFromFile,
options.outDir, parser.get_usage(), hsVersion=options.version,
claDescriptionTemplateFile = options.claDescriptionTemplateFile)
else:
raise _InvalidCommandArgException(
_makeUsageErrorStr("Error in validating command options. No option "
"provided:\n", parser.get_usage()))
#############################################################################
if __name__ == '__main__':
expGenerator(sys.argv[1:])
| 1 | 15,191 | Use a more granular error type. I would recommend `ValueError` in this case. | numenta-nupic | py |
@@ -2699,6 +2699,9 @@ void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t st
player->sendTextMessage(MSG_INFO_DESCR, "You can not trade more than 100 items.");
return;
}
+	if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
+ return;
+ }
internalStartTrade(player, tradePartner, tradeItem);
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2014 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "pugicast.h"
#include "items.h"
#include "commands.h"
#include "creature.h"
#include "monster.h"
#include "game.h"
#include "tile.h"
#include "house.h"
#include "actions.h"
#include "combat.h"
#include "iologindata.h"
#include "iomarket.h"
#include "chat.h"
#include "talkaction.h"
#include "spells.h"
#include "configmanager.h"
#include "ban.h"
#include "raids.h"
#include "database.h"
#include "server.h"
#include "ioguild.h"
#include "quests.h"
#include "globalevent.h"
#include "mounts.h"
#include "beds.h"
#include "scheduler.h"
#include "monster.h"
#include "spawn.h"
#include "connection.h"
#include "events.h"
extern ConfigManager g_config;
extern Actions* g_actions;
extern Chat g_chat;
extern TalkActions* g_talkActions;
extern Spells* g_spells;
extern Vocations g_vocations;
extern GlobalEvents* g_globalEvents;
extern Events* g_events;
Game::Game() :
wildcardTree(false),
offlineTrainingWindow(std::numeric_limits<uint32_t>::max(), "Choose a Skill", "Please choose a skill:")
{
gameState = GAME_STATE_NORMAL;
worldType = WORLD_TYPE_PVP;
services = nullptr;
lastStageLevel = 0;
playersRecord = 0;
motdNum = 0;
useLastStageLevel = false;
stagesEnabled = false;
lastBucket = 0;
	// An in-game day lasts dayCycle (3600) real seconds; each 10-second light
	// event therefore advances the in-game clock by
	// (1440 minutes/day) / (dayCycle seconds/day) * 10 seconds = 4 minutes.
int32_t dayCycle = 3600;
lightHourDelta = 1440 * 10 / dayCycle;
lightHour = SUNRISE + (SUNSET - SUNRISE) / 2;
lightLevel = LIGHT_LEVEL_DAY;
lightState = LIGHT_STATE_DAY;
offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD);
offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE);
offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB);
offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DIST);
offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL__MAGLEVEL);
offlineTrainingWindow.buttons.emplace_back("Okay", 1);
offlineTrainingWindow.buttons.emplace_back("Cancel", 0);
offlineTrainingWindow.defaultEnterButton = 1;
offlineTrainingWindow.defaultEscapeButton = 0;
offlineTrainingWindow.priority = true;
}
Game::~Game()
{
for (const auto& it : guilds) {
delete it.second;
}
}
void Game::start(ServiceManager* servicer)
{
services = servicer;
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0)));
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
}
GameState_t Game::getGameState() const
{
return gameState;
}
void Game::setWorldType(WorldType_t type)
{
worldType = type;
}
void Game::setGameState(GameState_t newState)
{
if (gameState == GAME_STATE_SHUTDOWN) {
return; //this cannot be stopped
}
if (gameState == newState) {
return;
}
gameState = newState;
switch (newState) {
case GAME_STATE_INIT: {
commands.loadFromXml();
loadExperienceStages();
groups.load();
g_chat.load();
Spawns::getInstance()->startup();
Raids::getInstance()->loadFromXml();
Raids::getInstance()->startup();
Quests::getInstance()->loadFromXml();
Mounts::getInstance()->loadFromXml();
loadMotdNum();
loadPlayersRecord();
g_globalEvents->startup();
break;
}
case GAME_STATE_SHUTDOWN: {
g_globalEvents->execute(GLOBALEVENT_SHUTDOWN);
//kick all players that are still online
auto it = players.begin();
while (it != players.end()) {
it->second->kickPlayer(true);
it = players.begin();
}
saveMotdNum();
saveGameState();
g_dispatcher.addTask(
createTask(std::bind(&Game::shutdown, this)));
g_scheduler.stop();
g_dispatcher.stop();
break;
}
case GAME_STATE_CLOSED: {
/* kick all players without the CanAlwaysLogin flag */
auto it = players.begin();
while (it != players.end()) {
if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) {
it->second->kickPlayer(true);
it = players.begin();
} else {
++it;
}
}
saveGameState();
break;
}
default:
break;
}
}
void Game::saveGameState()
{
if (gameState == GAME_STATE_NORMAL) {
setGameState(GAME_STATE_MAINTAIN);
}
std::cout << "Saving server..." << std::endl;
for (const auto& it : players) {
it.second->loginPosition = it.second->getPosition();
IOLoginData::savePlayer(it.second);
}
map.saveMap();
if (gameState == GAME_STATE_MAINTAIN) {
setGameState(GAME_STATE_NORMAL);
}
}
int32_t Game::loadMainMap(const std::string& filename)
{
Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE);
Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS);
return map.loadMap("data/world/" + filename + ".otbm");
}
void Game::loadMap(const std::string& path)
{
map.loadMap(path);
}
Cylinder* Game::internalGetCylinder(Player* player, const Position& pos)
{
if (pos.x != 0xFFFF) {
return getTile(pos.x, pos.y, pos.z);
}
//container
if (pos.y & 0x40) {
uint8_t from_cid = pos.y & 0x0F;
return player->getContainerByID(from_cid);
}
//inventory
return player;
}
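// A sketch of the overloaded client position encoding, as handled by
// internalGetCylinder above and internalGetThing below (derived from the
// checks in this file):
//   pos.x == 0xFFFF            -> not a map coordinate
//     pos.y & 0x40             -> container: low nibble of pos.y is the
//                                 container id, pos.z is the slot index
//     pos.y == 0 && pos.z == 0 -> lookup by sprite id (presumably hotkey use)
//     otherwise                -> inventory: pos.y is the slots_t value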
Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId /*= 0*/, stackPosType_t type /*= STACKPOS_NORMAL*/)
{
if (pos.x != 0xFFFF) {
Tile* tile = getTile(pos.x, pos.y, pos.z);
if (tile) {
/*look at*/
if (type == STACKPOS_LOOK) {
return tile->getTopVisibleThing(player);
}
Thing* thing;
/*for move operations*/
if (type == STACKPOS_MOVE) {
Item* item = tile->getTopDownItem();
if (item && item->isMoveable()) {
thing = item;
} else {
thing = tile->getTopVisibleCreature(player);
}
} else if (type == STACKPOS_USEITEM) {
//First check items with topOrder 2 (ladders, signs, splashes)
Item* item = tile->getItemByTopOrder(2);
if (item && g_actions->hasAction(item)) {
thing = item;
} else {
//then down items
thing = tile->getTopDownItem();
if (!thing) {
thing = tile->getTopTopItem(); //then last we check items with topOrder 3 (doors etc)
if (!thing) {
thing = tile->ground;
}
}
}
} else if (type == STACKPOS_USE) {
thing = tile->getTopDownItem();
} else {
thing = tile->__getThing(index);
}
if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
			//do extra checks here if the thing is accessible
if (thing && thing->getItem()) {
if (tile->hasProperty(ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
thing = nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
thing = nullptr;
}
}
}
}
return thing;
}
} else {
//container
if (pos.y & 0x40) {
uint8_t fromCid = pos.y & 0x0F;
uint8_t slot = pos.z;
Container* parentContainer = player->getContainerByID(fromCid);
if (!parentContainer) {
return nullptr;
}
if (parentContainer->getID() == ITEM_BROWSEFIELD) {
Tile* tile = parentContainer->getTile();
if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
if (tile->hasProperty(ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
return nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
return nullptr;
}
}
}
}
return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot);
} else if (pos.y == 0 && pos.z == 0) {
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return nullptr;
}
int32_t subType;
if (it.isFluidContainer() && index < int32_t(sizeof(reverseFluidMap) / sizeof(int8_t))) {
subType = reverseFluidMap[index];
} else {
subType = -1;
}
return findItemOfType(player, it.id, true, subType);
} else { //inventory
slots_t slot = static_cast<slots_t>(pos.y);
return player->getInventoryItem(slot);
}
}
return nullptr;
}
void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos)
{
pos.x = 0;
pos.y = 0;
pos.z = 0;
stackpos = 0;
Cylinder* topParent = item->getTopParent();
if (topParent) {
if (Player* player = dynamic_cast<Player*>(topParent)) {
pos.x = 0xFFFF;
Container* container = dynamic_cast<Container*>(item->getParent());
if (container) {
pos.y = (uint16_t)0x40 | (uint16_t)player->getContainerID(container);
pos.z = container->__getIndexOfThing(item);
stackpos = pos.z;
} else {
pos.y = player->__getIndexOfThing(item);
stackpos = pos.y;
}
} else if (Tile* tile = topParent->getTile()) {
pos = tile->getPosition();
stackpos = tile->__getIndexOfThing(item);
}
}
}
void Game::setTile(Tile* newTile)
{
return map.setTile(newTile->getPosition(), newTile);
}
Tile* Game::getTile(int32_t x, int32_t y, int32_t z)
{
return map.getTile(x, y, z);
}
Tile* Game::getTile(const Position& pos)
{
return map.getTile(pos.x, pos.y, pos.z);
}
QTreeLeafNode* Game::getLeaf(uint32_t x, uint32_t y)
{
return map.getLeaf(x, y);
}
Creature* Game::getCreatureByID(uint32_t id)
{
if (id <= Player::playerAutoID) {
return getPlayerByID(id);
} else if (id <= Monster::monsterAutoID) {
return getMonsterByID(id);
} else if (id <= Npc::npcAutoID) {
return getNpcByID(id);
}
return nullptr;
}
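// Note: the range dispatch above relies on the auto-id bases being ordered
// (player ids below monster ids below npc ids); this is an assumption about
// how playerAutoID/monsterAutoID/npcAutoID are initialized elsewhere.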
Monster* Game::getMonsterByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = monsters.find(id);
if (it == monsters.end()) {
return nullptr;
}
return it->second;
}
Npc* Game::getNpcByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = npcs.find(id);
if (it == npcs.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = players.find(id);
if (it == players.end()) {
return nullptr;
}
return it->second;
}
Creature* Game::getCreatureByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const std::string& lowerCaseName = asLowerCaseString(s);
auto m_it = mappedPlayerNames.find(lowerCaseName);
if (m_it != mappedPlayerNames.end()) {
return m_it->second;
}
for (const auto& it : npcs) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
for (const auto& it : monsters) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
return nullptr;
}
Player* Game::getPlayerByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
auto it = mappedPlayerNames.find(asLowerCaseString(s));
if (it == mappedPlayerNames.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByGUID(const uint32_t& guid)
{
if (guid == 0) {
return nullptr;
}
for (const auto& it : players) {
if (guid == it.second->getGUID()) {
return it.second;
}
}
return nullptr;
}
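// Resolves names with a trailing '~' wildcard: e.g. "dru~" is matched via
// the wildcard tree against online player names starting with "dru", and an
// ambiguous or unknown prefix is reported through the ReturnValue (this is
// a reading of wildcardTree.findOne's contract, not verified here).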
ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
{
size_t strlen = s.length();
if (strlen == 0 || strlen > 20) {
return RET_PLAYERWITHTHISNAMEISNOTONLINE;
}
if (s.back() == '~') {
const std::string& query = asLowerCaseString(s.substr(0, strlen - 1));
std::string result;
ReturnValue ret = wildcardTree.findOne(query, result);
if (ret != RET_NOERROR) {
return ret;
}
player = getPlayerByName(result);
} else {
player = getPlayerByName(s);
}
if (!player) {
return RET_PLAYERWITHTHISNAMEISNOTONLINE;
}
return RET_NOERROR;
}
Player* Game::getPlayerByAccount(uint32_t acc)
{
for (const auto& it : players) {
if (it.second->getAccount() == acc) {
return it.second;
}
}
return nullptr;
}
bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (creature->getParent() != nullptr) {
return false;
}
if (!map.placeCreature(pos, creature, extendedPos, forced)) {
return false;
}
creature->useThing2();
creature->setID();
creature->addList();
return true;
}
bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (!internalPlaceCreature(creature, pos, extendedPos, forced)) {
return false;
}
SpectatorVec list;
getSpectators(list, creature->getPosition(), true);
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true);
}
}
for (Creature* spectator : list) {
spectator->onCreatureAppear(creature, true);
}
Cylinder* creatureParent = creature->getParent();
int32_t newIndex = creatureParent->__getIndexOfThing(creature);
creatureParent->postAddNotification(creature, nullptr, newIndex);
// TODO: Move this code to Player::onCreatureAppear where creature == this.
Player* player = creature->getPlayer();
if (player) {
int32_t offlineTime;
if (player->getLastLogout() != 0) {
// Not counting more than 21 days to prevent overflow when multiplying with 1000 (for milliseconds).
offlineTime = std::min<int32_t>(time(nullptr) - player->getLastLogout(), 86400 * 21);
} else {
offlineTime = 0;
}
Condition* conditionMuted = player->getCondition(CONDITION_MUTED, CONDITIONID_DEFAULT);
if (conditionMuted && conditionMuted->getTicks() > 0) {
conditionMuted->setTicks(conditionMuted->getTicks() - (offlineTime * 1000));
if (conditionMuted->getTicks() <= 0) {
player->removeCondition(conditionMuted);
} else {
player->addCondition(conditionMuted->clone());
}
}
Condition* conditionTrade = player->getCondition(CONDITION_CHANNELMUTEDTICKS, CONDITIONID_DEFAULT, CHANNEL_ADVERTISING);
if (conditionTrade && conditionTrade->getTicks() > 0) {
conditionTrade->setTicks(conditionTrade->getTicks() - (offlineTime * 1000));
if (conditionTrade->getTicks() <= 0) {
player->removeCondition(conditionTrade);
} else {
player->addCondition(conditionTrade->clone());
}
}
Condition* conditionTradeRook = player->getCondition(CONDITION_CHANNELMUTEDTICKS, CONDITIONID_DEFAULT, CHANNEL_ADVERTISINGROOKGAARD);
if (conditionTradeRook && conditionTradeRook->getTicks() > 0) {
conditionTradeRook->setTicks(conditionTradeRook->getTicks() - (offlineTime * 1000));
if (conditionTradeRook->getTicks() <= 0) {
player->removeCondition(conditionTradeRook);
} else {
player->addCondition(conditionTradeRook->clone());
}
}
Condition* conditionHelp = player->getCondition(CONDITION_CHANNELMUTEDTICKS, CONDITIONID_DEFAULT, CHANNEL_HELP);
if (conditionHelp && conditionHelp->getTicks() > 0) {
conditionHelp->setTicks(conditionHelp->getTicks() - (offlineTime * 1000));
if (conditionHelp->getTicks() <= 0) {
player->removeCondition(conditionHelp);
} else {
player->addCondition(conditionHelp->clone());
}
}
Condition* conditionYell = player->getCondition(CONDITION_YELLTICKS, CONDITIONID_DEFAULT);
if (conditionYell && conditionYell->getTicks() > 0) {
conditionYell->setTicks(conditionYell->getTicks() - (offlineTime * 1000));
if (conditionYell->getTicks() <= 0) {
player->removeCondition(conditionYell);
} else {
player->addCondition(conditionYell->clone());
}
}
if (player->isPremium()) {
int32_t value;
player->getStorageValue(STORAGEVALUE_PROMOTION, value);
if (player->isPromoted() && value != 1) {
player->addStorageValue(STORAGEVALUE_PROMOTION, 1);
} else if (!player->isPromoted() && value == 1) {
player->setVocation(g_vocations.getPromotedVocation(player->getVocationId()));
}
} else if (player->isPromoted()) {
player->setVocation(player->vocation->getFromVocation());
}
bool sentStats = false;
int16_t oldStaminaMinutes = player->getStaminaMinutes();
player->regenerateStamina(offlineTime);
int32_t offlineTrainingSkill = player->getOfflineTrainingSkill();
if (offlineTrainingSkill != -1) {
player->setOfflineTrainingSkill(-1);
uint32_t offlineTrainingTime = std::max<int32_t>(0, std::min<int32_t>(offlineTime, std::min<int32_t>(43200, player->getOfflineTrainingTime() / 1000)));
if (offlineTime >= 600) {
player->removeOfflineTrainingTime(offlineTrainingTime * 1000);
int32_t remainder = offlineTime - offlineTrainingTime;
if (remainder > 0) {
player->addOfflineTrainingTime(remainder * 1000);
}
if (offlineTrainingTime >= 60) {
std::ostringstream ss;
ss << "During your absence you trained for ";
int32_t hours = offlineTrainingTime / 3600;
if (hours > 1) {
ss << hours << " hours";
} else if (hours == 1) {
ss << "1 hour";
}
int32_t minutes = (offlineTrainingTime % 3600) / 60;
if (minutes != 0) {
if (hours != 0) {
ss << " and ";
}
if (minutes > 1) {
ss << minutes << " minutes";
} else {
ss << "1 minute";
}
}
ss << '.';
player->sendTextMessage(MSG_EVENT_ADVANCE, ss.str());
Vocation* vocation;
if (player->isPromoted()) {
vocation = player->getVocation();
} else {
int32_t promotedVocationId = g_vocations.getPromotedVocation(player->getVocationId());
vocation = g_vocations.getVocation(promotedVocationId);
if (!vocation) {
vocation = player->getVocation();
}
}
bool sendUpdateSkills = false;
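					// Rough accounting (a sketch of the formulas below): modifier is the
					// vocation attack speed in seconds per attack, so offlineTrainingTime /
					// modifier approximates the number of simulated attacks; melee skills
					// then credit one try per two attacks, and distance one per four.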
if (offlineTrainingSkill == SKILL_CLUB || offlineTrainingSkill == SKILL_SWORD || offlineTrainingSkill == SKILL_AXE) {
float modifier = vocation->getAttackSpeed() / 1000.f;
sendUpdateSkills = player->addOfflineTrainingTries((skills_t)offlineTrainingSkill, (offlineTrainingTime / modifier) / 2);
} else if (offlineTrainingSkill == SKILL_DIST) {
float modifier = vocation->getAttackSpeed() / 1000.f;
sendUpdateSkills = player->addOfflineTrainingTries((skills_t)offlineTrainingSkill, (offlineTrainingTime / modifier) / 4);
} else if (offlineTrainingSkill == SKILL__MAGLEVEL) {
int32_t gainTicks = vocation->getManaGainTicks() * 2;
if (gainTicks == 0) {
gainTicks = 1;
}
player->addOfflineTrainingTries(SKILL__MAGLEVEL, offlineTrainingTime * (vocation->getManaGainAmount() / gainTicks));
}
if (player->addOfflineTrainingTries(SKILL_SHIELD, offlineTrainingTime / 4) || sendUpdateSkills) {
player->sendSkills();
}
}
player->sendStats();
sentStats = true;
} else {
player->sendTextMessage(MSG_EVENT_ADVANCE, "You must be logged out for more than 10 minutes to start offline training.");
}
} else {
uint16_t oldMinutes = player->getOfflineTrainingTime() / 60 / 1000;
player->addOfflineTrainingTime(offlineTime * 1000);
uint16_t newMinutes = player->getOfflineTrainingTime() / 60 / 1000;
if (oldMinutes != newMinutes) {
player->sendStats();
sentStats = true;
}
}
if (!sentStats && player->getStaminaMinutes() != oldStaminaMinutes) {
player->sendStats();
}
}
addCreatureCheck(creature);
creature->onPlacedCreature();
return true;
}
bool Game::removeCreature(Creature* creature, bool isLogout /*= true*/)
{
if (creature->isRemoved()) {
return false;
}
Tile* tile = creature->getTile();
std::vector<int32_t> oldStackPosVector;
SpectatorVec list;
getSpectators(list, tile->getPosition(), true);
for (Creature* spectator : list) {
if (Player* player = spectator->getPlayer()) {
if (player->canSeeCreature(creature)) {
oldStackPosVector.push_back(tile->getClientIndexOfThing(player, creature));
} else {
oldStackPosVector.push_back(-1);
}
}
}
int32_t index = tile->__getIndexOfThing(creature);
if (!map.removeCreature(creature)) {
return false;
}
const Position& tilePosition = tile->getPosition();
//send to client
size_t i = 0;
for (Creature* spectator : list) {
if (Player* player = spectator->getPlayer()) {
int32_t stackpos = oldStackPosVector[i++];
if (stackpos != -1) {
player->sendRemoveTileThing(tilePosition, stackpos);
}
}
}
//event method
for (Creature* spectator : list) {
spectator->onCreatureDisappear(creature, index, isLogout);
}
creature->getParent()->postRemoveNotification(creature, nullptr, index, true);
creature->removeList();
creature->setRemoved();
ReleaseCreature(creature);
removeCreatureCheck(creature);
for (Creature* summon : creature->summons) {
summon->setLossSkill(false);
removeCreature(summon);
}
creature->onRemovedCreature();
return true;
}
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = fromPos.y;
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, spriteId, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Cylinder* toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
if (Creature* movingCreature = thing->getCreature()) {
if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) {
SchedulerTask* task = createSchedulerTask(1000,
std::bind(&Game::playerMoveCreature, this, player->getID(),
movingCreature->getID(), movingCreature->getPosition(), toCylinder->getPosition()));
player->setNextActionTask(task);
} else {
playerMoveCreature(playerId, movingCreature->getID(), movingCreature->getPosition(), toCylinder->getPosition());
}
} else if (thing->getItem()) {
playerMoveItem(playerId, fromPos, spriteId, fromStackPos, toPos, count);
}
}
void Game::playerMoveCreature(uint32_t playerId, uint32_t movingCreatureId,
const Position& movingCreatureOrigPos, const Position& toPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreature,
this, playerId, movingCreatureId, movingCreatureOrigPos, toPos));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
Creature* movingCreature = getCreatureByID(movingCreatureId);
if (!movingCreature) {
return;
}
if (movingCreature->getPlayer()) {
if (movingCreature->getPlayer()->getNoMove()) {
return;
}
}
if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) {
//need to walk to the creature first before moving it
std::list<Direction> listDir;
if (getPathToEx(player, movingCreatureOrigPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreature, this,
playerId, movingCreatureId, movingCreatureOrigPos, toPos));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
Tile* toTile = getTile(toPos);
if (!toTile) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) ||
(movingCreature->isInGhostMode() && !player->isAccessPlayer())) {
player->sendCancelMessage(RET_NOTMOVEABLE);
return;
}
//check throw distance
const Position& movingCreaturePos = movingCreature->getPosition();
if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) {
player->sendCancelMessage(RET_DESTINATIONOUTOFREACH);
return;
}
Tile* movingCreatureTile = movingCreature->getTile();
if (!movingCreatureTile) {
player->sendCancelMessage(RET_NOTMOVEABLE);
return;
}
if (player != movingCreature) {
if (toTile->hasProperty(BLOCKPATH)) {
player->sendCancelMessage(RET_NOTENOUGHROOM);
return;
} else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
} else {
if (CreatureVector* tileCreatures = toTile->getCreatures()) {
for (Creature* tileCreature : *tileCreatures) {
if (!tileCreature->isInGhostMode()) {
player->sendCancelMessage(RET_NOTENOUGHROOM);
return;
}
}
}
Npc* movingNpc = movingCreature->getNpc();
if (movingNpc && !Spawns::getInstance()->isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) {
player->sendCancelMessage(RET_NOTENOUGHROOM);
return;
}
}
}
if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) {
return;
}
ReturnValue ret = internalMoveCreature(movingCreature, movingCreatureTile, toTile);
if (ret != RET_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/)
{
Cylinder* fromTile = creature->getTile();
Cylinder* toTile = nullptr;
creature->setLastPosition(creature->getPosition());
const Position& currentPos = creature->getPosition();
Position destPos = currentPos;
bool diagonalMovement;
switch (direction) {
case NORTHWEST:
case NORTHEAST:
case SOUTHWEST:
case SOUTHEAST:
diagonalMovement = true;
break;
default:
diagonalMovement = false;
break;
}
destPos = getNextPosition(direction, destPos);
if (creature->getPlayer() && !diagonalMovement) {
		//try to go up
if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) {
Tile* tmpTile = getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (tmpTile == nullptr || (tmpTile->ground == nullptr && !tmpTile->hasProperty(BLOCKSOLID))) {
tmpTile = getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->ground && !tmpTile->hasProperty(BLOCKSOLID)) {
flags = flags | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
if (!tmpTile->floorChange()) {
destPos.z--;
}
}
}
} else {
			//try to go down
Tile* tmpTile = getTile(destPos);
if (currentPos.z != 7 && (tmpTile == nullptr || (tmpTile->ground == nullptr && !tmpTile->hasProperty(BLOCKSOLID)))) {
tmpTile = getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->hasHeight(3)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
destPos.z++;
}
}
}
}
toTile = getTile(destPos);
ReturnValue ret = RET_NOTPOSSIBLE;
if (toTile != nullptr) {
ret = internalMoveCreature(creature, fromTile, toTile, flags);
}
return ret;
}
ReturnValue Game::internalMoveCreature(Creature* creature, Cylinder* fromCylinder, Cylinder* toCylinder, uint32_t flags /*= 0*/)
{
//check if we can move the creature to the destination
ReturnValue ret = toCylinder->__queryAdd(0, creature, 1, flags);
if (ret != RET_NOERROR) {
return ret;
}
fromCylinder->getTile()->moveCreature(creature, toCylinder);
int32_t index = 0;
Item* toItem = nullptr;
Cylinder* subCylinder = nullptr;
uint32_t n = 0;
while ((subCylinder = toCylinder->__queryDestination(index, creature, &toItem, flags)) != toCylinder) {
toCylinder->getTile()->moveCreature(creature, subCylinder);
if (creature->getParent() != subCylinder) {
			//could happen if a script moves the creature
break;
}
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++n >= MAP_MAX_LAYERS) {
break;
}
}
return RET_NOERROR;
}
void Game::playerMoveItem(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItem, this,
playerId, fromPos, spriteId, fromStackPos, toPos, count));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
Cylinder* fromCylinder = internalGetCylinder(player, fromPos);
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, spriteId, STACKPOS_MOVE);
if (!thing || !thing->getItem()) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
Cylinder* toCylinder = internalGetCylinder(player, toPos);
uint8_t toIndex = 0;
if (toPos.x == 0xFFFF) {
if (toPos.y & 0x40) {
toIndex = toPos.z;
} else {
toIndex = toPos.y;
}
}
if (fromCylinder == nullptr || toCylinder == nullptr || item == nullptr || item->getClientID() != spriteId) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
if (!item->isPushable() || item->getUniqueId() != 0) {
player->sendCancelMessage(RET_NOTMOVEABLE);
return;
}
const Position& playerPos = player->getPosition();
const Position& mapFromPos = fromCylinder->getTile()->getPosition();
if (playerPos.z > mapFromPos.z) {
player->sendCancelMessage(RET_FIRSTGOUPSTAIRS);
return;
}
if (playerPos.z < mapFromPos.z) {
player->sendCancelMessage(RET_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapFromPos)) {
//need to walk to the item first before using it
std::list<Direction> listDir;
if (getPathToEx(player, item->getPosition(), listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItem, this,
playerId, fromPos, spriteId, fromStackPos, toPos, count));
player->setNextWalkActionTask(task);
return;
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
return;
}
}
const Tile* toCylinderTile = toCylinder->getTile();
const Position& mapToPos = toCylinderTile->getPosition();
//hangable item specific code
if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//destination supports hangable objects so need to move there first
bool vertical = toCylinderTile->hasProperty(ISVERTICAL);
if (vertical) {
if (playerPos.x + 1 == mapToPos.x) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
} else { // horizontal
if (playerPos.y + 1 == mapToPos.y) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) {
Position walkPos = mapToPos;
if (vertical) {
walkPos.x++;
} else {
walkPos.y++;
}
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(mapFromPos, player->getPosition())
&& !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) {
//need to pickup the item first
Item* moveItem = nullptr;
ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem);
if (ret != RET_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::list<Direction> listDir;
if (map.getPathTo(player, walkPos, listDir)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItem, this,
playerId, itemPos, spriteId, itemStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
}
if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) {
player->sendCancelMessage(RET_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(mapFromPos, mapToPos)) {
player->sendCancelMessage(RET_CANNOTTHROW);
return;
}
if (!g_events->eventPlayerOnMoveItem(player, item, count, fromPos, toPos)) {
return;
}
ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player);
if (ret != RET_NOERROR) {
player->sendCancelMessage(ret);
}
}
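//core item move routine: resolves the final destination cylinder, then runs the
//queryAdd/queryMaxCount/queryRemove checks before removing the item from the
//source and adding it to the destination, merging stackables and firing the
//post add/remove notifications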
ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index,
Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/)
{
if (!toCylinder) {
return RET_NOTPOSSIBLE;
}
Tile* fromTile = fromCylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == fromCylinder) {
fromCylinder = fromTile;
}
}
Item* toItem = nullptr;
Cylinder* subCylinder;
int floorN = 0;
while ((subCylinder = toCylinder->__queryDestination(index, item, &toItem, flags)) != toCylinder) {
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++floorN >= MAP_MAX_LAYERS) {
break;
}
}
//destination is the same as the source?
if (item == toItem) {
return RET_NOERROR; //silently ignore move
}
//check if we can add this item
ReturnValue ret = toCylinder->__queryAdd(index, item, count, flags, actor);
if (ret == RET_NEEDEXCHANGE) {
//check if we can add it to source cylinder
ret = fromCylinder->__queryAdd(fromCylinder->__getIndexOfThing(item), toItem, toItem->getItemCount(), 0);
if (ret == RET_NOERROR) {
//check how much we can move
uint32_t maxExchangeQueryCount = 0;
ReturnValue retExchangeMaxCount = fromCylinder->__queryMaxCount(INDEX_WHEREEVER, toItem, toItem->getItemCount(), maxExchangeQueryCount, 0);
if (retExchangeMaxCount != RET_NOERROR && maxExchangeQueryCount == 0) {
return retExchangeMaxCount;
}
if (toCylinder->__queryRemove(toItem, toItem->getItemCount(), flags) == RET_NOERROR) {
int32_t oldToItemIndex = toCylinder->__getIndexOfThing(toItem);
toCylinder->__removeThing(toItem, toItem->getItemCount());
fromCylinder->__addThing(toItem);
if (oldToItemIndex != -1) {
toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex, true);
}
int32_t newToItemIndex = fromCylinder->__getIndexOfThing(toItem);
if (newToItemIndex != -1) {
fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex);
}
ret = toCylinder->__queryAdd(index, item, count, flags);
toItem = nullptr;
}
}
}
if (ret != RET_NOERROR) {
return ret;
}
//check how much we can move
uint32_t maxQueryCount = 0;
ReturnValue retMaxCount = toCylinder->__queryMaxCount(index, item, count, maxQueryCount, flags);
if (retMaxCount != RET_NOERROR && maxQueryCount == 0) {
return retMaxCount;
}
uint32_t m;
if (item->isStackable()) {
m = std::min<uint32_t>(count, maxQueryCount);
} else {
m = maxQueryCount;
}
Item* moveItem = item;
//check if we can remove this item
ret = fromCylinder->__queryRemove(item, m, flags);
if (ret != RET_NOERROR) {
return ret;
}
if (tradeItem) {
if (toCylinder->getItem() == tradeItem) {
return RET_NOTENOUGHROOM;
}
Cylinder* tmpCylinder = toCylinder->getParent();
while (tmpCylinder) {
if (tmpCylinder->getItem() == tradeItem) {
return RET_NOTENOUGHROOM;
}
tmpCylinder = tmpCylinder->getParent();
}
}
//remove the item
int32_t itemIndex = fromCylinder->__getIndexOfThing(item);
Item* updateItem = nullptr;
fromCylinder->__removeThing(item, m);
bool isCompleteRemoval = item->isRemoved();
//update item(s)
if (item->isStackable()) {
uint32_t n;
if (toItem && toItem->getID() == item->getID()) {
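//merge into the existing stack, capped at 100 items per stack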
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->__updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
updateItem = toItem;
} else {
n = 0;
}
int32_t newCount = m - n;
if (newCount > 0) {
moveItem = Item::CreateItem(item->getID(), newCount);
} else {
moveItem = nullptr;
}
if (item->isRemoved()) {
ReleaseItem(item);
}
}
//add item
if (moveItem /*m - n > 0*/) {
toCylinder->__addThing(index, moveItem);
}
if (itemIndex != -1) {
fromCylinder->postRemoveNotification(item, toCylinder, itemIndex, isCompleteRemoval);
}
if (moveItem) {
int32_t moveItemIndex = toCylinder->__getIndexOfThing(moveItem);
if (moveItemIndex != -1) {
toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex);
}
}
if (updateItem) {
int32_t updateItemIndex = toCylinder->__getIndexOfThing(updateItem);
if (updateItemIndex != -1) {
toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex);
}
}
if (_moveItem) {
if (moveItem) {
*_moveItem = moveItem;
} else {
*_moveItem = item;
}
}
//we could not move all, inform the player
if (item->isStackable() && maxQueryCount < count) {
return retMaxCount;
}
return ret;
}
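//adds an item to a cylinder, merging stackables with an existing stack when possible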
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/,
uint32_t flags/* = 0*/, bool test/* = false*/)
{
uint32_t remainderCount = 0;
return internalAddItem(toCylinder, item, index, flags, test, remainderCount);
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index,
uint32_t flags, bool test, uint32_t& remainderCount)
{
remainderCount = 0;
if (toCylinder == nullptr || item == nullptr) {
return RET_NOTPOSSIBLE;
}
Cylinder* destCylinder = toCylinder;
Item* toItem = nullptr;
toCylinder = toCylinder->__queryDestination(index, item, &toItem, flags);
//check if we can add this item
ReturnValue ret = toCylinder->__queryAdd(index, item, item->getItemCount(), flags);
if (ret != RET_NOERROR) {
return ret;
}
/*
Check if we can add the whole amount; we do this by checking against the original cylinder,
since queryDestination can return a cylinder that might only hold part of the full amount.
*/
uint32_t maxQueryCount = 0;
ret = destCylinder->__queryMaxCount(INDEX_WHEREEVER, item, item->getItemCount(), maxQueryCount, flags);
if (ret != RET_NOERROR) {
return ret;
}
if (test) {
return RET_NOERROR;
}
if (item->isStackable() && toItem && toItem->getID() == item->getID()) {
uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount);
uint32_t n = 0;
if (toItem->getID() == item->getID()) {
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->__updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
}
int32_t count = m - n;
if (count > 0) {
if (item->getItemCount() != count) {
Item* remainderItem = Item::CreateItem(item->getID(), count);
if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RET_NOERROR) {
ReleaseItem(remainderItem);
remainderCount = count;
}
} else {
toCylinder->__addThing(index, item);
int32_t itemIndex = toCylinder->__getIndexOfThing(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
} else {
//fully merged with toItem, item will be destroyed
item->onRemoved();
ReleaseItem(item);
int32_t itemIndex = toCylinder->__getIndexOfThing(toItem);
if (itemIndex != -1) {
toCylinder->postAddNotification(toItem, nullptr, itemIndex);
}
}
} else {
toCylinder->__addThing(index, item);
int32_t itemIndex = toCylinder->__getIndexOfThing(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
return RET_NOERROR;
}
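//removes up to count items from the parent cylinder; count == -1 removes the whole stack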
ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/)
{
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return RET_NOTPOSSIBLE;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
if (count == -1) {
count = item->getItemCount();
}
//check if we can remove this item
ReturnValue ret = cylinder->__queryRemove(item, count, flags | FLAG_IGNORENOTMOVEABLE);
if (ret != RET_NOERROR) {
return ret;
}
if (!item->canRemove()) {
return RET_NOTPOSSIBLE;
}
if (!test) {
int32_t index = cylinder->__getIndexOfThing(item);
//remove the item
cylinder->__removeThing(item, count);
bool isCompleteRemoval = false;
if (item->isRemoved()) {
isCompleteRemoval = true;
ReleaseItem(item);
}
cylinder->postRemoveNotification(item, nullptr, index, isCompleteRemoval);
}
item->onRemoved();
return RET_NOERROR;
}
ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= SLOT_WHEREEVER*/)
{
uint32_t remainderCount = 0;
ReturnValue ret = internalAddItem(player, item, (int32_t)slot, 0, false, remainderCount);
if (remainderCount > 0) {
Item* remainderItem = Item::CreateItem(item->getID(), remainderCount);
ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (remaindRet != RET_NOERROR) {
ReleaseItem(remainderItem);
}
}
if (ret != RET_NOERROR && dropOnMap) {
ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
return ret;
}
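//searches the cylinder (and nested containers when depthSearch is set)
//for the first item matching itemId and, optionally, subType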
Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId,
bool depthSearch /*= true*/, int32_t subType /*= -1*/)
{
if (cylinder == nullptr) {
return nullptr;
}
std::forward_list<Container*> listContainer;
for (int32_t i = cylinder->__getFirstIndex(), j = cylinder->__getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->__getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
} else {
if (depthSearch) {
Container* container = item->getContainer();
if (container) {
listContainer.push_front(container);
}
}
}
}
while (!listContainer.empty()) {
Container* container = listContainer.front();
listContainer.pop_front();
for (Item* item : container->getItemList()) {
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
listContainer.push_front(tmpContainer);
}
}
}
return nullptr;
}
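//sums the worth of all coins in the cylinder and its nested containers; if the
//total covers the requested amount, removes coins (lowest worth first) and pays
//back any overshoot as change via addMoney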
bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (cylinder == nullptr) {
return false;
}
if (money == 0) {
return true;
}
std::forward_list<Container*> listContainer;
typedef std::multimap<uint64_t, Item*, std::less<uint64_t>> MoneyMap;
typedef MoneyMap::value_type moneymap_pair;
MoneyMap moneyMap;
uint64_t moneyCount = 0;
for (int32_t i = cylinder->__getFirstIndex(), j = cylinder->__getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->__getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
Container* container = item->getContainer();
if (container) {
listContainer.push_front(container);
} else if (item->getWorth() != 0) {
moneyCount += item->getWorth();
moneyMap.insert(moneymap_pair(item->getWorth(), item));
}
}
while (!listContainer.empty()) {
Container* container = listContainer.front();
listContainer.pop_front();
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
listContainer.push_front(tmpContainer);
} else if (item->getWorth() != 0) {
moneyCount += item->getWorth();
moneyMap.insert(moneymap_pair(item->getWorth(), item));
}
}
}
/*not enough money*/
if (moneyCount < money) {
return false;
}
for (MoneyMap::const_iterator mit = moneyMap.begin(), mend = moneyMap.end(); mit != mend && money > 0; ++mit) {
Item* item = mit->second;
internalRemoveItem(item);
if (mit->first > money) {
/* Remove a monetary value from an item */
uint64_t remaind = item->getWorth() - money;
addMoney(cylinder, remaind, flags);
money = 0;
} else {
money -= mit->first;
}
}
return money == 0;
}
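//pays out the amount as crystal (10000 gp), platinum (100 gp) and gold coins;
//coins that do not fit in the cylinder are dropped on its tile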
void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
uint32_t crys = money / 10000;
money -= crys * 10000;
while (crys > 0) {
Item* remaindItem = Item::CreateItem(ITEM_COINS_CRYSTAL, std::min<int32_t>(100, crys));
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RET_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
crys -= std::min<int32_t>(100, crys);
}
uint16_t plat = money / 100;
if (plat != 0) {
Item* remaindItem = Item::CreateItem(ITEM_COINS_PLATINUM, plat);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RET_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
money -= plat * 100;
}
if (money != 0) {
Item* remaindItem = Item::CreateItem(ITEM_COINS_GOLD, money);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RET_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
}
}
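//transforms an item into newId/newCount in place when the item types are
//compatible, otherwise replaces (or removes) it while keeping its position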
Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/)
{
if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
return item;
}
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return nullptr;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
int32_t itemIndex = cylinder->__getIndexOfThing(item);
if (itemIndex == -1) {
return item;
}
if (!item->canTransform()) {
return item;
}
const ItemType& curType = Item::items[item->getID()];
const ItemType& newType = Item::items[newId];
if (curType.alwaysOnTop != newType.alwaysOnTop) {
//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
//Remove the old, and add the new
ReturnValue ret = internalRemoveItem(item);
if (ret != RET_NOERROR) {
return item;
}
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (!newItem) {
return nullptr;
}
newItem->copyAttributes(item);
ret = internalAddItem(cylinder, newItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (ret != RET_NOERROR) {
delete newItem;
return nullptr;
}
return newItem;
}
if (curType.type == newType.type) {
//Both items have the same type so we can safely change id/subtype
if (newCount == 0 && (item->isStackable() || item->getCharges() != 0)) {
if (item->isStackable()) {
internalRemoveItem(item);
return nullptr;
} else {
int32_t newItemId = newId;
if (curType.id == newType.id) {
newItemId = curType.decayTo;
}
if (newItemId == -1) {
internalRemoveItem(item);
return nullptr;
} else if (newItemId != newId) {
//Replacing the old item with the new one while maintaining the old position
Item* newItem = Item::CreateItem(newItemId, 1);
if (newItem == nullptr) {
return nullptr;
}
cylinder->__replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex, true);
ReleaseItem(item);
return newItem;
} else {
return transformItem(item, newItemId);
}
}
} else {
cylinder->postRemoveNotification(item, cylinder, itemIndex, false);
uint16_t itemId = item->getID();
int32_t count = item->getSubType();
if (curType.id != newType.id) {
if (newType.group != curType.group) {
item->setDefaultSubtype();
}
itemId = newId;
}
if (newCount != -1 && newType.hasSubType()) {
count = newCount;
}
cylinder->__updateThing(item, itemId, count);
cylinder->postAddNotification(item, cylinder, itemIndex);
return item;
}
}
//Replacing the old item with the new one while maintaining the old position
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (newItem == nullptr) {
return nullptr;
}
cylinder->__replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex, true);
ReleaseItem(item);
return newItem;
}
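//moves a creature or item straight to newPos without pathfinding checks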
ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
if (newPos == thing->getPosition()) {
return RET_NOERROR;
} else if (thing->isRemoved()) {
return RET_NOTPOSSIBLE;
}
Tile* toTile = getTile(newPos.x, newPos.y, newPos.z);
if (toTile) {
if (Creature* creature = thing->getCreature()) {
ReturnValue ret = toTile->__queryAdd(0, creature, 1, FLAG_NOLIMIT);
if (ret != RET_NOERROR) {
return ret;
}
creature->getTile()->moveCreature(creature, toTile, !pushMove);
return RET_NOERROR;
} else if (Item* item = thing->getItem()) {
return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags);
}
}
return RET_NOTPOSSIBLE;
}
//Implementation of player invoked events
void Game::playerMove(uint32_t playerId, Direction direction)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkActionTask(nullptr);
std::list<Direction> dirs;
dirs.push_back(direction);
player->startAutoWalk(dirs);
}
bool Game::playerBroadcastMessage(Player* player, const std::string& text)
{
if (!player->hasFlag(PlayerFlag_CanBroadcast)) {
return false;
}
std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendCreatureSay(player, SPEAK_BROADCAST, text);
}
return true;
}
void Game::playerCreatePrivateChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player || !player->isPremium()) {
return;
}
ChatChannel* channel = g_chat.createChannel(*player, CHANNEL_PRIVATE);
if (!channel || !channel->addUser(*player)) {
return;
}
player->sendCreatePrivateChannel(channel->getId(), channel->getName());
}
void Game::playerChannelInvite(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat.getPrivateChannel(*player);
if (!channel) {
return;
}
Player* invitePlayer = getPlayerByName(name);
if (!invitePlayer) {
return;
}
if (player == invitePlayer) {
return;
}
channel->invitePlayer(*player, *invitePlayer);
}
void Game::playerChannelExclude(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat.getPrivateChannel(*player);
if (!channel) {
return;
}
Player* excludePlayer = getPlayerByName(name);
if (!excludePlayer) {
return;
}
if (player == excludePlayer) {
return;
}
channel->excludePlayer(*player, *excludePlayer);
}
void Game::playerRequestChannels(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendChannelsDialog();
}
void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
ChatChannel* channel = g_chat.addUserToChannel(*player, channelId);
if (!channel) {
return;
}
const InvitedMap* invitedUsers = channel->getInvitedUsersPtr();
const UsersMap* users;
if (!channel->isPublicChannel()) {
users = &channel->getUsers();
} else {
users = nullptr;
}
player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers);
}
void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_chat.removeUserFromChannel(*player, channelId);
}
void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!IOLoginData::formatPlayerName(receiver)) {
player->sendCancel("A player with this name does not exist.");
return;
}
player->sendOpenPrivateChannel(receiver);
}
void Game::playerCloseNpcChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
SpectatorVec list;
getSpectators(list, player->getPosition());
for (Creature* spectator : list) {
if (Npc* npc = spectator->getNpc()) {
npc->onPlayerCloseChannel(player);
}
}
}
void Game::playerReceivePing(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->receivePing();
}
void Game::playerReceivePingBack(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendPingBack();
}
void Game::playerAutoWalk(uint32_t playerId, const std::list<Direction>& listDir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkTask(nullptr);
player->startAutoWalk(listDir);
}
void Game::playerStopAutoWalk(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->stopWalk();
}
void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId,
const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId, bool isHotkey)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) {
player->sendCancelMessage(RET_CANNOTUSETHISOBJECT);
return;
}
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RET_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RET_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RET_NOERROR) {
if (ret == RET_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) &&
!Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem);
if (ret != RET_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::list<Direction> listDir;
if (getPathToEx(player, walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this,
playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId, isHotkey));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this,
playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId, isHotkey));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey);
}
void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint8_t index, uint16_t spriteId, bool isHotkey)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RET_CANNOTUSETHISOBJECT);
return;
}
ReturnValue ret = g_actions->canUse(player, pos);
if (ret != RET_NOERROR) {
if (ret == RET_TOOFARAWAY) {
std::list<Direction> listDir;
if (getPathToEx(player, pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId, isHotkey));
player->setNextWalkActionTask(task);
return;
}
ret = RET_THEREISNOWAY;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId, isHotkey));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItem(player, pos, index, item, isHotkey);
}
void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId, bool isHotkey)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) {
return;
}
if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
if (creature->getPlayer() || isHotkey) {
player->sendCancelMessage(RET_DIRECTPLAYERSHOOT);
return;
}
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RET_CANNOTUSETHISOBJECT);
return;
}
Position toPos = creature->getPosition();
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RET_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RET_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RET_NOERROR) {
if (ret == RET_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem);
if (ret != RET_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::list<Direction> listDir;
if (getPathToEx(player, walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this,
playerId, itemPos, itemStackPos, creatureId, spriteId, isHotkey));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this,
playerId, fromPos, fromStackPos, creatureId, spriteId, isHotkey));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->__getIndexOfThing(creature), item, isHotkey, creatureId);
}
void Game::playerCloseContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeContainer(cid);
player->sendCloseContainer(cid);
}
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
Container* parentContainer = dynamic_cast<Container*>(container->getParent());
if (!parentContainer) {
Tile* tile = container->getTile();
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
parentContainer = new Container(tile);
parentContainer->useThing2();
browseFields[tile] = parentContainer;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
parentContainer = it->second;
}
}
player->addContainer(cid, parentContainer);
player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}
void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}
void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->isRoteable() || item->getUniqueId() != 0) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
std::list<Direction> listDir;
if (getPathToEx(player, pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this,
playerId, pos, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
uint16_t newId = Item::items[item->getID()].rotateTo;
if (newId != 0) {
transformItem(item, newId);
}
}
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint16_t maxTextLength = 0;
uint32_t internalWindowTextId = 0;
Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
return;
}
if (!writeItem || writeItem->isRemoved()) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Cylinder* topParent = writeItem->getTopParent();
Player* owner = dynamic_cast<Player*>(topParent);
if (owner && owner != player) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
player->setWriteItem(nullptr);
return;
}
}
if (!text.empty()) {
if (writeItem->getText() != text) {
writeItem->setText(text);
writeItem->setWriter(player->getName());
writeItem->setDate(time(nullptr));
}
} else {
writeItem->resetText();
writeItem->resetWriter();
writeItem->resetDate();
}
uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
if (newId != 0) {
transformItem(writeItem, newId);
}
player->setWriteItem(nullptr);
}
void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Position& playerPos = player->getPosition();
if (playerPos.z > pos.z) {
player->sendCancelMessage(RET_FIRSTGOUPSTAIRS);
return;
}
if (playerPos.z < pos.z) {
player->sendCancelMessage(RET_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1, 0>(playerPos, pos)) {
std::list<Direction> listDir;
if (getPathToEx(player, pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(
&Game::playerBrowseField, this, playerId, pos
));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
Tile* tile = getTile(pos);
if (!tile) {
return;
}
Container* container;
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
container = new Container(tile);
container->useThing2();
browseFields[tile] = container;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
container = it->second;
}
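//derive one of nine reserved container ids (0x7..0xF) from the tile position
//so that adjacent browse fields are assigned distinct ids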
uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
Container* openContainer = player->getContainerByID(dummyContainerId);
if (openContainer) {
player->onCloseContainer(openContainer);
player->closeContainer(dummyContainerId);
} else {
player->addContainer(dummyContainerId, container);
player->sendContainer(dummyContainerId, container, false, 0);
}
}
void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(containerId);
if (!container || !container->hasPagination()) {
return;
}
if ((index % container->capacity()) != 0 || index >= container->size()) {
return;
}
player->setContainerIndex(containerId, index);
player->sendContainer(containerId, container, false, index);
}
void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint32_t internalWindowTextId;
uint32_t internalListId;
House* house = player->getEditHouse(internalWindowTextId, internalListId);
if (house && internalWindowTextId == windowTextId && listId == 0) {
house->setAccessList(internalListId, text);
player->setEditHouse(nullptr);
}
}
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint32_t tradePlayerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
player->sendTextMessage(MSG_INFO_DESCR, "Sorry, not possible.");
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
std::ostringstream ss;
ss << tradePartner->getName() << " tells you to move closer.";
player->sendTextMessage(MSG_INFO_DESCR, ss.str());
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RET_CREATUREISNOTREACHABLE);
return;
}
Item* tradeItem = dynamic_cast<Item*>(internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USE));
if (!tradeItem || tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->getUniqueId() != 0) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
if (playerPosition.z > tradeItemPosition.z) {
player->sendCancelMessage(RET_FIRSTGOUPSTAIRS);
return;
} else if (playerPosition.z < tradeItemPosition.z) {
player->sendCancelMessage(RET_FIRSTGODOWNSTAIRS);
return;
} else if (!Position::areInRange<1, 1, 0>(tradeItemPosition, playerPosition)) {
std::list<Direction> listDir;
if (getPathToEx(player, pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this,
playerId, pos, stackPos, tradePlayerId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RET_THEREISNOWAY);
}
return;
}
Container* tradeItemContainer = tradeItem->getContainer();
if (tradeItemContainer) {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MSG_INFO_DESCR, "This item is already being traded.");
return;
}
if (tradeItemContainer->isHoldingItem(item)) {
player->sendTextMessage(MSG_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MSG_INFO_DESCR, "This item is already being traded.");
return;
}
}
} else {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MSG_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MSG_INFO_DESCR, "This item is already being traded.");
return;
}
}
}
Container* tradeContainer = tradeItem->getContainer();
if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
player->sendTextMessage(MSG_INFO_DESCR, "You can not trade more than 100 items.");
return;
}
internalStartTrade(player, tradePartner, tradeItem);
}
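//sets up the trade state on both players and registers the offered item in tradeItems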
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
player->sendCancelMessage(RET_YOUAREALREADYTRADING);
return false;
} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
player->sendCancelMessage(RET_THISPLAYERISALREADYTRADING);
return false;
}
player->tradePartner = tradePartner;
player->tradeItem = tradeItem;
player->tradeState = TRADE_INITIATED;
tradeItem->useThing2();
tradeItems[tradeItem] = player->getID();
player->sendTradeItemRequest(player, tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
std::ostringstream ss;
ss << player->getName() << " wants to trade with you.";
tradePartner->sendTextMessage(MSG_EVENT_ADVANCE, ss.str());
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else {
Item* counterOfferItem = tradePartner->tradeItem;
player->sendTradeItemRequest(tradePartner, counterOfferItem, false);
tradePartner->sendTradeItemRequest(player, tradeItem, false);
}
return true;
}
void Game::playerAcceptTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RET_CREATUREISNOTREACHABLE);
return;
}
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
Item* tradeItem1 = player->tradeItem;
Item* tradeItem2 = tradePartner->tradeItem;
player->setTradeState(TRADE_TRANSFER);
tradePartner->setTradeState(TRADE_TRANSFER);
std::map<Item*, uint32_t>::iterator it = tradeItems.find(tradeItem1);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
it = tradeItems.find(tradeItem2);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
bool isSuccess = false;
ReturnValue ret1 = internalAddItem(tradePartner, tradeItem1, INDEX_WHEREEVER, 0, true);
ReturnValue ret2 = internalAddItem(player, tradeItem2, INDEX_WHEREEVER, 0, true);
if (ret1 == RET_NOERROR && ret2 == RET_NOERROR) {
ret1 = internalRemoveItem(tradeItem1, tradeItem1->getItemCount(), true);
ret2 = internalRemoveItem(tradeItem2, tradeItem2->getItemCount(), true);
if (ret1 == RET_NOERROR && ret2 == RET_NOERROR) {
Cylinder* cylinder1 = tradeItem1->getParent();
Cylinder* cylinder2 = tradeItem2->getParent();
uint32_t count1 = tradeItem1->getItemCount();
uint32_t count2 = tradeItem2->getItemCount();
ret1 = internalMoveItem(cylinder1, tradePartner, INDEX_WHEREEVER, tradeItem1, count1, nullptr, FLAG_IGNOREAUTOSTACK, nullptr, tradeItem2);
if (ret1 == RET_NOERROR) {
internalMoveItem(cylinder2, player, INDEX_WHEREEVER, tradeItem2, count2, nullptr, FLAG_IGNOREAUTOSTACK);
tradeItem1->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
tradeItem2->onTradeEvent(ON_TRADE_TRANSFER, player);
isSuccess = true;
}
}
}
if (!isSuccess) {
std::string errorDescription;
if (tradePartner->tradeItem) {
errorDescription = getTradeErrorDescription(ret1, tradeItem1);
tradePartner->sendTextMessage(MSG_EVENT_ADVANCE, errorDescription);
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
}
if (player->tradeItem) {
errorDescription = getTradeErrorDescription(ret2, tradeItem2);
player->sendTextMessage(MSG_EVENT_ADVANCE, errorDescription);
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
}
}
player->setTradeState(TRADE_NONE);
player->tradeItem = nullptr;
player->tradePartner = nullptr;
player->sendTradeClose();
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradeItem = nullptr;
tradePartner->tradePartner = nullptr;
tradePartner->sendTradeClose();
}
}
std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
if (item) {
if (ret == RET_NOTENOUGHCAPACITY) {
std::ostringstream ss;
ss << "You do not have enough capacity to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
ss << std::endl << ' ' << item->getWeightDescription();
return ss.str();
} else if (ret == RET_NOTENOUGHROOM || ret == RET_CONTAINERNOTENOUGHROOM) {
std::ostringstream ss;
ss << "You do not have enough room to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
return ss.str();
}
}
return "Trade could not be completed.";
}
void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, int index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
Item* tradeItem;
if (lookAtCounterOffer) {
tradeItem = tradePartner->getTradeItem();
} else {
tradeItem = player->getTradeItem();
}
if (!tradeItem) {
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
Position::getDistanceY(playerPosition, tradeItemPosition));
if (index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
return;
}
Container* tradeContainer = tradeItem->getContainer();
if (!tradeContainer || index > (int32_t)tradeContainer->getItemHoldingCount()) {
return;
}
bool foundItem = false;
std::forward_list<const Container*> listContainer {tradeContainer};
do {
const Container* container = listContainer.front();
listContainer.pop_front();
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
listContainer.push_front(tmpContainer);
}
if (--index == 0) {
tradeItem = item;
foundItem = true;
break;
}
}
} while (!foundItem && !listContainer.empty());
if (foundItem) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
}
}
void Game::playerCloseTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
internalCloseTrade(player);
}
bool Game::internalCloseTrade(Player* player)
{
Player* tradePartner = player->tradePartner;
if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
return true;
}
if (player->getTradeItem()) {
std::map<Item*, uint32_t>::iterator it = tradeItems.find(player->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
player->tradeItem = nullptr;
}
player->setTradeState(TRADE_NONE);
player->tradePartner = nullptr;
player->sendTextMessage(MSG_STATUS_SMALL, "Trade cancelled.");
player->sendTradeClose();
if (tradePartner) {
if (tradePartner->getTradeItem()) {
std::map<Item*, uint32_t>::iterator it = tradeItems.find(tradePartner->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
tradePartner->tradeItem = nullptr;
}
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradePartner = nullptr;
tradePartner->sendTextMessage(MSG_STATUS_SMALL, "Trade cancelled.");
tradePartner->sendTradeClose();
}
return true;
}
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}
void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}
void Game::playerCloseShop(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeShopWindow();
}
void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
int32_t subType;
if (it.isFluidContainer() || it.isSplash()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
if (!g_events->eventPlayerOnLookInShop(player, &it, subType)) {
return;
}
std::ostringstream ss;
ss << "You see " << Item::getDescription(it, 1, nullptr, subType);
player->sendTextMessage(MSG_INFO_DESCR, ss.str());
}
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint16_t spriteId, uint8_t stackPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Position thingPos = thing->getPosition();
if (!player->canSee(thingPos)) {
player->sendCancelMessage(RET_NOTPOSSIBLE);
return;
}
Position playerPos = player->getPosition();
int32_t lookDistance;
if (thing != player) {
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
if (playerPos.z != thingPos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}
void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!player->canSeeCreature(creature)) {
return;
}
const Position& creaturePos = creature->getPosition();
if (!player->canSee(creaturePos)) {
return;
}
int32_t lookDistance;
if (creature != player) {
const Position& playerPos = player->getPosition();
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
if (playerPos.z != creaturePos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}
void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerSetAttackedCreature(playerId, 0);
playerFollowCreature(playerId, 0);
player->stopWalk();
}
void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAttackedCreature() && creatureId == 0) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
Creature* attackCreature = getCreatureByID(creatureId);
if (!attackCreature) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
if (ret != RET_NOERROR) {
player->sendCancelMessage(ret);
player->sendCancelTarget();
player->setAttackedCreature(nullptr);
return;
}
player->setAttackedCreature(attackCreature);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}
void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setAttackedCreature(nullptr);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
player->setFollowCreature(getCreatureByID(creatureId));
}
void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, chaseMode_t chaseMode, secureMode_t secureMode)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setFightMode(fightMode);
player->setChaseMode(chaseMode);
player->setSecureMode(secureMode);
}
void Game::playerRequestAddVip(uint32_t playerId, const std::string& vip_name)
{
if (vip_name.length() > 20) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
std::string real_name = vip_name;
uint32_t guid;
bool specialVip;
if (!IOLoginData::getGuidByNameEx(guid, specialVip, real_name)) {
player->sendTextMessage(MSG_STATUS_SMALL, "A player with that name does not exist.");
return;
}
if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MSG_STATUS_SMALL, "You can not add this player.");
return;
}
VipStatus_t status;
Player* vipPlayer = getPlayerByName(real_name);
if (!vipPlayer) {
status = VIPSTATUS_OFFLINE;
} else if (player->isAccessPlayer() || !vipPlayer->isInGhostMode()) {
status = VIPSTATUS_ONLINE;
} else {
status = VIPSTATUS_OFFLINE;
}
player->addVIP(guid, real_name, status);
}
void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->removeVIP(guid);
}
void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->editVIP(guid, description, icon, notify);
}
void Game::playerTurn(uint32_t playerId, Direction dir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!g_events->eventPlayerOnTurn(player, dir)) {
return;
}
player->resetIdleTime();
internalCreatureTurn(player, dir);
}
void Game::playerRequestOutfit(uint32_t playerId)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendOutfitWindow();
}
void Game::playerToggleMount(uint32_t playerId, bool mount)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->toggleMount(mount);
}
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasRequestedOutfit()) {
return;
}
player->hasRequestedOutfit(false);
if (outfit.lookMount != 0) {
Mount* mount = Mounts::getInstance()->getMountByClientID(outfit.lookMount);
if (!mount) {
return;
}
if (!player->hasMount(mount)) {
return;
}
if (player->isMounted()) {
Mount* prevMount = Mounts::getInstance()->getMountByID(player->getCurrentMount());
if (prevMount) {
changeSpeed(player, mount->speed - prevMount->speed);
}
player->setCurrentMount(mount->id);
} else {
player->setCurrentMount(mount->id);
outfit.lookMount = 0;
}
} else if (player->isMounted()) {
player->dismount();
}
if (player->canWear(outfit.lookType, outfit.lookAddons)) {
player->defaultOutfit = outfit;
if (player->hasCondition(CONDITION_OUTFIT)) {
return;
}
internalCreatureChangeOutfit(player, outfit);
}
}
void Game::playerShowQuestLog(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendQuestLog();
}
void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Quest* quest = Quests::getInstance()->getQuestByID(questId);
if (!quest) {
return;
}
player->sendQuestLine(quest);
}
void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
const std::string& receiver, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
uint32_t muteTime = player->isMuted();
if (muteTime > 0) {
std::ostringstream ss;
ss << "You are still muted for " << muteTime << " seconds.";
player->sendTextMessage(MSG_STATUS_SMALL, ss.str());
return;
}
if (playerSayCommand(player, text)) {
return;
}
if (playerSaySpell(player, type, text)) {
return;
}
if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
return;
}
if (type != SPEAK_PRIVATE_PN) {
player->removeMessageBuffer();
}
switch (type) {
case SPEAK_SAY:
internalCreatureSay(player, SPEAK_SAY, text, false);
break;
case SPEAK_WHISPER:
playerWhisper(player, text);
break;
case SPEAK_YELL:
playerYell(player, text);
break;
case SPEAK_PRIVATE_TO:
case SPEAK_PRIVATE_RED_TO:
playerSpeakTo(player, type, receiver, text);
break;
case SPEAK_CHANNEL_O:
case SPEAK_CHANNEL_Y:
case SPEAK_CHANNEL_R1:
g_chat.talkToChannel(*player, type, text, channelId);
break;
case SPEAK_PRIVATE_PN:
playerSpeakToNpc(player, text);
break;
case SPEAK_BROADCAST:
playerBroadcastMessage(player, text);
break;
default:
break;
}
}
bool Game::playerSayCommand(Player* player, const std::string& text)
{
if (text.empty()) {
return false;
}
char firstCharacter = text.front();
for (char commandTag : commandTags) {
if (commandTag == firstCharacter) {
if (commands.exeCommand(*player, text)) {
return true;
}
}
}
return false;
}
bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
std::string words = text;
TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
if (result == TALKACTION_BREAK) {
return true;
}
result = g_spells->playerSaySpell(player, words);
if (result == TALKACTION_BREAK) {
if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
return internalCreatureSay(player, SPEAK_SAY, words, false);
} else {
return internalCreatureSay(player, SPEAK_MONSTER_SAY, words, false);
}
} else if (result == TALKACTION_FAILED) {
return true;
}
return false;
}
bool Game::playerWhisper(Player* player, const std::string& text)
{
SpectatorVec list;
getSpectators(list, player->getPosition(), false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
//send to client
for (Creature* spectator : list) {
if (Player* spectatorPlayer = spectator->getPlayer()) {
if (!Position::areInRange<1, 1, 0>(player->getPosition(), spectatorPlayer->getPosition())) {
spectatorPlayer->sendCreatureSay(player, SPEAK_WHISPER, "pspsps");
} else {
spectatorPlayer->sendCreatureSay(player, SPEAK_WHISPER, text);
}
}
}
//event method
for (Creature* spectator : list) {
spectator->onCreatureSay(player, SPEAK_WHISPER, text);
}
return true;
}
bool Game::playerYell(Player* player, const std::string& text)
{
if (player->getLevel() == 1) {
player->sendTextMessage(MSG_STATUS_SMALL, "You may not yell as long as you are on level 1.");
return false;
}
if (player->hasCondition(CONDITION_YELLTICKS)) {
player->sendCancelMessage(RET_YOUAREEXHAUSTED);
return false;
}
if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
player->addCondition(condition);
}
internalCreatureSay(player, SPEAK_YELL, asUpperCaseString(text), false);
return true;
}
bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver,
const std::string& text)
{
Player* toPlayer = getPlayerByName(receiver);
if (!toPlayer) {
player->sendTextMessage(MSG_STATUS_SMALL, "A player with this name is not online.");
return false;
}
if (type == SPEAK_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
type = SPEAK_PRIVATE_RED_FROM;
} else {
type = SPEAK_PRIVATE_FROM;
}
toPlayer->sendCreatureSay(player, type, text);
toPlayer->onCreatureSay(player, type, text);
if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) {
player->sendTextMessage(MSG_STATUS_SMALL, "A player with this name is not online.");
} else {
std::ostringstream ss;
ss << "Message sent to " << toPlayer->getName() << '.';
player->sendTextMessage(MSG_STATUS_SMALL, ss.str());
}
return true;
}
bool Game::playerSpeakToNpc(Player* player, const std::string& text)
{
SpectatorVec list;
getSpectators(list, player->getPosition());
for (Creature* spectator : list) {
if (spectator->getNpc()) {
spectator->onCreatureSay(player, SPEAK_PRIVATE_PN, text);
}
}
return true;
}
//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/,
int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey);
}
bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const
{
return map.isSightClear(fromPos, toPos, floorCheck);
}
bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
if (creature->getDirection() == dir) {
return false;
}
creature->setDirection(dir);
//send to client
SpectatorVec list;
getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureTurn(creature);
}
return true;
}
bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
bool ghostMode, SpectatorVec* listPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
if (text.empty()) {
return false;
}
if (!pos) {
pos = &creature->getPosition();
}
SpectatorVec list;
if (!listPtr || listPtr->empty()) {
// This somewhat complex construct ensures that the cached SpectatorVec
// inside getSpectators() is reused whenever possible; otherwise a local
// vector is filled (hopefully the compiler optimizes away the temporary
// when it is not used).
if (type != SPEAK_YELL && type != SPEAK_MONSTER_YELL) {
getSpectators(list, *pos, false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
} else {
getSpectators(list, *pos, true, false, 18, 18, 14, 14);
}
} else {
list = (*listPtr);
}
//send to client
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
tmpPlayer->sendCreatureSay(creature, type, text, pos);
}
}
}
//event method
for (Creature* spectator : list) {
spectator->onCreatureSay(creature, type, text);
}
return true;
}
bool Game::getPathTo(const Creature* creature, const Position& destPos,
std::list<Direction>& listDir, int32_t maxSearchDist /*= -1*/)
{
return map.getPathTo(creature, destPos, listDir, maxSearchDist);
}
bool Game::getPathToEx(const Creature* creature, const Position& targetPos,
std::list<Direction>& dirList, const FindPathParams& fpp)
{
return map.getPathMatching(creature, dirList, FrozenPathingConditionCall(targetPos), fpp);
}
bool Game::getPathToEx(const Creature* creature, const Position& targetPos, std::list<Direction>& dirList,
uint32_t minTargetDist, uint32_t maxTargetDist, bool fullPathSearch /*= true*/,
bool clearSight /*= true*/, int32_t maxSearchDist /*= -1*/)
{
FindPathParams fpp;
fpp.fullPathSearch = fullPathSearch;
fpp.maxSearchDist = maxSearchDist;
fpp.clearSight = clearSight;
fpp.minTargetDist = minTargetDist;
fpp.maxTargetDist = maxTargetDist;
return getPathToEx(creature, targetPos, dirList, fpp);
}
void Game::checkCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onWalk();
cleanup();
}
}
void Game::updateCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->goToFollowCreature();
}
}
void Game::checkCreatureAttack(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onAttacking(0);
}
}
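// Registers a creature for periodic thinking; it is placed into a random
// check list to spread the load and ref-counted while it stays there.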
void Game::addCreatureCheck(Creature* creature)
{
creature->creatureCheck = true;
if (creature->inCheckCreaturesVector) {
// already in a vector
return;
}
creature->inCheckCreaturesVector = true;
checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
creature->useThing2();
}
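// Removal is lazy: only the flag is cleared here, and checkCreatures()
// erases and releases the creature on its next pass over the list.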
void Game::removeCreatureCheck(Creature* creature)
{
if (creature->inCheckCreaturesVector) {
creature->creatureCheck = false;
}
}
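// One of the EVENT_CREATURECOUNT check lists is processed per tick and the
// next tick is scheduled immediately, so each creature is serviced once per
// full pass over the lists. Creatures whose check flag was cleared are
// erased and released here.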
void Game::checkCreatures(size_t index)
{
g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));
auto& checkCreatureList = checkCreatureLists[index];
for (auto it = checkCreatureList.begin(), end = checkCreatureList.end(); it != end;) {
Creature* creature = *it;
if (creature->creatureCheck) {
if (creature->getHealth() > 0) {
creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
} else {
creature->onDeath();
}
++it;
} else {
creature->inCheckCreaturesVector = false;
it = checkCreatureList.erase(it);
ReleaseCreature(creature);
}
}
cleanup();
}
void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
varSpeed += varSpeedDelta;
creature->setSpeed(varSpeed);
//send to clients
SpectatorVec list;
getSpectators(list, creature->getPosition(), false, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
}
}
void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
creature->setCurrentOutfit(outfit);
if (creature->isInvisible()) {
return;
}
//send to clients
SpectatorVec list;
getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
}
}
void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
//send to clients
SpectatorVec list;
getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
}
}
void Game::changeLight(const Creature* creature)
{
//send to clients
SpectatorVec list;
getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureLight(creature);
}
}
bool Game::combatBlockHit(CombatType_t combatType, Creature* attacker, Creature* target,
int32_t& healthChange, bool checkDefense, bool checkArmor)
{
if (combatType == COMBAT_NONE) {
return true;
}
if (target->getPlayer() && target->getPlayer()->isInGhostMode()) {
return true;
}
if (healthChange > 0) {
return false;
}
const Position& targetPos = target->getPosition();
SpectatorVec list;
getSpectators(list, targetPos, false, true);
if (!target->isAttackable() || Combat::canDoCombat(attacker, target) != RET_NOERROR) {
if (!target->isInGhostMode()) {
addMagicEffect(list, targetPos, NM_ME_POFF);
}
return true;
}
int32_t damage = -healthChange;
BlockType_t blockType = target->blockHit(attacker, combatType, damage, checkDefense, checkArmor);
healthChange = -damage;
if (blockType == BLOCK_DEFENSE) {
addMagicEffect(list, targetPos, NM_ME_POFF);
return true;
} else if (blockType == BLOCK_ARMOR) {
addMagicEffect(list, targetPos, NM_ME_BLOCKHIT);
return true;
} else if (blockType == BLOCK_IMMUNITY) {
uint8_t hitEffect = 0;
switch (combatType) {
case COMBAT_UNDEFINEDDAMAGE:
break;
case COMBAT_ENERGYDAMAGE:
case COMBAT_FIREDAMAGE:
case COMBAT_PHYSICALDAMAGE:
case COMBAT_ICEDAMAGE:
case COMBAT_DEATHDAMAGE: {
hitEffect = NM_ME_BLOCKHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
hitEffect = NM_ME_POISON_RINGS;
break;
}
case COMBAT_HOLYDAMAGE: {
hitEffect = NM_ME_HOLYDAMAGE;
break;
}
default: {
hitEffect = NM_ME_POFF;
break;
}
}
addMagicEffect(list, targetPos, hitEffect);
return true;
}
return false;
}
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE: {
Item* splash = nullptr;
switch (target->getRace()) {
case RACE_VENOM:
color = TEXTCOLOR_LIGHTGREEN;
effect = NM_ME_POISON;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_GREEN);
break;
case RACE_BLOOD:
color = TEXTCOLOR_RED;
effect = NM_ME_DRAW_BLOOD;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
break;
case RACE_UNDEAD:
color = TEXTCOLOR_LIGHTGREY;
effect = NM_ME_HIT_AREA;
break;
case RACE_FIRE:
color = TEXTCOLOR_ORANGE;
effect = NM_ME_DRAW_BLOOD;
break;
case RACE_ENERGY:
color = TEXTCOLOR_PURPLE;
effect = NM_ME_ENERGY_DAMAGE;
break;
default:
color = TEXTCOLOR_NONE;
effect = NM_ME_NONE;
break;
}
if (splash) {
internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
startDecay(splash);
}
break;
}
case COMBAT_ENERGYDAMAGE: {
color = TEXTCOLOR_PURPLE;
effect = NM_ME_ENERGY_DAMAGE;
break;
}
case COMBAT_EARTHDAMAGE: {
color = TEXTCOLOR_LIGHTGREEN;
effect = NM_ME_POISON_RINGS;
break;
}
case COMBAT_DROWNDAMAGE: {
color = TEXTCOLOR_LIGHTBLUE;
effect = NM_ME_LOSE_ENERGY;
break;
}
case COMBAT_FIREDAMAGE: {
color = TEXTCOLOR_ORANGE;
effect = NM_ME_HITBY_FIRE;
break;
}
case COMBAT_ICEDAMAGE: {
color = TEXTCOLOR_SKYBLUE;
effect = NM_ME_ICEATTACK;
break;
}
case COMBAT_HOLYDAMAGE: {
color = TEXTCOLOR_YELLOW;
effect = NM_ME_HOLYDAMAGE;
break;
}
case COMBAT_DEATHDAMAGE: {
color = TEXTCOLOR_DARKRED;
effect = NM_ME_SMALLCLOUDS;
break;
}
case COMBAT_LIFEDRAIN: {
color = TEXTCOLOR_RED;
effect = NM_ME_MAGIC_BLOOD;
break;
}
default: {
color = TEXTCOLOR_NONE;
effect = NM_ME_NONE;
break;
}
}
}
bool Game::combatChangeHealth(CombatType_t combatType, Creature* attacker, Creature* target, int32_t healthChange)
{
const Position& targetPos = target->getPosition();
if (healthChange > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer) {
if (g_config.getBoolean(ConfigManager::CANNOT_ATTACK_SAME_LOOKFEET) && attackerPlayer->defaultOutfit.lookFeet == target->defaultOutfit.lookFeet && combatType != COMBAT_HEALING) {
return false;
}
if (attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, healthChange);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
std::string pluralString = (realHealthChange != 1 ? "s." : ".");
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " was healed for " << realHealthChange << " hitpoint" << pluralString;
} else if (attacker == target) {
ss << ucfirst(attacker->getNameDescription()) << " healed " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself for " : "himself for ") : "itself for ") << realHealthChange << " hitpoint" << pluralString;
} else {
ss << ucfirst(attacker->getNameDescription()) << " healed " << target->getNameDescription() << " for " << realHealthChange << " hitpoint" << pluralString;
}
std::string spectatorMessage = ss.str();
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
SpectatorVec list;
getSpectators(list, targetPos, false, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << "You heal " << target->getNameDescription() << " for " << realHealthChange << " hitpoint" << pluralString;
message.type = MSG_HEALED;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You were healed for " << realHealthChange << " hitpoint" << pluralString;
} else if (targetPlayer == attackerPlayer) {
ss << "You heal yourself for " << realHealthChange << " hitpoint" << pluralString;
} else {
ss << "You were healed by " << attacker->getNameDescription() << " for " << realHealthChange << " hitpoint" << pluralString;
}
message.type = MSG_HEALED;
message.text = ss.str();
} else {
message.type = MSG_HEALED_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
SpectatorVec list;
getSpectators(list, targetPos, true, true);
if (!target->isAttackable() || Combat::canDoCombat(attacker, target) != RET_NOERROR) {
addMagicEffect(list, targetPos, NM_ME_POFF);
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer) {
if (g_config.getBoolean(ConfigManager::CANNOT_ATTACK_SAME_LOOKFEET) && attacker->defaultOutfit.lookFeet == target->defaultOutfit.lookFeet && combatType != COMBAT_HEALING) {
return false;
}
if (attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
}
int32_t damage = -healthChange;
if (damage == 0) {
return true;
}
TextMessage message;
message.position = targetPos;
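// Mana shield soaks as much of the damage as the target's mana allows
// before any health is drained.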
if (target->hasCondition(CONDITION_MANASHIELD) && combatType != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(target->getMana(), damage);
if (manaDamage != 0) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_CHANGEMANA)) {
creatureEvent->executeChangeMana(target, attacker, manaDamage);
}
target->drainMana(attacker, manaDamage);
addMagicEffect(list, targetPos, NM_ME_LOSE_ENERGY);
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana.";
} else if (attacker == target) {
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana blocking an attack by " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself." : "himself.") : "itself.");
} else {
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana blocking an attack by " << attacker->getNameDescription() << '.';
}
std::string spectatorMessage = ss.str();
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z == targetPos.z) {
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana blocking your attack.";
message.type = MSG_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You lose " << manaDamage << " mana.";
} else if (targetPlayer == attackerPlayer) {
ss << "You lose " << manaDamage << " mana blocking an attack by yourself.";
} else {
ss << "You lose " << manaDamage << " mana blocking an attack by " << attacker->getNameDescription() << '.';
}
message.type = MSG_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MSG_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
damage = std::max<int32_t>(0, damage - manaDamage);
}
}
int32_t targetHealth = target->getHealth();
damage = std::min<int32_t>(targetHealth, damage);
if (damage > 0) {
const auto& changeHealthEvents = target->getCreatureEvents(CREATURE_EVENT_CHANGEHEALTH);
if (!changeHealthEvents.empty()) {
CombatDamage tmpDamage;
tmpDamage.primary.type = combatType;
tmpDamage.primary.value = damage;
for (CreatureEvent* creatureEvent : changeHealthEvents) {
creatureEvent->executeChangeHealth(target, attacker, tmpDamage);
}
}
if (damage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, damage);
addCreatureHealth(list, target);
uint8_t hitEffect;
combatGetTypeInfo(combatType, target, message.primary.color, hitEffect);
if (message.primary.color != TEXTCOLOR_NONE) {
message.primary.value = damage;
addMagicEffect(list, targetPos, hitEffect);
std::string pluralString = (damage != 1 ? "s" : "");
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " loses " << damage << " hitpoint" << pluralString << ".";
} else if (attacker == target) {
ss << ucfirst(target->getNameDescription()) << " loses " << damage << " hitpoint" << pluralString << " due to " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his") : "its") << " own attack.";
} else {
ss << ucfirst(target->getNameDescription()) << " loses " << damage << " hitpoint" << pluralString << " due to an attack by " << attacker->getNameDescription() << '.';
}
std::string spectatorMessage = ss.str();
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z == targetPos.z) {
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << ucfirst(target->getNameDescription()) << " loses " << damage << " hitpoint" << pluralString << " due to your attack.";
message.type = MSG_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You lose " << damage << " hitpoint" << pluralString << ".";
} else if (targetPlayer == attackerPlayer) {
ss << "You lose " << damage << " hitpoint" << pluralString << " due to your own attack.";
} else {
ss << "You lose " << damage << " hitpoint" << pluralString << " due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MSG_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MSG_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
}
}
}
return true;
}
bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
const Position& targetPos = target->getPosition();
int32_t healthChange = damage.primary.value + damage.secondary.value;
if (healthChange > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer) {
if (g_config.getBoolean(ConfigManager::CANNOT_ATTACK_SAME_LOOKFEET) && attackerPlayer->defaultOutfit.lookFeet == target->defaultOutfit.lookFeet && damage.primary.type != COMBAT_HEALING) {
return false;
}
if (attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, healthChange);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
std::string pluralString = (realHealthChange != 1 ? "s." : ".");
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " was healed for " << realHealthChange << " hitpoint" << pluralString;
} else if (attacker == target) {
ss << ucfirst(attacker->getNameDescription()) << " healed " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself for " : "himself for ") : "itself for ") << realHealthChange << " hitpoint" << pluralString;
} else {
ss << ucfirst(attacker->getNameDescription()) << " healed " << target->getNameDescription() << " for " << realHealthChange << " hitpoint" << pluralString;
}
std::string spectatorMessage = ss.str();
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
SpectatorVec list;
getSpectators(list, targetPos, false, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << "You heal " << target->getNameDescription() << " for " << realHealthChange << " hitpoint" << pluralString;
message.type = MSG_HEALED;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You were healed for " << realHealthChange << " hitpoint" << pluralString;
} else if (targetPlayer == attackerPlayer) {
ss << "You heal yourself for " << realHealthChange << " hitpoint" << pluralString;
} else {
ss << "You were healed by " << attacker->getNameDescription() << " for " << realHealthChange << " hitpoint" << pluralString;
}
message.type = MSG_HEALED;
message.text = ss.str();
} else {
message.type = MSG_HEALED_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
SpectatorVec list;
getSpectators(list, targetPos, true, true);
if (!target->isAttackable() || Combat::canDoCombat(attacker, target) != RET_NOERROR) {
addMagicEffect(list, targetPos, NM_ME_POFF);
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer) {
if (g_config.getBoolean(ConfigManager::CANNOT_ATTACK_SAME_LOOKFEET) && attacker->defaultOutfit.lookFeet == target->defaultOutfit.lookFeet && damage.primary.type != COMBAT_HEALING) {
return false;
}
if (attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
}
damage.primary.value = std::abs(damage.primary.value);
damage.secondary.value = std::abs(damage.secondary.value);
TextMessage message;
message.position = targetPos;
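// Mana shield soaks up to the full damage from the target's mana; the
// absorbed amount is taken out of the primary component first, with any
// overflow reducing the secondary component.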
if (target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(target->getMana(), -healthChange);
if (manaDamage != 0) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_CHANGEMANA)) {
creatureEvent->executeChangeMana(target, attacker, manaDamage);
}
target->drainMana(attacker, manaDamage);
addMagicEffect(list, targetPos, NM_ME_LOSE_ENERGY);
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana.";
} else if (attacker == target) {
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana blocking an attack by " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself." : "himself.") : "itself.");
} else {
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana blocking an attack by " << attacker->getNameDescription() << '.';
}
std::string spectatorMessage = ss.str();
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z == targetPos.z) {
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << ucfirst(target->getNameDescription()) << " loses " << manaDamage << " mana blocking your attack.";
message.type = MSG_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You lose " << manaDamage << " mana.";
} else if (targetPlayer == attackerPlayer) {
ss << "You lose " << manaDamage << " mana blocking an attack by yourself.";
} else {
ss << "You lose " << manaDamage << " mana blocking an attack by " << attacker->getNameDescription() << '.';
}
message.type = MSG_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MSG_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
damage.primary.value -= manaDamage;
if (damage.primary.value < 0) {
damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
damage.primary.value = 0;
}
}
}
int32_t targetHealth = target->getHealth();
if (damage.primary.value >= targetHealth) {
damage.primary.value = targetHealth;
damage.secondary.value = 0;
} else if (damage.secondary.value) {
damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value);
}
int32_t realDamage = damage.primary.value + damage.secondary.value;
if (realDamage) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_CHANGEHEALTH)) {
creatureEvent->executeChangeHealth(target, attacker, damage);
}
if (realDamage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, realDamage);
addCreatureHealth(list, target);
message.primary.value = damage.primary.value;
message.secondary.value = damage.secondary.value;
uint8_t hitEffect;
if (message.primary.value) {
combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect);
if (hitEffect != NM_ME_NONE) {
addMagicEffect(list, targetPos, hitEffect);
}
}
if (message.secondary.value) {
combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect);
if (hitEffect != NM_ME_NONE) {
addMagicEffect(list, targetPos, hitEffect);
}
}
if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) {
std::string pluralString = (realDamage != 1 ? "s" : "");
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " loses " << realDamage << " hitpoint" << pluralString << ".";
} else if (attacker == target) {
ss << ucfirst(target->getNameDescription()) << " loses " << realDamage << " hitpoint" << pluralString << " due to " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his") : "its") << " own attack.";
} else {
ss << ucfirst(target->getNameDescription()) << " loses " << realDamage << " hitpoint" << pluralString << " due to an attack by " << attacker->getNameDescription() << '.';
}
std::string spectatorMessage = ss.str();
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z == targetPos.z) {
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << ucfirst(target->getNameDescription()) << " loses " << realDamage << " hitpoint" << pluralString << " due to your attack.";
message.type = MSG_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You lose " << realDamage << " hitpoint" << pluralString << ".";
} else if (targetPlayer == attackerPlayer) {
ss << "You lose " << realDamage << " hitpoint" << pluralString << " due to your own attack.";
} else {
ss << "You lose " << realDamage << " hitpoint" << pluralString << " due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MSG_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MSG_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
}
}
}
return true;
}
bool Game::combatChangeMana(Creature* attacker, Creature* target, int32_t manaChange)
{
if (manaChange > 0) {
if (attacker) {
Player* attackerPlayer = attacker->getPlayer();
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer) {
if (g_config.getBoolean(ConfigManager::CANNOT_ATTACK_SAME_LOOKFEET) && attacker->defaultOutfit.lookFeet == target->defaultOutfit.lookFeet) {
return false;
}
if (attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
}
}
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_CHANGEMANA)) {
creatureEvent->executeChangeMana(target, attacker, manaChange);
}
target->changeMana(manaChange);
} else {
const Position& targetPos = target->getPosition();
if (!target->isAttackable() || Combat::canDoCombat(attacker, target) != RET_NOERROR) {
addMagicEffect(targetPos, NM_ME_POFF);
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer) {
if (g_config.getBoolean(ConfigManager::CANNOT_ATTACK_SAME_LOOKFEET) && attacker->defaultOutfit.lookFeet == target->defaultOutfit.lookFeet) {
return false;
}
if (attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
}
int32_t manaLoss = std::min<int32_t>(target->getMana(), -manaChange);
BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
if (blockType != BLOCK_NONE) {
addMagicEffect(targetPos, NM_ME_POFF);
return false;
}
if (manaLoss <= 0) {
return true;
}
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_CHANGEMANA)) {
creatureEvent->executeChangeMana(target, attacker, manaLoss);
}
target->drainMana(attacker, manaLoss);
std::ostringstream ss;
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " loses " << manaLoss << " mana.";
} else if (attacker == target) {
ss << ucfirst(target->getNameDescription()) << " loses " << manaLoss << " mana blocking an attack by " << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself." : "himself.") : "itself.");
} else {
ss << ucfirst(target->getNameDescription()) << " loses " << manaLoss << " mana blocking an attack by " << attacker->getNameDescription() << '.';
}
std::string spectatorMessage = ss.str();
TextMessage message;
message.position = targetPos;
message.primary.value = manaLoss;
message.primary.color = TEXTCOLOR_BLUE;
SpectatorVec list;
getSpectators(list, targetPos, false, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str("");
ss << ucfirst(target->getNameDescription()) << " loses " << manaLoss << " mana blocking your attack.";
message.type = MSG_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str("");
if (!attacker) {
ss << "You lose " << manaLoss << " mana.";
} else if (targetPlayer == attackerPlayer) {
ss << "You lose " << manaLoss << " mana blocking an attack by yourself.";
} else {
ss << "You lose " << manaLoss << " mana blocking an attack by " << attacker->getNameDescription() << '.';
}
message.type = MSG_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MSG_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
return true;
}
void Game::addCreatureHealth(const Creature* target)
{
SpectatorVec list;
getSpectators(list, target->getPosition(), true, true);
addCreatureHealth(list, target);
}
void Game::addCreatureHealth(const SpectatorVec& list, const Creature* target)
{
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureHealth(target);
}
}
}
void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
if (effect > NM_ME_LAST) {
return;
}
SpectatorVec list;
getSpectators(list, pos, true, true);
addMagicEffect(list, pos, effect);
}
void Game::addMagicEffect(const SpectatorVec& list, const Position& pos, uint8_t effect)
{
if (effect > NM_ME_LAST) {
return;
}
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendMagicEffect(pos, effect);
}
}
}
void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
if (effect > NM_SHOOT_LAST || effect == NM_SHOOT_UNK1 || effect == NM_SHOOT_UNK2 || effect == NM_SHOOT_UNK3) {
return;
}
SpectatorVec list;
getSpectators(list, fromPos, false, true);
getSpectators(list, toPos, false, true);
addDistanceEffect(list, fromPos, toPos, effect);
}
void Game::addDistanceEffect(const SpectatorVec& list, const Position& fromPos, const Position& toPos, uint8_t effect)
{
if (effect > NM_SHOOT_LAST || effect == NM_SHOOT_UNK1 || effect == NM_SHOOT_UNK2 || effect == NM_SHOOT_UNK3) {
return;
}
for (Creature* spectator : list) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
}
}
}
void Game::startDecay(Item* item)
{
if (!item || !item->canDecay()) {
return;
}
ItemDecayState_t decayState = item->getDecaying();
if (decayState == DECAYING_TRUE) {
return;
}
if (item->getDuration() > 0) {
item->useThing2();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
} else {
internalDecayItem(item);
}
}
void Game::internalDecayItem(Item* item)
{
const ItemType& it = Item::items[item->getID()];
if (it.decayTo != 0) {
Item* newItem = transformItem(item, it.decayTo);
startDecay(newItem);
} else {
ReturnValue ret = internalRemoveItem(item);
if (ret != RET_NOERROR) {
std::cout << "DEBUG, internalDecayItem failed, error code: " << (int32_t) ret << "item id: " << item->getID() << std::endl;
}
}
}
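// Decaying items are spread over EVENT_DECAY_BUCKETS lists; each invocation
// ages one bucket by a full rotation's worth of time and schedules the next.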
void Game::checkDecay()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;
for (auto it = decayItems[bucket].begin(); it != decayItems[bucket].end();) {
Item* item = *it;
if (!item->canDecay()) {
item->setDecaying(DECAYING_FALSE);
ReleaseItem(item);
it = decayItems[bucket].erase(it);
continue;
}
int32_t decreaseTime = EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS;
int32_t duration = item->getDuration();
if (duration - decreaseTime < 0) {
decreaseTime = duration;
}
duration -= decreaseTime;
item->decreaseDuration(decreaseTime);
if (duration <= 0) {
it = decayItems[bucket].erase(it);
internalDecayItem(item);
ReleaseItem(item);
} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
it = decayItems[bucket].erase(it);
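// Re-file the item into the bucket closest to its expiry; if that lands
// back in the current bucket, decay it now instead of waiting a rotation.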
size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
if (newBucket == bucket) {
internalDecayItem(item);
ReleaseItem(item);
} else {
decayItems[newBucket].push_back(item);
}
} else {
++it;
}
}
lastBucket = bucket;
cleanup();
}
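// Advances the in-game clock (lightHour, wrapping at 1440 minutes) and,
// around SUNRISE/SUNSET, ramps the ambient light between the night and day
// levels in roughly 30 increments, broadcasting the new light to all
// players while it is changing.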
void Game::checkLight()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
lightHour += lightHourDelta;
if (lightHour > 1440) {
lightHour -= 1440;
}
if (std::abs(lightHour - SUNRISE) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNRISE;
} else if (std::abs(lightHour - SUNSET) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNSET;
}
int32_t newLightLevel = lightLevel;
bool lightChange = false;
switch (lightState) {
case LIGHT_STATE_SUNRISE: {
newLightLevel += (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
case LIGHT_STATE_SUNSET: {
newLightLevel -= (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
default:
break;
}
if (newLightLevel <= LIGHT_LEVEL_NIGHT) {
lightLevel = LIGHT_LEVEL_NIGHT;
lightState = LIGHT_STATE_NIGHT;
} else if (newLightLevel >= LIGHT_LEVEL_DAY) {
lightLevel = LIGHT_LEVEL_DAY;
lightState = LIGHT_STATE_DAY;
} else {
lightLevel = newLightLevel;
}
if (lightChange) {
LightInfo lightInfo;
getWorldLightInfo(lightInfo);
for (const auto& it : players) {
it.second->sendWorldLight(lightInfo);
}
}
}
void Game::getWorldLightInfo(LightInfo& lightInfo) const
{
lightInfo.level = lightLevel;
lightInfo.color = 0xD7;
}
void Game::addCommandTag(char tag)
{
for (char commandTag : commandTags) {
if (commandTag == tag) {
return;
}
}
commandTags.push_back(tag);
}
void Game::resetCommandTag()
{
commandTags.clear();
}
void Game::shutdown()
{
std::cout << "Shutting down server..." << std::flush;
g_scheduler.shutdown();
g_dispatcher.shutdown();
Spawns::getInstance()->clear();
Raids::getInstance()->clear();
cleanup();
if (services) {
services->stop();
}
ConnectionManager::getInstance()->closeAll();
std::cout << " done!" << std::endl;
}
void Game::cleanup()
{
//free memory
for (auto creature : ToReleaseCreatures) {
creature->releaseThing2();
}
ToReleaseCreatures.clear();
for (auto item : ToReleaseItems) {
item->releaseThing2();
}
ToReleaseItems.clear();
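// File newly started decays into a bucket based on remaining duration;
// items with at least a full rotation left go into the bucket processed last.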
for (Item* item : toDecayItems) {
const uint32_t dur = item->getDuration();
if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
decayItems[lastBucket].push_back(item);
} else {
decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item);
}
}
toDecayItems.clear();
}
void Game::ReleaseCreature(Creature* creature)
{
ToReleaseCreatures.push_back(creature);
}
void Game::ReleaseItem(Item* item)
{
ToReleaseItems.push_back(item);
}
void Game::broadcastMessage(const std::string& text, MessageClasses type)
{
std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendTextMessage(type, text);
}
}
void Game::updateCreatureWalkthrough(const Creature* creature)
{
//send to clients
SpectatorVec list;
getSpectators(list, creature->getPosition(), true, true);
for (Creature* spectator : list) {
Player* tmpPlayer = spectator->getPlayer();
tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature));
}
}
void Game::updatePlayerSkull(Player* player)
{
if (getWorldType() != WORLD_TYPE_PVP) {
return;
}
SpectatorVec list;
getSpectators(list, player->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureSkull(player);
}
}
void Game::updatePlayerShield(Player* player)
{
SpectatorVec list;
getSpectators(list, player->getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureShield(player);
}
}
void Game::updatePlayerHelpers(const Player& player)
{
uint32_t creatureId = player.getID();
uint16_t helpers = player.getHelpers();
SpectatorVec list;
getSpectators(list, player.getPosition(), true, true);
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers);
}
}
void Game::updateCreatureType(Creature* creature)
{
const Player* masterPlayer = nullptr;
uint32_t creatureId = creature->getID();
CreatureType_t creatureType = creature->getType();
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
masterPlayer = master->getPlayer();
if (masterPlayer) {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
//send to clients
SpectatorVec list;
getSpectators(list, creature->getPosition(), true, true);
if (creatureType == CREATURETYPE_SUMMON_OTHERS) {
for (Creature* spectator : list) {
Player* player = spectator->getPlayer();
if (masterPlayer == player) {
player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN);
} else {
player->sendCreatureType(creatureId, creatureType);
}
}
} else {
for (Creature* spectator : list) {
spectator->getPlayer()->sendCreatureType(creatureId, creatureType);
}
}
}
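// Converts elapsed real days since the account's lastDay into spent premium
// days; a premiumDays value of uint16_t max is treated as unlimited.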
void Game::updatePremium(Account& account)
{
bool save = false;
time_t timeNow = time(nullptr);
if (account.premiumDays != 0 && account.premiumDays != std::numeric_limits<uint16_t>::max()) {
if (account.lastDay == 0) {
account.lastDay = timeNow;
save = true;
} else {
uint32_t days = (timeNow - account.lastDay) / 86400;
if (days > 0) {
if (days >= account.premiumDays) {
account.premiumDays = 0;
account.lastDay = 0;
} else {
account.premiumDays -= days;
uint32_t remainder = (timeNow - account.lastDay) % 86400;
account.lastDay = timeNow - remainder;
}
save = true;
}
}
} else if (account.lastDay != 0) {
account.lastDay = 0;
save = true;
}
if (save && !IOLoginData::saveAccount(account)) {
std::cout << "> ERROR: Failed to save account: " << account.name << "!" << std::endl;
}
}
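// Loads the MOTD counter and hash from server_config; when the configured
// MOTD no longer matches the stored hash, the counter is bumped so clients
// are shown the message again.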
void Game::loadMotdNum()
{
Database* db = Database::getInstance();
DBResult* result = db->storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'");
if (result) {
motdNum = atoi(result->getDataString("value").c_str());
db->freeResult(result);
} else {
db->executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')");
}
result = db->storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'");
if (result) {
motdHash = result->getDataString("value");
if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) {
++motdNum;
}
db->freeResult(result);
} else {
db->executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')");
}
}
void Game::saveMotdNum() const
{
Database* db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << motdNum << "' WHERE `config` = 'motd_num'";
db->executeQuery(query.str());
query.str("");
query << "UPDATE `server_config` SET `value` = '" << transformToSHA1(g_config.getString(ConfigManager::MOTD)) << "' WHERE `config` = 'motd_hash'";
db->executeQuery(query.str());
}
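// Whenever the online count beats the stored record, fire the
// GLOBALEVENT_RECORD handlers and persist the new record.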
void Game::checkPlayersRecord()
{
const size_t playersOnline = getPlayersOnline();
if (playersOnline > playersRecord) {
uint32_t previousRecord = playersRecord;
playersRecord = playersOnline;
for (const auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) {
it.second->executeRecord(playersRecord, previousRecord);
}
updatePlayersRecord();
}
}
void Game::updatePlayersRecord() const
{
Database* db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << playersRecord << "' WHERE `config` = 'players_record'";
db->executeQuery(query.str());
}
void Game::loadPlayersRecord()
{
Database* db = Database::getInstance();
DBResult* result = db->storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'");
if (result) {
playersRecord = atoi(result->getDataString("value").c_str());
db->freeResult(result);
} else {
db->executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')");
}
}
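// Returns the experience multiplier for a level: the global rate when
// stages are disabled, and the last configured stage's rate for levels
// beyond the final stage.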
uint64_t Game::getExperienceStage(uint32_t level)
{
if (!stagesEnabled) {
return g_config.getNumber(ConfigManager::RATE_EXPERIENCE);
}
if (useLastStageLevel && level >= lastStageLevel) {
return stages[lastStageLevel];
}
return stages[level];
}
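// Parses data/XML/stages.xml: a <config> node toggles stages on or off and
// every other node maps a [minlevel, maxlevel] range to a multiplier; a
// stage without a maxlevel becomes the open-ended last stage.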
bool Game::loadExperienceStages()
{
pugi::xml_document doc;
pugi::xml_parse_result result = doc.load_file("data/XML/stages.xml");
if (!result) {
std::cout << "[Error - Game::loadExperienceStages] Failed to load data/XML/stages.xml: " << result.description() << std::endl;
return false;
}
for (pugi::xml_node stageNode = doc.child("stages").first_child(); stageNode; stageNode = stageNode.next_sibling()) {
if (strcasecmp(stageNode.name(), "config") == 0) {
stagesEnabled = stageNode.attribute("enabled").as_bool();
} else {
uint32_t minLevel, maxLevel, multiplier;
pugi::xml_attribute minLevelAttribute = stageNode.attribute("minlevel");
if (minLevelAttribute) {
minLevel = pugi::cast<uint32_t>(minLevelAttribute.value());
} else {
minLevel = 1;
}
pugi::xml_attribute maxLevelAttribute = stageNode.attribute("maxlevel");
if (maxLevelAttribute) {
maxLevel = pugi::cast<uint32_t>(maxLevelAttribute.value());
} else {
maxLevel = 0;
lastStageLevel = minLevel;
useLastStageLevel = true;
}
pugi::xml_attribute multiplierAttribute = stageNode.attribute("multiplier");
if (multiplierAttribute) {
multiplier = pugi::cast<uint32_t>(multiplierAttribute.value());
} else {
multiplier = 1;
}
if (useLastStageLevel) {
stages[lastStageLevel] = multiplier;
} else {
for (uint32_t i = minLevel; i <= maxLevel; ++i) {
stages[i] = multiplier;
}
}
}
}
return true;
}
void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || invitedPlayer->isInviting(player)) {
return;
}
if (invitedPlayer->getParty()) {
std::ostringstream ss;
ss << invitedPlayer->getName() << " is already in a party.";
player->sendTextMessage(MSG_INFO_DESCR, ss.str());
return;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
} else if (party->getLeader() != player) {
return;
}
party->invitePlayer(*invitedPlayer);
}
void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* leader = getPlayerByID(leaderId);
if (!leader || !leader->isInviting(player)) {
return;
}
Party* party = leader->getParty();
if (!party || party->getLeader() != leader) {
return;
}
if (player->getParty()) {
player->sendTextMessage(MSG_INFO_DESCR, "You are already in a party.");
return;
}
party->joinParty(*player);
}
void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || !player->isInviting(invitedPlayer)) {
return;
}
party->revokeInvitation(*invitedPlayer);
}
void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* newLeader = getPlayerByID(newLeaderId);
if (!newLeader || !player->isPartner(newLeader)) {
return;
}
party->passPartyLeadership(newLeader);
}
void Game::playerLeaveParty(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->leaveParty(player);
}
void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->setSharedExperience(player, sharedExpActive);
}
void Game::sendGuildMotd(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Guild* guild = player->getGuild();
if (guild) {
player->sendChannelMessage("Message of the Day", guild->getMotd(), SPEAK_CHANNEL_R1, CHANNEL_GUILD);
}
}
void Game::kickPlayer(uint32_t playerId, bool displayEffect)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->kickPlayer(displayEffect);
}
void Game::playerReportBug(uint32_t playerId, const std::string& bug)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAccountType() == ACCOUNT_TYPE_NORMAL) {
return;
}
std::string fileName = "data/reports/" + player->getName() + " report.txt";
FILE* file = fopen(fileName.c_str(), "a");
if (file) {
const Position& position = player->getPosition();
fprintf(file, "------------------------------\nName: %s [Position X: %u Y: %u Z: %u]\nBug Report: %s\n", player->getName().c_str(), position.x, position.y, position.z, bug.c_str());
fclose(file);
}
player->sendTextMessage(MSG_EVENT_DEFAULT, "Your report has been sent to " + g_config.getString(ConfigManager::SERVER_NAME) + ".");
}
void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
// TODO: move debug assertions to database
FILE* file = fopen("client_assertions.txt", "a");
if (file) {
fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str());
fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str());
fclose(file);
}
}
void Game::playerLeaveMarket(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setInMarket(false);
}
void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
if (it.wareId == 0) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
player->sendMarketDetail(it.id);
}
void Game::playerBrowseMarketOwnOffers(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID());
const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnOffers(buyOffers, sellOffers);
}
void Game::playerBrowseMarketOwnHistory(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID());
const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnHistory(buyOffers, sellOffers);
}
void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous)
{
if (amount == 0 || amount > 64000) {
return;
}
if (price == 0 || price > 999999999) {
return;
}
if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) {
player->sendMarketLeave();
return;
}
const ItemType& itt = Item::items.getItemIdByClientId(spriteId);
if (itt.id == 0 || itt.wareId == 0) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(itt.wareId);
if (it.id == 0 || it.wareId == 0) {
return;
}
if (!it.stackable && amount > 2000) {
return;
}
const int32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER);
if (maxOfferCount > 0) {
const int32_t offerCount = IOMarket::getPlayerOfferCount(player->getGUID());
if (offerCount == -1 || offerCount >= maxOfferCount) {
return;
}
}
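// The market fee is 1% of the total price, clamped to the range [20, 1000] gold.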
uint64_t fee = (price / 100.) * amount;
if (fee < 20) {
fee = 20;
} else if (fee > 1000) {
fee = 1000;
}
if (type == MARKETACTION_SELL) {
if (fee > player->bankBalance) {
return;
}
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
ItemList itemList;
uint32_t count = 0;
std::forward_list<Container*> containerList {depotChest, player->getInbox()};
bool enough = false;
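// Depth-first walk over the depot chest and inbox, collecting matching,
// unmodified items until the requested amount has been gathered.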
do {
Container* container = containerList.front();
containerList.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containerList.push_front(c);
continue;
}
if (item->getWareID() != it.wareId) {
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (!itemType.isRune() && item->getCharges() != itemType.charges) {
continue;
}
if (item->getDuration() != itemType.decayTime) {
continue;
}
itemList.push_back(item);
count += Item::countByType(item, -1);
if (count >= amount) {
enough = true;
break;
}
}
if (enough) {
break;
}
} while (!containerList.empty());
if (!enough) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance -= fee;
} else {
uint64_t totalPrice = (uint64_t)price * amount;
totalPrice += fee;
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
}
IOMarket::createOffer(player->getGUID(), (MarketAction_t)type, it.id, amount, price, anonymous);
player->sendMarketEnter(player->getLastDepotId());
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
}
void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0 || offer.playerId != player->getGUID()) {
return;
}
if (offer.type == MARKETACTION_BUY) {
player->bankBalance += (uint64_t)offer.price * offer.amount;
player->sendMarketEnter(player->getLastDepotId());
} else {
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
int32_t stackCount = std::min<int32_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
}
}
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED);
offer.amount = 0;
offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
player->sendMarketCancelOffer(offer);
}
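// Accepting a buy offer hands the acceptor's matching items to the buyer
// and credits the acceptor; accepting a sell offer debits the acceptor and
// mails the items. Partially accepted offers are decremented, not deleted.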
void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount)
{
if (amount == 0 || amount > 64000) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0) {
return;
}
if (amount > offer.amount) {
return;
}
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
uint64_t totalPrice = (uint64_t)offer.price * amount;
if (offer.type == MARKETACTION_BUY) {
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
ItemList itemList;
uint32_t count = 0;
std::forward_list<Container*> containerList {depotChest, player->getInbox()};
bool enough = false;
do {
Container* container = containerList.front();
containerList.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containerList.push_front(c);
continue;
}
if (item->getID() != it.id) {
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (!itemType.isRune() && item->getCharges() != itemType.charges) {
continue;
}
if (item->getDuration() != itemType.decayTime) {
continue;
}
itemList.push_back(item);
count += Item::countByType(item, -1);
if (count >= amount) {
enough = true;
break;
}
}
if (enough) {
break;
}
} while (!containerList.empty());
if (!enough) {
return;
}
Player* buyerPlayer = getPlayerByGUID(offer.playerId);
if (!buyerPlayer) {
buyerPlayer = new Player(nullptr);
if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) {
delete buyerPlayer;
return;
}
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance += totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
}
}
if (buyerPlayer->isOffline()) {
IOLoginData::savePlayer(buyerPlayer);
delete buyerPlayer;
} else {
buyerPlayer->onReceiveMail();
}
} else {
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
}
}
Player* sellerPlayer = getPlayerByGUID(offer.playerId);
if (sellerPlayer) {
sellerPlayer->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
player->onReceiveMail();
}
const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX);
IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED);
offer.amount -= amount;
if (offer.amount == 0) {
IOMarket::deleteOffer(offer.id);
} else {
IOMarket::acceptOffer(offer.id, amount);
}
player->sendMarketEnter(player->getLastDepotId());
offer.timestamp += marketOfferDuration;
player->sendMarketAcceptOffer(offer);
}
void Game::checkExpiredMarketOffers()
{
const ExpiredMarketOfferList& expiredBuyOffers = IOMarket::getExpiredOffers(MARKETACTION_BUY);
for (const ExpiredMarketOffer& offer : expiredBuyOffers) {
uint64_t totalPrice = (uint64_t)offer.price * offer.amount;
Player* player = getPlayerByGUID(offer.playerId);
if (player) {
player->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_EXPIRED);
}
const ExpiredMarketOfferList& expiredSellOffers = IOMarket::getExpiredOffers(MARKETACTION_SELL);
for (const ExpiredMarketOffer& offer : expiredSellOffers) {
Player* player = getPlayerByGUID(offer.playerId);
if (!player) {
player = new Player(nullptr);
if (!IOLoginData::loadPlayerById(player, offer.playerId)) {
delete player;
continue;
}
}
const ItemType& itemType = Item::items[offer.itemId];
if (itemType.id == 0) {
continue;
}
if (itemType.stackable) {
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(itemType.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (itemType.charges != 0) {
subType = itemType.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(itemType.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RET_NOERROR) {
delete item;
break;
}
}
}
if (player->isOffline()) {
IOLoginData::savePlayer(player);
delete player;
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_EXPIRED);
}
int32_t checkExpiredMarketOffersEachMinutes = g_config.getNumber(ConfigManager::CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES);
if (checkExpiredMarketOffersEachMinutes <= 0) {
return;
}
g_scheduler.addEvent(createSchedulerTask(checkExpiredMarketOffersEachMinutes * 60 * 1000, std::bind(&Game::checkExpiredMarketOffers, this)));
}
void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) {
creatureEvent->executeExtendedOpcode(player, opcode, buffer);
}
}
void Game::forceAddCondition(uint32_t creatureId, Condition* condition)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
delete condition;
return;
}
creature->addCondition(condition, true);
}
void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
creature->removeCondition(type, true);
}
void Game::sendOfflineTrainingDialog(Player* player)
{
if (!player) {
return;
}
if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) {
player->sendModalWindow(offlineTrainingWindow);
}
}
void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasModalWindowOpen(modalWindowId)) {
return;
}
player->onModalWindowHandled(modalWindowId);
// offline training, hardcoded
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
if (button == 1) {
if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DIST || choice == SKILL__MAGLEVEL) {
BedItem* bedItem = player->getBedItem();
if (bedItem && bedItem->sleep(player)) {
player->setOfflineTrainingSkill(choice);
return;
}
}
} else {
player->sendTextMessage(MSG_EVENT_ADVANCE, "Offline training aborted.");
}
player->setBedItem(nullptr);
} else {
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) {
creatureEvent->executeModalWindow(player, modalWindowId, button, choice);
}
}
}
void Game::addPlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames[lowercase_name] = player;
wildcardTree.insert(lowercase_name);
players[player->getID()] = player;
}
void Game::removePlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames.erase(lowercase_name);
wildcardTree.remove(lowercase_name);
players.erase(player->getID());
}
void Game::addNpc(Npc* npc)
{
npcs[npc->getID()] = npc;
}
void Game::removeNpc(Npc* npc)
{
npcs.erase(npc->getID());
}
void Game::addMonster(Monster* monster)
{
monsters[monster->getID()] = monster;
}
void Game::removeMonster(Monster* monster)
{
monsters.erase(monster->getID());
}
Guild* Game::getGuild(uint32_t id) const
{
auto it = guilds.find(id);
if (it == guilds.end()) {
return nullptr;
}
return it->second;
}
void Game::addGuild(Guild* guild)
{
guilds[guild->getId()] = guild;
}
void Game::decreaseBrowseFieldRef(const Position& pos)
{
Tile* tile = getTile(pos);
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it != browseFields.end()) {
it->second->releaseThing2();
}
}
Group* Game::getGroup(uint32_t id)
{
return groups.getGroup(id);
}
| 1 | 9,863 | A space after each argument, and an empty line above this if-statement. | otland-forgottenserver | cpp |
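A minimal illustration of the style the note above asks for (hypothetical lines; the patch under review is not shown in this dump): a space after each comma-separated argument, and a blank line separating the call from the following if-statement.
// before: internalRemoveItem(item,removeCount);
internalRemoveItem(item, removeCount);

if (tmpAmount == 0) {
	break;
}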
@@ -27,8 +27,8 @@ export function memo(c, comparer) {
this.shouldComponentUpdate = shouldUpdate;
return createElement(c, props);
}
- Memoed.prototype.isReactComponent = true;
Memoed.displayName = 'Memo(' + (c.displayName || c.name) + ')';
+ Memoed.prototype.isReactComponent = true;
Memoed._forwarded = true;
return Memoed;
} | 1 | import { createElement } from 'preact';
import { shallowDiffers } from './util';
/**
* Memoize a component, so that it only updates when the props actually have
* changed. This was previously known as `React.pure`.
* @param {import('./internal').FunctionalComponent} c functional component
* @param {(prev: object, next: object) => boolean} [comparer] Custom equality function
* @returns {import('./internal').FunctionalComponent}
*/
export function memo(c, comparer) {
function shouldUpdate(nextProps) {
let ref = this.props.ref;
let updateRef = ref == nextProps.ref;
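	// A changed ref must be released before re-render: function refs are
	// invoked with null, object refs get their .current cleared.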
if (!updateRef && ref) {
ref.call ? ref(null) : (ref.current = null);
}
if (!comparer) {
return shallowDiffers(this.props, nextProps);
}
return !comparer(this.props, nextProps) || !updateRef;
}
function Memoed(props) {
this.shouldComponentUpdate = shouldUpdate;
return createElement(c, props);
}
Memoed.prototype.isReactComponent = true;
Memoed.displayName = 'Memo(' + (c.displayName || c.name) + ')';
Memoed._forwarded = true;
return Memoed;
}
| 1 | 16,002 | Things like this make me wonder whether we should ship a "development" and a "production" version. I remember the discussions about making things more complex bundling-wise, but couldn't we omit this line in a "production" or "minimal" variant of our build? That would save us some bytes here and there. Or are we somehow able to move this line to debug? I thought about monkey-patching the exported function but that only works in CJS and not ESM AFAIK. | preactjs-preact | js |
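A rough sketch of the "production build" idea from the note above. The NODE_ENV guard is an assumption about the build setup, not preact's actual packaging, and whether compat still works at runtime without the flag is exactly the open question in the comment.
// Bundlers replace process.env.NODE_ENV with a constant, so this branch
// (and the byte cost of the assignment) can be dead-code-eliminated.
if (process.env.NODE_ENV !== 'production') {
	Memoed.prototype.isReactComponent = true;
}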
@@ -6,6 +6,8 @@ namespace Datadog.Trace
Scope Activate(Span span, bool finishOnClose);
+ void AddScopeListener(IScopeListener listener);
+
void Close(Scope scope);
}
} | 1 | namespace Datadog.Trace
{
internal interface IScopeManager
{
Scope Active { get; }
Scope Activate(Span span, bool finishOnClose);
void Close(Scope scope);
}
}
| 1 | 15,092 | Maybe rename to `RegisterListener` or `RegisterSubscriber`? | DataDog-dd-trace-dotnet | .cs |
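The rename floated above would read like this (sketch only; either proposed name works the same way):
internal interface IScopeManager
{
    Scope Active { get; }

    Scope Activate(Span span, bool finishOnClose);

    void RegisterListener(IScopeListener listener); // instead of AddScopeListener

    void Close(Scope scope);
}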
@@ -34,7 +34,8 @@ export function diffChildren(
oldDom,
isHydrating
) {
- let i, j, oldVNode, newDom, sibDom, firstChildDom, refs;
+ let i, j, oldVNode, newDom, sibDom, firstChildDom;
+ let refs = [];
// This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR
// as EMPTY_OBJ._children should be `undefined`. | 1 | import { diff, unmount, applyRef } from './index';
import { createVNode } from '../create-element';
import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { removeNode } from '../util';
import { getDomSibling } from '../component';
/**
* Diff the children of a virtual node
* @param {import('../internal').PreactElement} parentDom The DOM element whose
* children are being diffed
* @param {import('../internal').VNode} newParentVNode The new virtual
* node whose children should be diff'ed against oldParentVNode
* @param {import('../internal').VNode} oldParentVNode The old virtual
* node whose children should be diff'ed against newParentVNode
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {Node | Text} oldDom The current attached DOM
* element any new dom elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} isHydrating Whether or not we are in hydration
*/
export function diffChildren(
parentDom,
newParentVNode,
oldParentVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
) {
let i, j, oldVNode, newDom, sibDom, firstChildDom, refs;
// This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR
// as EMPTY_OBJ._children should be `undefined`.
let oldChildren = (oldParentVNode && oldParentVNode._children) || EMPTY_ARR;
let oldChildrenLength = oldChildren.length;
// Only in very specific places should this logic be invoked (top level `render` and `diffElementNodes`).
// I'm using `EMPTY_OBJ` to signal when `diffChildren` is invoked in these situations. I can't use `null`
// for this purpose, because `null` is a valid value for `oldDom` which can mean to skip to this logic
// (e.g. if mounting a new tree in which the old DOM should be ignored (usually for Fragments).
if (oldDom == EMPTY_OBJ) {
if (excessDomChildren != null) {
oldDom = excessDomChildren[0];
} else if (oldChildrenLength) {
oldDom = getDomSibling(oldParentVNode, 0);
} else {
oldDom = null;
}
}
i = 0;
newParentVNode._children = toChildArray(
newParentVNode._children,
childVNode => {
if (childVNode != null) {
childVNode._parent = newParentVNode;
childVNode._depth = newParentVNode._depth + 1;
// Check if we find a corresponding element in oldChildren.
// If found, delete the array item by setting to `undefined`.
// We use `undefined`, as `null` is reserved for empty placeholders
// (holes).
oldVNode = oldChildren[i];
if (
oldVNode === null ||
(oldVNode &&
childVNode.key == oldVNode.key &&
childVNode.type === oldVNode.type)
) {
oldChildren[i] = undefined;
} else {
// Either oldVNode === undefined or oldChildrenLength > 0,
// so after this loop oldVNode == null or oldVNode is a valid value.
for (j = 0; j < oldChildrenLength; j++) {
oldVNode = oldChildren[j];
// If childVNode is unkeyed, we only match similarly unkeyed nodes, otherwise we match by key.
// We always match by type (in either case).
if (
oldVNode &&
childVNode.key == oldVNode.key &&
childVNode.type === oldVNode.type
) {
oldChildren[j] = undefined;
break;
}
oldVNode = null;
}
}
oldVNode = oldVNode || EMPTY_OBJ;
// Morph the old element into the new one, but don't append it to the dom yet
newDom = diff(
parentDom,
childVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
);
if ((j = childVNode.ref) && oldVNode.ref != j) {
if (!refs) refs = [];
if (oldVNode.ref) refs.push(oldVNode.ref, null, childVNode);
refs.push(j, childVNode._component || newDom, childVNode);
}
// Only proceed if the vnode has not been unmounted by `diff()` above.
if (newDom != null) {
if (firstChildDom == null) {
firstChildDom = newDom;
}
if (childVNode._lastDomChild != null) {
// Only Fragments or components that return Fragment like VNodes will
// have a non-null _lastDomChild. Continue the diff from the end of
// this Fragment's DOM tree.
newDom = childVNode._lastDomChild;
// Eagerly cleanup _lastDomChild. We don't need to persist the value because
// it is only used by `diffChildren` to determine where to resume the diff after
// diffing Components and Fragments.
childVNode._lastDomChild = null;
} else if (
excessDomChildren == oldVNode ||
newDom != oldDom ||
newDom.parentNode == null
) {
// NOTE: excessDomChildren==oldVNode above:
// This is a compression of excessDomChildren==null && oldVNode==null!
// The values only have the same type when `null`.
outer: if (oldDom == null || oldDom.parentNode !== parentDom) {
parentDom.appendChild(newDom);
} else {
// `j<oldChildrenLength; j+=2` is an alternative to `j++<oldChildrenLength/2`
for (
sibDom = oldDom, j = 0;
(sibDom = sibDom.nextSibling) && j < oldChildrenLength;
j += 2
) {
if (sibDom == newDom) {
break outer;
}
}
parentDom.insertBefore(newDom, oldDom);
}
// Browsers will infer an option's `value` from `textContent` when
// no value is present. This essentially bypasses our code to set it
// later in `diff()`. It works fine in all browsers except for IE11
// where it breaks setting `select.value`. There it will be always set
// to an empty string. Re-applying an options value will fix that, so
// there are probably some internal data structures that aren't
// updated properly.
//
// To fix it we make sure to reset the inferred value, so that our own
// value check in `diff()` won't be skipped.
if (newParentVNode.type == 'option') {
parentDom.value = '';
}
}
oldDom = newDom.nextSibling;
if (typeof newParentVNode.type == 'function') {
// At this point, if childVNode._lastDomChild existed, then
// newDom = childVNode._lastDomChild per line 101. Else it is
// the same as childVNode._dom, meaning this component returned
// only a single DOM node
newParentVNode._lastDomChild = newDom;
}
}
}
i++;
return childVNode;
}
);
newParentVNode._dom = firstChildDom;
// Remove children that are not part of any vnode.
if (excessDomChildren != null && typeof newParentVNode.type !== 'function') {
for (i = excessDomChildren.length; i--; ) {
if (excessDomChildren[i] != null) removeNode(excessDomChildren[i]);
}
}
// Remove remaining oldChildren if there are any.
for (i = oldChildrenLength; i--; ) {
if (oldChildren[i] != null) unmount(oldChildren[i], oldChildren[i]);
}
// Set refs only after unmount
if (refs) {
for (i = 0; i < refs.length; i++) {
applyRef(refs[i], refs[++i], refs[++i]);
}
}
}
/**
* Flatten and loop through the children of a virtual node
* @param {import('../index').ComponentChildren} children The unflattened
* children of a virtual node
* @param {(vnode: import('../internal').VNode) => import('../internal').VNode} [callback]
* A function to invoke for each child before it is added to the flattened list.
* @param {Array<import('../internal').VNode | string | number>} [flattened] An flat array of children to modify
* @returns {import('../internal').VNode[]}
*/
export function toChildArray(children, callback, flattened) {
if (flattened == null) flattened = [];
if (children == null || typeof children === 'boolean') {
if (callback) flattened.push(callback(null));
} else if (Array.isArray(children)) {
for (let i = 0; i < children.length; i++) {
toChildArray(children[i], callback, flattened);
}
} else if (!callback) {
flattened.push(children);
} else if (typeof children === 'string' || typeof children === 'number') {
flattened.push(callback(createVNode(null, children, null, null)));
} else if (children._dom != null || children._component != null) {
flattened.push(
callback(createVNode(children.type, children.props, children.key, null))
);
} else {
flattened.push(callback(children));
}
return flattened;
}
| 1 | 14,563 | My one concern here is that we are adding a new array allocation for each depth of the tree, which could add some non-trivial GC churn. Perhaps this would be okay if we think that refs are used through a tree commonly enough such that they'll get allocated anyway, but I'm not sure refs are *that* often used. Every app has a couple, but probably not at every level of their app :/ So maybe the extra bytes are worth it in this case? Though I have no data to back up my hypothesis so I could be way off lol. | preactjs-preact | js |
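For reference, the lazy-allocation shape the comment is weighing against (it mirrors the code above: refs stays undefined until the first changed ref is seen, trading a few bytes of code for one fewer array per diffChildren call):
let refs; // no allocation until a ref is actually encountered
// ...inside the child loop:
if ((j = childVNode.ref) && oldVNode.ref != j) {
	if (!refs) refs = [];
	if (oldVNode.ref) refs.push(oldVNode.ref, null, childVNode);
	refs.push(j, childVNode._component || newDom, childVNode);
}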
@@ -45,7 +45,7 @@ class TestDeterminism < Test::Unit::TestCase
def subclasses
Faker.constants.delete_if do |subclass|
- %i[Base Bank Books Cat Char Base58 ChileRut Config Creature Date Dog DragonBall Dota ElderScrolls Fallout Games GamesHalfLife HeroesOfTheStorm Internet JapaneseMedia LeagueOfLegends Movies Myst Overwatch OnePiece Pokemon SwordArtOnline TvShows Time VERSION Witcher WorldOfWarcraft Zelda].include?(subclass)
+ %i[Base Bank Books Cat Char Base58 ChileRut CLI Config Creature Date Dog DragonBall Dota ElderScrolls Fallout Games GamesHalfLife HeroesOfTheStorm Internet JapaneseMedia LeagueOfLegends Movies Myst Overwatch OnePiece Pokemon SwordArtOnline TvShows Time VERSION Witcher WorldOfWarcraft Zelda].include?(subclass)
end.sort
end
| 1 | # frozen_string_literal: true
require_relative 'test_helper'
# rubocop:disable Security/Eval,Style/EvalWithLocation
class TestDeterminism < Test::Unit::TestCase
def setup
@all_methods = all_methods.freeze
@first_run = []
end
def test_determinism
Faker::Config.random = Random.new(42)
@all_methods.each_index do |index|
store_result @all_methods[index]
end
@first_run.freeze
Faker::Config.random = Random.new(42)
@all_methods.each_index do |index|
assert deterministic_random? @first_run[index], @all_methods[index]
end
end
private
def deterministic_random?(first, method_name)
second = eval(method_name)
(first == second) || raise(
"#{method_name} has an entropy leak; use \"Faker::Config.random.rand\" or \"Array#sample(random: Faker::Config.random)\". Method to lookup for: sample, shuffle, rand"
)
end
def store_result(method_name)
@first_run << eval(method_name)
rescue StandardError => exception
raise %(#{method_name} raised "#{exception}")
end
def all_methods
subclasses.map do |subclass|
subclass_methods(subclass).flatten
end.flatten.sort
end
def subclasses
Faker.constants.delete_if do |subclass|
%i[Base Bank Books Cat Char Base58 ChileRut Config Creature Date Dog DragonBall Dota ElderScrolls Fallout Games GamesHalfLife HeroesOfTheStorm Internet JapaneseMedia LeagueOfLegends Movies Myst Overwatch OnePiece Pokemon SwordArtOnline TvShows Time VERSION Witcher WorldOfWarcraft Zelda].include?(subclass)
end.sort
end
def subclass_methods(subclass)
eval("Faker::#{subclass}.public_methods(false) - Faker::Base.public_methods(false)").sort.map do |method|
"Faker::#{subclass}.#{method}"
end.sort
end
end
# rubocop:enable Security/Eval,Style/EvalWithLocation
| 1 | 8,995 | We should get rid of this big array and think about a better way to check this. | faker-ruby-faker | rb |
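One hedged alternative to the hard-coded array (a sketch, not the project's actual approach; NON_GENERATORS and the Class-based filter are assumptions, and the exact exclusion semantics would still need auditing): let a type check exclude namespace modules automatically and keep only a short explicit list.
NON_GENERATORS = %i[Base Char Base58 Config VERSION].freeze

def subclasses
  Faker.constants.select do |const|
    candidate = Faker.const_get(const)
    candidate.is_a?(Class) && candidate < Faker::Base &&
      !NON_GENERATORS.include?(const)
  end.sort
end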
@@ -67,6 +67,11 @@ namespace MvvmCross.Droid.Shared.Presenter
var serializedRequest = Serializer.Serializer.SerializeObject(request);
bundle.PutString(ViewModelRequestBundleKey, serializedRequest);
+ if (request is MvxViewModelInstanceRequest)
+ {
+ Mvx.Resolve<IMvxChildViewModelCache>().Cache(((MvxViewModelInstanceRequest)request).ViewModelInstance);
+ }
+
if (!_fragmentHostRegistrationSettings.IsActualHostValid(request.ViewModelType))
{
Type newFragmentHostViewModelType = | 1 | // MvxFragmentsPresenter.cs
// (c) Copyright Cirrious Ltd. http://www.cirrious.com
// MvvmCross is licensed using Microsoft Public License (Ms-PL)
// Contributions and inspirations noted in readme.md and license.txt
//
// Project Lead - Stuart Lodge, @slodge, [email protected]
using Android.OS;
using MvvmCross.Core.ViewModels;
using MvvmCross.Droid.Views;
using MvvmCross.Platform;
using MvvmCross.Platform.Droid.Platform;
using System;
using System.Collections.Generic;
using System.Reflection;
namespace MvvmCross.Droid.Shared.Presenter
{
public class MvxFragmentsPresenter
: MvxAndroidViewPresenter
{
public const string ViewModelRequestBundleKey = "__mvxViewModelRequest";
protected FragmentHostRegistrationSettings _fragmentHostRegistrationSettings;
protected Lazy<IMvxNavigationSerializer> _lazyNavigationSerializerFactory;
protected IMvxNavigationSerializer Serializer => _lazyNavigationSerializerFactory.Value;
public MvxFragmentsPresenter(IEnumerable<Assembly> AndroidViewAssemblies)
{
_lazyNavigationSerializerFactory = new Lazy<IMvxNavigationSerializer>(Mvx.Resolve<IMvxNavigationSerializer>);
_fragmentHostRegistrationSettings = new FragmentHostRegistrationSettings(AndroidViewAssemblies);
}
public sealed override void Show(MvxViewModelRequest request)
{
if (_fragmentHostRegistrationSettings.IsTypeRegisteredAsFragment(request.ViewModelType))
ShowFragment(request);
else
ShowActivity(request);
}
protected virtual void ShowActivity(MvxViewModelRequest request, MvxViewModelRequest fragmentRequest = null)
{
if (fragmentRequest == null)
base.Show(request);
else
Show(request, fragmentRequest);
}
public void Show(MvxViewModelRequest request, MvxViewModelRequest fragmentRequest)
{
var intent = CreateIntentForRequest(request);
if (fragmentRequest != null)
{
var converter = Mvx.Resolve<IMvxNavigationSerializer>();
var requestText = converter.Serializer.SerializeObject(fragmentRequest);
intent.PutExtra(ViewModelRequestBundleKey, requestText);
}
Show(intent);
}
protected virtual void ShowFragment(MvxViewModelRequest request)
{
var bundle = new Bundle();
var serializedRequest = Serializer.Serializer.SerializeObject(request);
bundle.PutString(ViewModelRequestBundleKey, serializedRequest);
if (!_fragmentHostRegistrationSettings.IsActualHostValid(request.ViewModelType))
{
Type newFragmentHostViewModelType =
_fragmentHostRegistrationSettings.GetFragmentHostViewModelType(request.ViewModelType);
var fragmentHostMvxViewModelRequest = MvxViewModelRequest.GetDefaultRequest(newFragmentHostViewModelType);
ShowActivity(fragmentHostMvxViewModelRequest, request);
return;
}
var mvxFragmentAttributeAssociated = _fragmentHostRegistrationSettings.GetMvxFragmentAttributeAssociatedWithCurrentHost(request.ViewModelType);
var fragmentType = _fragmentHostRegistrationSettings.GetFragmentTypeAssociatedWith(request.ViewModelType);
GetActualFragmentHost().Show(request, bundle, fragmentType, mvxFragmentAttributeAssociated);
}
public sealed override void Close(IMvxViewModel viewModel)
{
if (_fragmentHostRegistrationSettings.IsTypeRegisteredAsFragment(viewModel.GetType()))
CloseFragment(viewModel);
else
CloseActivity(viewModel);
}
protected virtual void CloseActivity(IMvxViewModel viewModel)
{
base.Close(viewModel);
}
protected virtual void CloseFragment(IMvxViewModel viewModel)
{
GetActualFragmentHost().Close(viewModel);
}
protected IMvxFragmentHost GetActualFragmentHost()
{
var currentActivity = Mvx.Resolve<IMvxAndroidCurrentTopActivity>().Activity;
var fragmentHost = currentActivity as IMvxFragmentHost;
if (fragmentHost == null)
throw new InvalidOperationException($"You are trying to close ViewModel associated with Fragment when currently top Activity ({currentActivity.GetType()} does not implement IMvxFragmentHost interface!");
return fragmentHost;
}
}
} | 1 | 12,586 | Should we cache the result of `Mvx.Resolve<IMvxChildViewModelCache>()` to avoid a lookup each time? | MvvmCross-MvvmCross | .cs |
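Caching the resolve, following the lazy-serializer pattern already used in this class, could look like this (a sketch; the field and property names are made up):
protected Lazy<IMvxChildViewModelCache> _lazyChildViewModelCache =
    new Lazy<IMvxChildViewModelCache>(Mvx.Resolve<IMvxChildViewModelCache>);

protected IMvxChildViewModelCache ChildViewModelCache => _lazyChildViewModelCache.Value;

// in ShowFragment:
if (request is MvxViewModelInstanceRequest)
{
    ChildViewModelCache.Cache(((MvxViewModelInstanceRequest)request).ViewModelInstance);
}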
@@ -89,7 +89,10 @@ class RequestConsumer:
self.connect_to_rabbitmq()
self.init_rabbitmq_channels()
- avg_size_of_message //= num_of_messages
+ try:
+ avg_size_of_message //= num_of_messages
+ except ZeroDivisionError:
+ current_app.logger.warn("No messages calculated", exc_info=True)
current_app.logger.info("Done!")
current_app.logger.info("Number of messages sent: {}".format(num_of_messages)) | 1 | # listenbrainz-labs
#
# Copyright (C) 2019 Param Singh <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import pika
import uuid
import json
import sys
import time
import listenbrainz_spark
import listenbrainz_spark.query_map
from datetime import datetime
from listenbrainz_spark.utils import init_rabbitmq
from flask import current_app
from py4j.protocol import Py4JJavaError
RABBITMQ_HEARTBEAT_TIME = 2 * 60 * 60 # 2 hours -- a full dump import takes 40 minutes right now
rc = None
class RequestConsumer:
def get_result(self, request):
try:
query = request['query']
params = request.get('params', {})
except Exception:
current_app.logger.error('Bad query sent to spark request consumer: %s', json.dumps(request), exc_info=True)
return None
try:
query_handler = listenbrainz_spark.query_map.get_query_handler(query)
except KeyError:
current_app.logger.error("Bad query sent to spark request consumer: %s", query, exc_info=True)
return None
except Exception as e:
current_app.logger.error("Error while mapping query to function: %s", str(e), exc_info=True)
return None
try:
return query_handler(**params)
except TypeError as e:
current_app.logger.error(
"TypeError in the query handler for query '%s', maybe bad params. Error: %s", query, str(e), exc_info=True)
return None
except Exception as e:
current_app.logger.error("Error in the query handler for query '%s': %s", query, str(e), exc_info=True)
return None
def push_to_result_queue(self, messages):
current_app.logger.debug("Pushing result to RabbitMQ...")
num_of_messages = 0
avg_size_of_message = 0
for message in messages:
num_of_messages += 1
body = json.dumps(message)
avg_size_of_message += len(body)
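            # Publish with retry: on connection/channel errors, reconnect
            # to RabbitMQ and try the same serialized body again.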
while message is not None:
try:
self.result_channel.basic_publish(
exchange=current_app.config['SPARK_RESULT_EXCHANGE'],
routing_key='',
body=body,
properties=pika.BasicProperties(delivery_mode=2,),
)
break
except (pika.exceptions.ConnectionClosed, pika.exceptions.ChannelClosed) as e:
current_app.logger.error('RabbitMQ Connection error while publishing results: %s', str(e), exc_info=True)
time.sleep(1)
self.rabbitmq.close()
self.connect_to_rabbitmq()
self.init_rabbitmq_channels()
avg_size_of_message //= num_of_messages
current_app.logger.info("Done!")
current_app.logger.info("Number of messages sent: {}".format(num_of_messages))
current_app.logger.info("Average size of message: {} bytes".format(avg_size_of_message))
def callback(self, channel, method, properties, body):
request = json.loads(body.decode('utf-8'))
current_app.logger.info('Received a request!')
messages = self.get_result(request)
if messages:
self.push_to_result_queue(messages)
while True:
try:
self.request_channel.basic_ack(delivery_tag=method.delivery_tag)
break
except (pika.exceptions.ConnectionClosed, pika.exceptions.ChannelClosed) as e:
current_app.logger.error('RabbitMQ Connection error when acknowledging request: %s', str(e), exc_info=True)
time.sleep(1)
self.rabbitmq.close()
self.connect_to_rabbitmq()
self.init_rabbitmq_channels()
current_app.logger.info('Request done!')
def connect_to_rabbitmq(self):
self.rabbitmq = init_rabbitmq(
username=current_app.config['RABBITMQ_USERNAME'],
password=current_app.config['RABBITMQ_PASSWORD'],
host=current_app.config['RABBITMQ_HOST'],
port=current_app.config['RABBITMQ_PORT'],
vhost=current_app.config['RABBITMQ_VHOST'],
log=current_app.logger.critical,
heartbeat=RABBITMQ_HEARTBEAT_TIME,
)
def init_rabbitmq_channels(self):
self.request_channel = self.rabbitmq.channel()
self.request_channel.exchange_declare(exchange=current_app.config['SPARK_REQUEST_EXCHANGE'], exchange_type='fanout')
self.request_channel.queue_declare(current_app.config['SPARK_REQUEST_QUEUE'], durable=True)
self.request_channel.queue_bind(
exchange=current_app.config['SPARK_REQUEST_EXCHANGE'],
queue=current_app.config['SPARK_REQUEST_QUEUE']
)
self.request_channel.basic_consume(self.callback, queue=current_app.config['SPARK_REQUEST_QUEUE'])
self.result_channel = self.rabbitmq.channel()
self.result_channel.exchange_declare(exchange=current_app.config['SPARK_RESULT_EXCHANGE'], exchange_type='fanout')
def run(self):
while True:
try:
self.connect_to_rabbitmq()
self.init_rabbitmq_channels()
current_app.logger.info('Request consumer started!')
try:
self.request_channel.start_consuming()
except pika.exceptions.ConnectionClosed as e:
current_app.logger.error('connection to rabbitmq closed: %s', str(e), exc_info=True)
self.rabbitmq.close()
continue
self.rabbitmq.close()
except Py4JJavaError as e:
current_app.logger.critical("Critical: JAVA error in spark-request consumer: %s, message: %s",
str(e), str(e.java_exception), exc_info=True)
time.sleep(2)
except Exception as e:
current_app.logger.critical("Error in spark-request-consumer: %s", str(e), exc_info=True)
time.sleep(2)
def ping(self):
""" Sends a heartbeat to rabbitmq to avoid closing the connection during long processes """
self.rabbitmq.process_data_events(0)
def main(app_name):
listenbrainz_spark.init_spark_session(app_name)
global rc
rc = RequestConsumer()
rc.run()
if __name__ == '__main__':
main('spark-writer')
| 1 | 17,500 | If you get to this line, avg_size_of_message is an undefined value, yet you use it below. You should set this value to something in the exception block. | metabrainz-listenbrainz-server | py |
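A sketch of what the comment asks for: give avg_size_of_message a defined value in the zero-message case instead of only logging.
try:
    avg_size_of_message //= num_of_messages
except ZeroDivisionError:
    avg_size_of_message = 0  # nothing was sent; keep the value well-defined
    current_app.logger.warn("No messages calculated", exc_info=True)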
@@ -0,0 +1,10 @@
+package mux
+
+import (
+ "net/http"
+ "net/http/pprof"
+)
+
+func pprofHandlers(httpMux *http.ServeMux) {
+ httpMux.HandleFunc("/debug/pprof/", pprof.Index)
+} | 1 | 1 | 9,461 | let's just fold this inline in mux.go, no need for the indirection to this new file | lyft-clutch | go |
|
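Inlining it as the comment suggests would mean registering the handler where mux.go builds its ServeMux (a sketch; the surrounding variable name is an assumption):
import "net/http/pprof"

// in mux.go, wherever the *http.ServeMux is assembled:
httpMux.HandleFunc("/debug/pprof/", pprof.Index)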
@@ -31,6 +31,7 @@ import com.github.javaparser.ast.nodeTypes.NodeWithTokenRange;
import com.github.javaparser.ast.observer.AstObserver;
import com.github.javaparser.ast.observer.ObservableProperty;
import com.github.javaparser.ast.observer.PropagatingAstObserver;
+import com.github.javaparser.ast.stmt.BlockStmt;
import com.github.javaparser.ast.visitor.CloneVisitor;
import com.github.javaparser.ast.visitor.EqualsVisitor;
import com.github.javaparser.ast.visitor.HashCodeVisitor; | 1 | /*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2016 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.ast;
import com.github.javaparser.HasParentNode;
import com.github.javaparser.Range;
import com.github.javaparser.TokenRange;
import com.github.javaparser.ast.comments.BlockComment;
import com.github.javaparser.ast.comments.Comment;
import com.github.javaparser.ast.comments.LineComment;
import com.github.javaparser.ast.nodeTypes.NodeWithRange;
import com.github.javaparser.ast.nodeTypes.NodeWithTokenRange;
import com.github.javaparser.ast.observer.AstObserver;
import com.github.javaparser.ast.observer.ObservableProperty;
import com.github.javaparser.ast.observer.PropagatingAstObserver;
import com.github.javaparser.ast.visitor.CloneVisitor;
import com.github.javaparser.ast.visitor.EqualsVisitor;
import com.github.javaparser.ast.visitor.HashCodeVisitor;
import com.github.javaparser.ast.visitor.Visitable;
import com.github.javaparser.metamodel.*;
import com.github.javaparser.printer.PrettyPrinter;
import com.github.javaparser.printer.PrettyPrinterConfiguration;
import com.github.javaparser.resolution.SymbolResolver;
import com.github.javaparser.resolution.types.ResolvedType;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static com.github.javaparser.ast.Node.Parsedness.PARSED;
import static com.github.javaparser.ast.Node.TreeTraversal.PREORDER;
import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableList;
import static java.util.Spliterator.DISTINCT;
import static java.util.Spliterator.NONNULL;
import com.github.javaparser.ast.Node;
import com.github.javaparser.metamodel.NodeMetaModel;
import com.github.javaparser.metamodel.JavaParserMetaModel;
/**
* Base class for all nodes of the abstract syntax tree.
* <h2>Construction</h2>
* <p>The tree is built by instantiating the required nodes, then adding them to other nodes.
* If it is the parser who is building the tree, it will use the largest constructor,
* the one with "range" as the first parameter.
* If you want to manually instantiate nodes, we suggest to...
* <ul>
* <li>use a convenience method, like "addStatement(...)", or if none are available...</li>
* <li>use a convenient constructor, like ClassOrInterfaceType(String name), or if none are available...</li>
* <li>use the default constructor.</li>
* <li>Alternatively, use one of the JavaParser.parse(snippet) methods.</li>
* </ul>
* ... and use the various methods on the node to initialize it further, if needed.
* <h2>Parent/child</h2>
* <p>The parent node field is managed automatically and can be seen as read only.
* Note that there is only one parent,
* and trying to use the same node in two places will lead to unexpected behaviour.
* It is advised to clone() a node before moving it around.
* <h2>Comments</h2>
* <p>Each Node can have one associated comment which describes it and
* a number of "orphan comments" which it contains but are not specifically
* associated with any child.
* <h2>Positions</h2>
* <p>When the parser creates nodes, it sets their source code position in the "range" field.
* When you manually instantiate nodes, their range is not set.
* The top left character is position 1, 1.
* Note that since this is an <i>abstract</i> syntax tree,
* it leaves out a lot of text from the original source file,
* like where braces or comma's are exactly.
* Therefore there is no position information on everything in the original source file.
* <h2>Observers</h2>
* <p>It is possible to add observers to the tree.
* Any change in the tree is sent as an event to any observers watching.
* <h2>Visitors</h2>
* <p>The most comfortable way of working with an abstract syntax tree is using visitors.
* You can use one of the visitors in the visitor package, or extend one of them.
* A visitor can be "run" by calling accept on a node:
* <pre>node.accept(visitor, argument);</pre>
* where argument is an object of your choice (often simply null.)
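 * <p>For example (a sketch; VoidVisitorAdapter lives in the visitor package and
 * MethodDeclaration among the body declarations):
 * <pre>
 * node.accept(new VoidVisitorAdapter&lt;Void&gt;() {
 *     public void visit(MethodDeclaration n, Void arg) {
 *         super.visit(n, arg);
 *         System.out.println(n.getNameAsString());
 *     }
 * }, null);
 * </pre>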
*
* @author Julio Vilmar Gesser
*/
public abstract class Node implements Cloneable, HasParentNode<Node>, Visitable, NodeWithRange<Node>, NodeWithTokenRange<Node> {
/**
* Different registration mode for observers on nodes.
*/
public enum ObserverRegistrationMode {
/**
* Notify exclusively for changes happening on this node alone.
*/
JUST_THIS_NODE,
/**
* Notify for changes happening on this node and all its descendants existing at the moment in
* which the observer was registered. Nodes attached later will not be observed.
*/
THIS_NODE_AND_EXISTING_DESCENDANTS,
/**
* Notify for changes happening on this node and all its descendants. The descendants existing at the moment in
* which the observer was registered will be observed immediately. As new nodes are attached later they are
* automatically registered to be observed.
*/
SELF_PROPAGATING
}
public enum Parsedness {
PARSED, UNPARSABLE
}
/**
* This can be used to sort nodes on position.
*/
public static Comparator<NodeWithRange<?>> NODE_BY_BEGIN_POSITION = (a, b) -> {
if (a.getRange().isPresent() && b.getRange().isPresent()) {
return a.getRange().get().begin.compareTo(b.getRange().get().begin);
}
if (a.getRange().isPresent() || b.getRange().isPresent()) {
if (a.getRange().isPresent()) {
return 1;
}
return -1;
}
return 0;
};
private static PrettyPrinterConfiguration toStringPrettyPrinterConfiguration = new PrettyPrinterConfiguration();
protected static final PrettyPrinterConfiguration prettyPrinterNoCommentsConfiguration = new PrettyPrinterConfiguration().setPrintComments(false);
@InternalProperty
private Range range;
@InternalProperty
private TokenRange tokenRange;
@InternalProperty
private Node parentNode;
@InternalProperty
private List<Node> childNodes = new LinkedList<>();
@InternalProperty
private List<Comment> orphanComments = new LinkedList<>();
@InternalProperty
private IdentityHashMap<DataKey<?>, Object> data = null;
@OptionalProperty
private Comment comment;
@InternalProperty
private List<AstObserver> observers = new ArrayList<>();
@InternalProperty
private Parsedness parsed = PARSED;
protected Node(TokenRange tokenRange) {
setTokenRange(tokenRange);
}
/**
* Called in every constructor for node specific code.
* It can't be written in the constructor itself because it will
* be overwritten during code generation.
*/
protected void customInitialization() {
}
/**
* This is a comment associated with this node.
*
* @return comment property
*/
@Generated("com.github.javaparser.generator.core.node.PropertyGenerator")
public Optional<Comment> getComment() {
return Optional.ofNullable(comment);
}
/**
* @return the range of characters in the source code that this node covers.
*/
public Optional<Range> getRange() {
return Optional.ofNullable(range);
}
/**
* @return the range of tokens that this node covers.
*/
public Optional<TokenRange> getTokenRange() {
return Optional.ofNullable(tokenRange);
}
public Node setTokenRange(TokenRange tokenRange) {
this.tokenRange = tokenRange;
if (tokenRange == null || !(tokenRange.getBegin().getRange().isPresent() && tokenRange.getEnd().getRange().isPresent())) {
range = null;
} else {
range = new Range(tokenRange.getBegin().getRange().get().begin, tokenRange.getEnd().getRange().get().end);
}
return this;
}
/**
* @param range the range of characters in the source code that this node covers. null can be used to indicate that
* no range information is known, or that it is not of interest.
*/
public Node setRange(Range range) {
if (this.range == range) {
return this;
}
notifyPropertyChange(ObservableProperty.RANGE, this.range, range);
this.range = range;
return this;
}
/**
* Use this to store additional information to this node.
*
* @param comment to be set
*/
public Node setComment(final Comment comment) {
if (this.comment == comment) {
return this;
}
notifyPropertyChange(ObservableProperty.COMMENT, this.comment, comment);
if (this.comment != null) {
this.comment.setCommentedNode(null);
}
this.comment = comment;
if (comment != null) {
this.comment.setCommentedNode(this);
}
return this;
}
/**
* Use this to store additional information to this node.
*
* @param comment to be set
*/
public final Node setLineComment(String comment) {
return setComment(new LineComment(comment));
}
/**
* Use this to store additional information to this node.
*
* @param comment to be set
*/
public final Node setBlockComment(String comment) {
return setComment(new BlockComment(comment));
}
/**
* @return pretty printed source code for this node and its children.
* Formatting can be configured with Node.setToStringPrettyPrinterConfiguration.
*/
@Override
public final String toString() {
return new PrettyPrinter(toStringPrettyPrinterConfiguration).print(this);
}
/**
* @return pretty printed source code for this node and its children.
* Formatting can be configured with parameter prettyPrinterConfiguration.
*/
public final String toString(PrettyPrinterConfiguration prettyPrinterConfiguration) {
return new PrettyPrinter(prettyPrinterConfiguration).print(this);
}
@Override
public final int hashCode() {
return HashCodeVisitor.hashCode(this);
}
@Override
public boolean equals(final Object obj) {
if (obj == null || !(obj instanceof Node)) {
return false;
}
return EqualsVisitor.equals(this, (Node) obj);
}
@Override
public Optional<Node> getParentNode() {
return Optional.ofNullable(parentNode);
}
/**
* Contains all nodes that have this node set as their parent.
* You can add and remove nodes from this list by adding or removing nodes from the fields of this node.
*
* @return all nodes that have this node as their parent.
*/
public List<Node> getChildNodes() {
return unmodifiableList(childNodes);
}
public void addOrphanComment(Comment comment) {
orphanComments.add(comment);
comment.setParentNode(this);
}
public boolean removeOrphanComment(Comment comment) {
boolean removed = orphanComments.remove(comment);
if (removed) {
notifyPropertyChange(ObservableProperty.COMMENT, comment, null);
comment.setParentNode(null);
}
return removed;
}
/**
* This is a list of Comment which are inside the node and are not associated
* with any meaningful AST Node.
* <p>
* For example, comments at the end of methods (immediately before the parenthesis)
* or at the end of CompilationUnit are orphan comments.
* <p>
* When more than one comment precedes a statement, the one immediately preceding it
* is associated with the statement, while the others are orphans.
* <p>
* Changes to this list are not persisted.
*
* @return all comments that cannot be attributed to a concept
*/
public List<Comment> getOrphanComments() {
return new LinkedList<>(orphanComments);
}
/**
* This is the list of Comment which are contained in the Node either because
* they are properly associated with one of its children or because they are floating
* around inside the Node
*
* @return all Comments within the node as a list
*/
public List<Comment> getAllContainedComments() {
List<Comment> comments = new LinkedList<>();
comments.addAll(getOrphanComments());
for (Node child : getChildNodes()) {
child.getComment().ifPresent(comments::add);
comments.addAll(child.getAllContainedComments());
}
return comments;
}
/**
* Assign a new parent to this node, removing it
* from the list of children of the previous parent, if any.
*
* @param newParentNode node to be set as parent
*/
@Override
public Node setParentNode(Node newParentNode) {
if (newParentNode == parentNode) {
return this;
}
observers.forEach(o -> o.parentChange(this, parentNode, newParentNode));
// remove from old parent, if any
if (parentNode != null) {
final List<Node> parentChildNodes = parentNode.childNodes;
for (int i = 0; i < parentChildNodes.size(); i++) {
if (parentChildNodes.get(i) == this) {
parentChildNodes.remove(i);
}
}
}
parentNode = newParentNode;
// add to new parent, if any
if (parentNode != null) {
parentNode.childNodes.add(this);
}
return this;
}
protected void setAsParentNodeOf(Node childNode) {
if (childNode != null) {
childNode.setParentNode(getParentNodeForChildren());
}
}
public static final int ABSOLUTE_BEGIN_LINE = -1;
public static final int ABSOLUTE_END_LINE = -2;
public void tryAddImportToParentCompilationUnit(Class<?> clazz) {
findAncestor(CompilationUnit.class).ifPresent(p -> p.addImport(clazz));
}
/**
* Recursively finds all nodes of a certain type.
*
* @param clazz the type of node to find.
* @deprecated use {@link Node#findAll(Class)} but be aware that findAll also considers the initial node.
*/
@Deprecated
public <N extends Node> List<N> getChildNodesByType(Class<N> clazz) {
List<N> nodes = new ArrayList<>();
for (Node child : getChildNodes()) {
if (clazz.isInstance(child)) {
nodes.add(clazz.cast(child));
}
nodes.addAll(child.getChildNodesByType(clazz));
}
return nodes;
}
/**
* @deprecated use {@link Node#findAll(Class)} but be aware that findAll also considers the initial node.
*/
@Deprecated
public <N extends Node> List<N> getNodesByType(Class<N> clazz) {
return getChildNodesByType(clazz);
}
/**
* Gets data for this node using the given key.
*
* @param <M> The type of the data.
* @param key The key for the data
* @return The data.
* @throws IllegalStateException if the key was not set in this node.
* @see Node#containsData(DataKey)
* @see DataKey
*/
@SuppressWarnings("unchecked")
public <M> M getData(final DataKey<M> key) {
if (data == null) {
throw new IllegalStateException("No data of this type found. Use containsData to check for this first.");
}
M value = (M) data.get(key);
if (value == null) {
throw new IllegalStateException("No data of this type found. Use containsData to check for this first.");
}
return value;
}
/**
* This method was added to support the clone method.
*
* @return all known data keys.
*/
public Set<DataKey<?>> getDataKeys() {
if (data == null) {
return emptySet();
}
return data.keySet();
}
/**
* Sets data for this node using the given key.
* For information on creating DataKey, see {@link DataKey}.
*
* @param <M> The type of data
* @param key The singleton key for the data
* @param object The data object
* @see DataKey
*/
public <M> void setData(DataKey<M> key, M object) {
if (data == null) {
data = new IdentityHashMap<>();
}
data.put(key, object);
}
/**
* @return does this node have data for this key?
* @see DataKey
*/
public boolean containsData(DataKey<?> key) {
if (data == null) {
return false;
}
return data.containsKey(key);
}
/**
* Remove data by key.
*
* @see DataKey
*/
public void removeData(DataKey<ResolvedType> key) {
if (data != null) {
data.remove(key);
}
}
/**
* Try to remove this node from the parent
*
* @return true if removed, false if it is a required property of the parent, or if the parent isn't set.
* @throws RuntimeException if it fails in an unexpected way
*/
public boolean remove() {
if (parentNode == null) {
return false;
}
return parentNode.remove(this);
}
/**
* Try to replace this node in the parent with the supplied node.
*
* @return true if removed, or if the parent isn't set.
* @throws RuntimeException if it fails in an unexpected way
*/
public boolean replace(Node node) {
if (parentNode == null) {
return false;
}
return parentNode.replace(this, node);
}
/**
* Forcibly removes this node from the AST.
* If it cannot be removed from the parent with remove(),
* it will try to remove its parent instead,
* until it finds a node that can be removed,
* or no parent can be found.
* <p>
* Since everything at CompilationUnit level is removable,
* this method will only (silently) fail when the node is in a detached AST fragment.
*/
public void removeForced() {
if (!remove()) {
getParentNode().ifPresent(Node::remove);
}
}
@Override
public Node getParentNodeForChildren() {
return this;
}
protected void setAsParentNodeOf(NodeList<? extends Node> list) {
if (list != null) {
list.setParentNode(getParentNodeForChildren());
}
}
public <P> void notifyPropertyChange(ObservableProperty property, P oldValue, P newValue) {
this.observers.forEach(o -> o.propertyChange(this, property, oldValue, newValue));
}
@Override
public void unregister(AstObserver observer) {
this.observers.remove(observer);
}
@Override
public void register(AstObserver observer) {
this.observers.add(observer);
}
/**
* Register a new observer for the given node. Depending on the mode specified also descendants, existing
* and new, could be observed. For more details see <i>ObserverRegistrationMode</i>.
*/
public void register(AstObserver observer, ObserverRegistrationMode mode) {
if (mode == null) {
throw new IllegalArgumentException("Mode should be not null");
}
switch(mode) {
case JUST_THIS_NODE:
register(observer);
break;
case THIS_NODE_AND_EXISTING_DESCENDANTS:
registerForSubtree(observer);
break;
case SELF_PROPAGATING:
registerForSubtree(PropagatingAstObserver.transformInPropagatingObserver(observer));
break;
default:
throw new UnsupportedOperationException("This mode is not supported: " + mode);
}
}
/**
* Register the observer for the current node and all the contained node and nodelists, recursively.
*/
public void registerForSubtree(AstObserver observer) {
register(observer);
this.getChildNodes().forEach(c -> c.registerForSubtree(observer));
for (PropertyMetaModel property : getMetaModel().getAllPropertyMetaModels()) {
if (property.isNodeList()) {
NodeList<?> nodeList = (NodeList<?>) property.getValue(this);
if (nodeList != null)
nodeList.register(observer);
}
}
}
@Override
public boolean isRegistered(AstObserver observer) {
return this.observers.contains(observer);
}
@Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator")
public boolean remove(Node node) {
if (node == null)
return false;
if (comment != null) {
if (node == comment) {
removeComment();
return true;
}
}
return false;
}
@Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator")
public Node removeComment() {
return setComment((Comment) null);
}
@Override
@Generated("com.github.javaparser.generator.core.node.CloneGenerator")
public Node clone() {
return (Node) accept(new CloneVisitor(), null);
}
/**
* @return get JavaParser specific node introspection information.
*/
@Generated("com.github.javaparser.generator.core.node.GetMetaModelGenerator")
public NodeMetaModel getMetaModel() {
return JavaParserMetaModel.nodeMetaModel;
}
/**
* @return whether this node was successfully parsed or not.
* If it was not, only the range and tokenRange fields will be valid.
*/
public Parsedness getParsed() {
return parsed;
}
/**
* Used by the parser to flag unparsable nodes.
*/
public Node setParsed(Parsedness parsed) {
this.parsed = parsed;
return this;
}
public static PrettyPrinterConfiguration getToStringPrettyPrinterConfiguration() {
return toStringPrettyPrinterConfiguration;
}
public static void setToStringPrettyPrinterConfiguration(PrettyPrinterConfiguration toStringPrettyPrinterConfiguration) {
Node.toStringPrettyPrinterConfiguration = toStringPrettyPrinterConfiguration;
}
@Generated("com.github.javaparser.generator.core.node.ReplaceMethodGenerator")
public boolean replace(Node node, Node replacementNode) {
if (node == null)
return false;
if (comment != null) {
if (node == comment) {
setComment((Comment) replacementNode);
return true;
}
}
return false;
}
/**
* Finds the root node of this AST by finding the topmost parent.
*/
public Node findRootNode() {
Node n = this;
while (n.getParentNode().isPresent()) {
n = n.getParentNode().get();
}
return n;
}
/**
* @return the containing CompilationUnit, or empty if this node is not inside a compilation unit.
*/
public Optional<CompilationUnit> findCompilationUnit() {
Node rootNode = findRootNode();
if (rootNode instanceof CompilationUnit) {
return Optional.of((CompilationUnit) rootNode);
}
return Optional.empty();
}
protected SymbolResolver getSymbolResolver() {
return findCompilationUnit().map(cu -> {
SymbolResolver symbolResolver = cu.getData(SYMBOL_RESOLVER_KEY);
if (symbolResolver == null) {
throw new IllegalStateException("Symbol resolution not configured: to configure consider setting a SymbolResolver in the ParserConfiguration");
}
return symbolResolver;
}).orElseThrow(() -> new IllegalStateException("The node is not inserted in a CompilationUnit"));
}
// We need to expose it because we will need to use it to inject the SymbolSolver
public static final DataKey<SymbolResolver> SYMBOL_RESOLVER_KEY = new DataKey<SymbolResolver>() {
};
public enum TreeTraversal {
PREORDER, BREADTHFIRST, POSTORDER, PARENTS, DIRECT_CHILDREN
}
private Iterator<Node> treeIterator(TreeTraversal traversal) {
switch(traversal) {
case BREADTHFIRST:
return new BreadthFirstIterator(this);
case POSTORDER:
return new PostOrderIterator(this);
case PREORDER:
return new PreOrderIterator(this);
case DIRECT_CHILDREN:
return new DirectChildrenIterator(this);
case PARENTS:
return new ParentsVisitor(this);
default:
throw new IllegalArgumentException("Unknown traversal choice.");
}
}
private Iterable<Node> treeIterable(TreeTraversal traversal) {
return () -> treeIterator(traversal);
}
/**
* Make a stream of nodes using traversal algorithm "traversal".
*/
public Stream<Node> stream(TreeTraversal traversal) {
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(treeIterator(traversal), NONNULL | DISTINCT), false);
}
/**
* Make a stream of nodes using pre-order traversal.
*/
public Stream<Node> stream() {
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(treeIterator(PREORDER), NONNULL | DISTINCT), false);
}
/**
* Walks the AST, calling the consumer for every node, with traversal algorithm "traversal".
* <br/>This is the most general walk method. All other walk and findAll methods are based on this.
*/
public void walk(TreeTraversal traversal, Consumer<Node> consumer) {
// Could be implemented as a call to the above walk method, but this is a little more efficient.
for (Node node : treeIterable(traversal)) {
consumer.accept(node);
}
}
/**
* Walks the AST, calling the consumer for every node with pre-order traversal.
*/
public void walk(Consumer<Node> consumer) {
walk(PREORDER, consumer);
}
/**
* Walks the AST with pre-order traversal, calling the consumer for every node of type "nodeType".
*/
public <T extends Node> void walk(Class<T> nodeType, Consumer<T> consumer) {
walk(TreeTraversal.PREORDER, node -> {
if (nodeType.isAssignableFrom(node.getClass())) {
consumer.accept(nodeType.cast(node));
}
});
}
/**
* Walks the AST with pre-order traversal, returning all nodes of type "nodeType".
*/
public <T extends Node> List<T> findAll(Class<T> nodeType) {
final List<T> found = new ArrayList<>();
walk(nodeType, found::add);
return found;
}
/**
* Walks the AST with pre-order traversal, returning all nodes of type "nodeType" that match the predicate.
*/
public <T extends Node> List<T> findAll(Class<T> nodeType, Predicate<T> predicate) {
final List<T> found = new ArrayList<>();
walk(nodeType, n -> {
if (predicate.test(n))
found.add(n);
});
return found;
}
/**
* Walks the AST, applying the function for every node, with traversal algorithm "traversal". If the function
* returns something else than null, the traversal is stopped and the function result is returned. <br/>This is the
* most general findFirst method. All other findFirst methods are based on this.
*/
public <T> Optional<T> findFirst(TreeTraversal traversal, Function<Node, Optional<T>> consumer) {
for (Node node : treeIterable(traversal)) {
final Optional<T> result = consumer.apply(node);
if (result.isPresent()) {
return result;
}
}
return Optional.empty();
}
/**
* Walks the AST with pre-order traversal, returning the first node of type "nodeType" or empty() if none is found.
*/
public <N extends Node> Optional<N> findFirst(Class<N> nodeType) {
return findFirst(TreeTraversal.PREORDER, node -> {
if (nodeType.isAssignableFrom(node.getClass())) {
return Optional.of(nodeType.cast(node));
}
return Optional.empty();
});
}
/**
* Walks the AST with pre-order traversal, returning the first node of type "nodeType" that matches "predicate" or empty() if none is
* found.
*/
public <N extends Node> Optional<N> findFirst(Class<N> nodeType, Predicate<N> predicate) {
return findFirst(TreeTraversal.PREORDER, node -> {
if (nodeType.isAssignableFrom(node.getClass())) {
final N castNode = nodeType.cast(node);
if (predicate.test(castNode)) {
return Optional.of(castNode);
}
}
return Optional.empty();
});
}
/**
* Determines whether this node is an ancestor of the given node. A node is <i>not</i> an ancestor of itself.
*
* @param descendant the node for which to determine whether it has this node as an ancestor.
* @return {@code true} if this node is an ancestor of the given node, and {@code false} otherwise.
* @see HasParentNode#isDescendantOf(Node)
*/
public boolean isAncestorOf(Node descendant) {
return this != descendant && findFirst(Node.class, n -> n == descendant).isPresent();
}
/**
* Performs a breadth-first node traversal starting with a given node.
*
* @see <a href="https://en.wikipedia.org/wiki/Breadth-first_search">Breadth-first traversal</a>
*/
public static class BreadthFirstIterator implements Iterator<Node> {
private final Queue<Node> queue = new LinkedList<>();
public BreadthFirstIterator(Node node) {
queue.add(node);
}
@Override
public boolean hasNext() {
return !queue.isEmpty();
}
@Override
public Node next() {
Node next = queue.remove();
queue.addAll(next.getChildNodes());
return next;
}
}
/**
* Performs a simple traversal over all nodes that have the passed node as their parent.
*/
public static class DirectChildrenIterator implements Iterator<Node> {
private final Iterator<Node> childrenIterator;
public DirectChildrenIterator(Node node) {
childrenIterator = new ArrayList<>(node.getChildNodes()).iterator();
}
@Override
public boolean hasNext() {
return childrenIterator.hasNext();
}
@Override
public Node next() {
return childrenIterator.next();
}
}
/**
* Iterates over the parent of the node, then the parent's parent, then the parent's parent's parent, until running
* out of parents.
*/
public static class ParentsVisitor implements Iterator<Node> {
private Node node;
public ParentsVisitor(Node node) {
this.node = node;
}
@Override
public boolean hasNext() {
return node.getParentNode().isPresent();
}
@Override
public Node next() {
node = node.getParentNode().orElse(null);
return node;
}
}
/**
* Performs a pre-order (or depth-first) node traversal starting with a given node.
*
* @see <a href="https://en.wikipedia.org/wiki/Pre-order">Pre-order traversal</a>
*/
public static class PreOrderIterator implements Iterator<Node> {
private final Stack<Node> stack = new Stack<>();
public PreOrderIterator(Node node) {
stack.add(node);
}
@Override
public boolean hasNext() {
return !stack.isEmpty();
}
@Override
public Node next() {
Node next = stack.pop();
List<Node> children = next.getChildNodes();
for (int i = children.size() - 1; i >= 0; i--) {
stack.add(children.get(i));
}
return next;
}
}
/**
* Performs a post-order (or leaves-first) node traversal starting with a given node.
*
* @see <a href="https://en.wikipedia.org/wiki/Post-order">Post-order traversal</a>
*/
public static class PostOrderIterator implements Iterator<Node> {
private final Stack<List<Node>> nodesStack = new Stack<>();
private final Stack<Integer> cursorStack = new Stack<>();
private final Node root;
private boolean hasNext = true;
public PostOrderIterator(Node root) {
this.root = root;
fillStackToLeaf(root);
}
private void fillStackToLeaf(Node node) {
while (true) {
List<Node> childNodes = new ArrayList<>(node.getChildNodes());
if (childNodes.isEmpty()) {
break;
}
nodesStack.push(childNodes);
cursorStack.push(0);
node = childNodes.get(0);
}
}
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public Node next() {
final List<Node> nodes = nodesStack.peek();
final int cursor = cursorStack.peek();
final boolean levelHasNext = cursor < nodes.size();
if (levelHasNext) {
Node node = nodes.get(cursor);
fillStackToLeaf(node);
return nextFromLevel();
} else {
nodesStack.pop();
cursorStack.pop();
hasNext = !nodesStack.empty();
if (hasNext) {
return nextFromLevel();
}
return root;
}
}
private Node nextFromLevel() {
final List<Node> nodes = nodesStack.peek();
final int cursor = cursorStack.pop();
cursorStack.push(cursor + 1);
return nodes.get(cursor);
}
}
}
| 1 | 13,723 | This bit confused me just now as I was reading through -- turns out this is due to the `{@link}` on line 316. Reading up on this, the alternative seems to be to state the fully qualified name instead. Happy to edit if requested as I don't have a strong view either way (perhaps a small leaning towards keeping the import). | javaparser-javaparser | java |
@@ -159,6 +159,18 @@ public class CPPTokenizerTest {
tokenizer.tokenize(code, new Tokens());
}
+ @Test
+ public void testDigitSeparators() {
+ final String code = "auto integer_literal = 1'000'000;" + PMD.EOL
+ + "auto floating_point_literal = 0.000'015'3;" + PMD.EOL
+ + "auto hex_literal = 0x0F00'abcd'6f3d;" + PMD.EOL
+ + "auto silly_example = 1'0'0'000'00;";
+ Tokens tokens = parse(code);
+ System.out.println(tokens.getTokens());
+ assertTrue(TokenEntry.getEOF() != tokens.getTokens().get(0));
+ assertEquals(21, tokens.size());
+ }
+
private Tokens parse(String snippet) {
try {
return parse(snippet, false, new Tokens()); | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.cpd;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.commons.io.IOUtils;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import net.sourceforge.pmd.PMD;
import net.sourceforge.pmd.lang.ast.TokenMgrError;
public class CPPTokenizerTest {
@Rule
public ExpectedException expectedException = ExpectedException.none();
@Test
public void testUTFwithBOM() {
Tokens tokens = parse("\ufeffint start()\n{ int ret = 1;\nreturn ret;\n}\n");
assertNotSame(TokenEntry.getEOF(), tokens.getTokens().get(0));
assertEquals(15, tokens.size());
}
@Test
public void testUnicodeSupport() {
String code = "\ufeff" + "#include <iostream>\n" + "#include <string>\n" + "\n" + "// example\n" + "\n"
+ "int main()\n" + "{\n" + " std::string text(\"ąęćśźńó\");\n" + " std::cout << text;\n"
+ " return 0;\n" + "}\n";
Tokens tokens = parse(code);
assertNotSame(TokenEntry.getEOF(), tokens.getTokens().get(0));
assertEquals(24, tokens.size());
}
@Test
public void testIgnoreBetweenSpecialComments() {
String code = "#include <iostream>\n" + "#include <string>\n" + "\n" + "// CPD-OFF\n"
+ "int main()\n" + "{\n" + " std::string text(\"ąęćśźńó\");\n" + " std::cout << text;\n"
+ " return 0;\n" + "// CPD-ON\n" + "}\n";
Tokens tokens = parse(code);
assertNotSame(TokenEntry.getEOF(), tokens.getTokens().get(0));
assertEquals(2, tokens.size()); // "}" + EOF
}
@Test
public void testMultiLineMacros() {
Tokens tokens = parse(TEST1);
assertEquals(7, tokens.size());
}
@Test
public void testDollarSignInIdentifier() {
parse(TEST2);
}
@Test
public void testDollarSignStartingIdentifier() {
parse(TEST3);
}
@Test
public void testWideCharacters() {
parse(TEST4);
}
@Test
public void testTokenizerWithSkipBlocks() throws Exception {
String test = IOUtils.toString(CPPTokenizerTest.class.getResourceAsStream("cpp/cpp_with_asm.cpp"), StandardCharsets.UTF_8);
Tokens tokens = parse(test, true, new Tokens());
assertEquals(19, tokens.size());
}
@Test
public void testTokenizerWithSkipBlocksPattern() throws Exception {
String test = IOUtils.toString(CPPTokenizerTest.class.getResourceAsStream("cpp/cpp_with_asm.cpp"), StandardCharsets.UTF_8);
Tokens tokens = new Tokens();
try {
parse(test, true, "#if debug|#endif", tokens);
} catch (TokenMgrError ignored) {
// ignored
}
assertEquals(31, tokens.size());
}
@Test
public void testTokenizerWithoutSkipBlocks() throws Exception {
String test = IOUtils.toString(CPPTokenizerTest.class.getResourceAsStream("cpp/cpp_with_asm.cpp"), StandardCharsets.UTF_8);
Tokens tokens = new Tokens();
try {
parse(test, false, tokens);
} catch (TokenMgrError ignored) {
// ignored
}
assertEquals(37, tokens.size());
}
@Test
// ASM code containing the '@' character
public void testAsmWithAtSign() {
Tokens tokens = parse(TEST7);
assertEquals(22, tokens.size());
}
@Test
public void testEOLCommentInPreprocessingDirective() {
parse("#define LSTFVLES_CPP //*" + PMD.EOL);
}
@Test
public void testEmptyCharacter() {
Tokens tokens = parse("std::wstring wsMessage( sMessage.length(), L'');" + PMD.EOL);
assertEquals(15, tokens.size());
}
@Test
public void testHexCharacter() {
Tokens tokens = parse("if (*pbuf == '\\0x05')" + PMD.EOL);
assertEquals(8, tokens.size());
}
@Test
public void testWhiteSpaceEscape() {
Tokens tokens = parse("szPath = m_sdcacheDir + _T(\"\\ oMedia\");" + PMD.EOL);
assertEquals(10, tokens.size());
}
@Test
public void testRawStringLiteral() {
String code = "const char* const KDefaultConfig = R\"(\n" + " [Sinks.1]\n" + " Destination=Console\n"
+ " AutoFlush=true\n"
+ " Format=\"[%TimeStamp%] %ThreadId% %QueryIdHigh% %QueryIdLow% %LoggerFile%:%Line% (%Severity%) - %Message%\"\n"
+ " Filter=\"%Severity% >= WRN\"\n" + ")\";\n";
Tokens tokens = parse(code);
assertTrue(TokenEntry.getEOF() != tokens.getTokens().get(0));
assertEquals(9, tokens.size());
}
@Test
public void testLexicalErrorFilename() throws Exception {
Properties properties = new Properties();
properties.setProperty(Tokenizer.OPTION_SKIP_BLOCKS, Boolean.toString(false));
String test = IOUtils.toString(CPPTokenizerTest.class.getResourceAsStream("cpp/issue-1559.cpp"), StandardCharsets.UTF_8);
SourceCode code = new SourceCode(new SourceCode.StringCodeLoader(test, "issue-1559.cpp"));
CPPTokenizer tokenizer = new CPPTokenizer();
tokenizer.setProperties(properties);
expectedException.expect(TokenMgrError.class);
expectedException.expectMessage("Lexical error in file issue-1559.cpp at");
tokenizer.tokenize(code, new Tokens());
}
private Tokens parse(String snippet) {
try {
return parse(snippet, false, new Tokens());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private Tokens parse(String snippet, boolean skipBlocks, Tokens tokens) throws IOException {
return parse(snippet, skipBlocks, null, tokens);
}
private Tokens parse(String snippet, boolean skipBlocks, String skipPattern, Tokens tokens) throws IOException {
Properties properties = new Properties();
properties.setProperty(Tokenizer.OPTION_SKIP_BLOCKS, Boolean.toString(skipBlocks));
if (skipPattern != null) {
properties.setProperty(Tokenizer.OPTION_SKIP_BLOCKS_PATTERN, skipPattern);
}
CPPTokenizer tokenizer = new CPPTokenizer();
tokenizer.setProperties(properties);
SourceCode code = new SourceCode(new SourceCode.StringCodeLoader(snippet));
tokenizer.tokenize(code, tokens);
return tokens;
}
private static final String TEST1 = "#define FOO a +\\" + PMD.EOL + " b +\\" + PMD.EOL
+ " c +\\" + PMD.EOL + " d +\\" + PMD.EOL + " e +\\" + PMD.EOL
+ " f +\\" + PMD.EOL + " g" + PMD.EOL + " void main() {}";
private static final String TEST2 = " void main() { int x$y = 42; }";
private static final String TEST3 = " void main() { int $x = 42; }";
private static final String TEST4 = " void main() { char x = L'a'; }";
private static final String TEST7 = "asm void eSPI_boot()" + PMD.EOL + "{" + PMD.EOL + " // setup stack pointer"
+ PMD.EOL + " lis r1, _stack_addr@h" + PMD.EOL + " ori r1, r1, _stack_addr@l" + PMD.EOL + "}";
}
| 1 | 15,938 | There's a unnecessary System.out. | pmd-pmd | java |
@@ -532,6 +532,15 @@ func (c *client) InstallGatewayFlows() error {
// Add flow to ensure the liveness check packet could be forwarded correctly.
flows = append(flows, c.localProbeFlow(gatewayIPs, cookie.Default)...)
flows = append(flows, c.ctRewriteDstMACFlows(gatewayConfig.MAC, cookie.Default)...)
+ if c.enableProxy {
+ flows = append(flows, c.arpNodePortVirtualResponderFlow())
+ if gatewayConfig.IPv6 != nil {
+ flows = append(flows, c.serviceGatewayFlow(true))
+ }
+ if gatewayConfig.IPv4 != nil {
+ flows = append(flows, c.serviceGatewayFlow(false))
+ }
+ }
// In NoEncap , no traffic from tunnel port
if c.encapMode.SupportsEncap() {
flows = append(flows, c.l3FwdFlowToGateway(gatewayIPs, gatewayConfig.MAC, cookie.Default)...) | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openflow
import (
"errors"
"fmt"
"math/rand"
"net"
"github.com/contiv/libOpenflow/protocol"
"github.com/contiv/ofnet/ofctrl"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow/cookie"
"github.com/vmware-tanzu/antrea/pkg/agent/types"
"github.com/vmware-tanzu/antrea/pkg/agent/util"
binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
"github.com/vmware-tanzu/antrea/third_party/proxy"
)
const maxRetryForOFSwitch = 5
// Client is the interface to program OVS flows for entity connectivity of Antrea.
type Client interface {
// Initialize sets up all basic flows on the specific OVS bridge. It returns a channel which
// is used to notify the caller in case of a reconnection, in which case ReplayFlows should
// be called to ensure that the set of OVS flows is correct. All flows programmed in the
// switch which match the current round number will be deleted before any new flow is
// installed.
Initialize(roundInfo types.RoundInfo, config *config.NodeConfig, encapMode config.TrafficEncapModeType) (<-chan struct{}, error)
// InstallGatewayFlows sets up flows related to an OVS gateway port, the gateway must exist.
InstallGatewayFlows() error
// InstallClusterServiceCIDRFlows sets up the appropriate flows so that traffic can reach
// the different Services running in the Cluster. This method needs to be invoked once with
// the Cluster Service CIDR as a parameter.
InstallClusterServiceCIDRFlows(serviceNets []*net.IPNet) error
// InstallClusterServiceFlows sets up the appropriate flows so that traffic can reach
// the different Services running in the Cluster. This method needs to be invoked once.
InstallClusterServiceFlows() error
// InstallDefaultTunnelFlows sets up the classification flow for the default (flow based) tunnel.
InstallDefaultTunnelFlows() error
// InstallNodeFlows should be invoked when a connection to a remote Node is going to be set
// up. The hostname is used to identify the added flows. When IPSec tunnel is enabled,
// ipsecTunOFPort must be set to the OFPort number of the IPSec tunnel port to the remote Node;
// otherwise ipsecTunOFPort must be set to 0.
// InstallNodeFlows has all-or-nothing semantics(call succeeds if all the flows are installed
// successfully, otherwise no flows will be installed). Calls to InstallNodeFlows are idempotent.
// Concurrent calls to InstallNodeFlows and / or UninstallNodeFlows are supported as long as they
// are all for different hostnames.
InstallNodeFlows(
hostname string,
peerConfigs map[*net.IPNet]net.IP,
tunnelPeerIP net.IP,
ipsecTunOFPort uint32) error
// UninstallNodeFlows removes the connection to the remote Node specified with the
// hostname. UninstallNodeFlows will do nothing if no connection to the host was established.
UninstallNodeFlows(hostname string) error
// InstallPodFlows should be invoked when a connection to a Pod on current Node. The
// interfaceName is used to identify the added flows. InstallPodFlows has all-or-nothing
// semantics(call succeeds if all the flows are installed successfully, otherwise no
// flows will be installed). Calls to InstallPodFlows are idempotent. Concurrent calls
// to InstallPodFlows and / or UninstallPodFlows are supported as long as they are all
// for different interfaceNames.
InstallPodFlows(interfaceName string, podInterfaceIPs []net.IP, podInterfaceMAC net.HardwareAddr, ofPort uint32) error
// UninstallPodFlows removes the connection to the local Pod specified with the
// interfaceName. UninstallPodFlows will do nothing if no connection to the Pod was established.
UninstallPodFlows(interfaceName string) error
// InstallServiceGroup installs a group for Service LB. Each endpoint
// is a bucket of the group. For now, each bucket has the same weight.
InstallServiceGroup(groupID binding.GroupIDType, withSessionAffinity bool, endpoints []proxy.Endpoint) error
// UninstallServiceGroup removes the group and its buckets that are
// installed by InstallServiceGroup.
UninstallServiceGroup(groupID binding.GroupIDType) error
// InstallEndpointFlows installs flows for accessing Endpoints.
// If an Endpoint is on the current Node, then flows for hairpin and endpoint
// L2 forwarding should also be installed.
InstallEndpointFlows(protocol binding.Protocol, endpoints []proxy.Endpoint, isIPv6 bool) error
// UninstallEndpointFlows removes flows of the Endpoint installed by
// InstallEndpointFlows.
UninstallEndpointFlows(protocol binding.Protocol, endpoint proxy.Endpoint) error
// InstallServiceFlows installs flows for accessing Service with clusterIP.
// It installs the flow that uses the group/bucket to do service LB. If the
// affinityTimeout is not zero, it also installs the flow which has a learn
// action to maintain the LB decision.
// The group with the groupID must be installed before, otherwise the
// installation will fail.
InstallServiceFlows(groupID binding.GroupIDType, svcIP net.IP, svcPort uint16, protocol binding.Protocol, affinityTimeout uint16) error
// UninstallServiceFlows removes flows installed by InstallServiceFlows.
UninstallServiceFlows(svcIP net.IP, svcPort uint16, protocol binding.Protocol) error
// InstallLoadBalancerServiceFromOutsideFlows installs flows for LoadBalancer Service traffic from outside node.
// The traffic is received from uplink port and will be forwarded to gateway by the installed flows. And then
// kube-proxy will handle the traffic.
// This function is only used for Windows platform.
InstallLoadBalancerServiceFromOutsideFlows(svcIP net.IP, svcPort uint16, protocol binding.Protocol) error
// UninstallLoadBalancerServiceFromOutsideFlows removes flows installed by InstallLoadBalancerServiceFromOutsideFlows.
UninstallLoadBalancerServiceFromOutsideFlows(svcIP net.IP, svcPort uint16, protocol binding.Protocol) error
// GetFlowTableStatus should return an array of flow table status, all existing flow tables should be included in the list.
GetFlowTableStatus() []binding.TableStatus
// InstallPolicyRuleFlows installs flows for a new NetworkPolicy rule. Rule should include all fields in the
// NetworkPolicy rule. Each ingress/egress policy rule installs Openflow entries on two tables, one for
// ruleTable and the other for dropTable. If a packet does not pass the ruleTable, it will be dropped by the
// dropTable.
InstallPolicyRuleFlows(ofPolicyRule *types.PolicyRule) error
// BatchInstallPolicyRuleFlows installs multiple flows for NetworkPolicy rules in batch.
BatchInstallPolicyRuleFlows(ofPolicyRules []*types.PolicyRule) error
// UninstallPolicyRuleFlows removes the Openflow entry relevant to the specified NetworkPolicy rule.
// It also returns a slice of stale ofPriorities used by ClusterNetworkPolicies.
// UninstallPolicyRuleFlows will do nothing if no Openflow entry for the rule is installed.
UninstallPolicyRuleFlows(ruleID uint32) ([]string, error)
// AddPolicyRuleAddress adds one or multiple addresses to the specified NetworkPolicy rule. If addrType is true, the
// addresses are added to PolicyRule.From, else to PolicyRule.To.
AddPolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address, priority *uint16) error
// DeletePolicyRuleAddress removes addresses from the specified NetworkPolicy rule. If addrType is srcAddress, the addresses
// are removed from PolicyRule.From, else from PolicyRule.To.
DeletePolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address, priority *uint16) error
// InstallBridgeUplinkFlows installs Openflow flows between bridge local port and uplink port to support
// host networking.
// This function is only used for Windows platform.
InstallBridgeUplinkFlows() error
// InstallExternalFlows sets up flows to enable Pods to communicate to the external IP addresses. The corresponding
// OpenFlow entries include: 1) identify the packets from local Pods to the external IP address, 2) mark the traffic
// in the connection tracking context, and 3) SNAT the packets with Node IP.
// This function is only used for Windows platform.
InstallExternalFlows() error
// Disconnect disconnects the connection between client and OFSwitch.
Disconnect() error
// IsConnected returns the connection status between client and OFSwitch. The return value is true if the OFSwitch is connected.
IsConnected() bool
// ReplayFlows should be called when a spurious disconnection occurs. After we reconnect to
// the OFSwitch, we need to replay all the flows cached by the client. ReplayFlows will try
// to replay as many flows as possible, and will log an error when a flow cannot be
// installed.
ReplayFlows()
// DeleteStaleFlows deletes all flows from the previous round which are no longer needed. It
// should be called by the agent after all required flows have been installed / updated with
// the new round number.
DeleteStaleFlows() error
// GetTunnelVirtualMAC() returns globalVirtualMAC used for tunnel traffic.
GetTunnelVirtualMAC() net.HardwareAddr
// GetPodFlowKeys returns the keys (match strings) of the cached flows for a
// Pod.
GetPodFlowKeys(interfaceName string) []string
// GetNetworkPolicyFlowKeys returns the keys (match strings) of the cached
// flows for a NetworkPolicy. Flows are grouped by policy rules, and duplicated
// entries can be added due to conjunctive match flows shared by multiple
// rules.
GetNetworkPolicyFlowKeys(npName, npNamespace string) []string
// ReassignFlowPriorities takes a list of priority updates, and update the actionFlows to replace
// the old priority with the desired one, for each priority update on that table.
ReassignFlowPriorities(updates map[uint16]uint16, table binding.TableIDType) error
// SubscribePacketIn subscribes packet-in channel in bridge. This method requires a receiver to
// pop data from "ch" timely, otherwise it will block all inbound messages from OVS.
SubscribePacketIn(reason uint8, ch chan *ofctrl.PacketIn) error
// SendTraceflowPacket injects packet to specified OVS port for Openflow.
SendTraceflowPacket(
dataplaneTag uint8,
srcMAC string,
dstMAC string,
srcIP string,
dstIP string,
IPProtocol uint8,
ttl uint8,
IPFlags uint16,
TCPSrcPort uint16,
TCPDstPort uint16,
TCPFlags uint8,
UDPSrcPort uint16,
UDPDstPort uint16,
ICMPType uint8,
ICMPCode uint8,
ICMPID uint16,
ICMPSequence uint16,
inPort uint32,
outPort int32) error
// InstallTraceflowFlows installs flows for specific traceflow request.
InstallTraceflowFlows(dataplaneTag uint8) error
// Initial tun_metadata0 in TLV map for Traceflow.
InitialTLVMap() error
// Find Network Policy reference and OFpriority by conjunction ID.
GetPolicyInfoFromConjunction(ruleID uint32) (string, string)
// RegisterPacketInHandler uses SubscribePacketIn to get PacketIn message and process received
// packets through registered handlers.
RegisterPacketInHandler(packetHandlerReason uint8, packetHandlerName string, packetInHandler interface{})
StartPacketInHandler(packetInStartedReason []uint8, stopCh <-chan struct{})
// Get traffic metrics of each NetworkPolicy rule.
NetworkPolicyMetrics() map[uint32]*types.RuleMetric
// Returns if IPv4 is supported on this Node or not.
IsIPv4Enabled() bool
// Returns if IPv6 is supported on this Node or not.
IsIPv6Enabled() bool
}
// GetFlowTableStatus returns an array of flow table status.
func (c *client) GetFlowTableStatus() []binding.TableStatus {
return c.bridge.DumpTableStatus()
}
// IsConnected returns the connection status between client and OFSwitch.
func (c *client) IsConnected() bool {
return c.bridge.IsConnected()
}
// addFlows installs the flows on the OVS bridge and then add them into the flow cache. If the flow cache exists,
// it will return immediately, otherwise it will use Bundle to add all flows, and then add them into the flow cache.
// If it fails to add the flows with Bundle, it will return the error and no flow cache is created.
func (c *client) addFlows(cache *flowCategoryCache, flowCacheKey string, flows []binding.Flow) error {
_, ok := cache.Load(flowCacheKey)
// If a flow cache entry already exists for the key, return immediately. Otherwise, add the flows to the switch
// and populate the cache with them.
if ok {
klog.V(2).Infof("Flows with cache key %s are already installed", flowCacheKey)
return nil
}
err := c.ofEntryOperations.AddAll(flows)
if err != nil {
return err
}
fCache := flowCache{}
// Add the successfully installed flows into the flow cache.
for _, flow := range flows {
fCache[flow.MatchString()] = flow
}
cache.Store(flowCacheKey, fCache)
return nil
}
// deleteFlows deletes all the flows in the flow cache indexed by the provided flowCacheKey.
func (c *client) deleteFlows(cache *flowCategoryCache, flowCacheKey string) error {
fCacheI, ok := cache.Load(flowCacheKey)
if !ok {
// no matching flows found in the cache
return nil
}
fCache := fCacheI.(flowCache)
// Delete flows from OVS.
delFlows := make([]binding.Flow, 0, len(fCache))
for _, flow := range fCache {
delFlows = append(delFlows, flow)
}
if err := c.ofEntryOperations.DeleteAll(delFlows); err != nil {
return err
}
cache.Delete(flowCacheKey)
return nil
}
func (c *client) InstallNodeFlows(hostname string,
peerConfigs map[*net.IPNet]net.IP,
tunnelPeerIP net.IP,
ipsecTunOFPort uint32) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
var flows []binding.Flow
localGatewayMAC := c.nodeConfig.GatewayConfig.MAC
for peerPodCIDR, peerGatewayIP := range peerConfigs {
if peerGatewayIP.To4() != nil {
// Since broadcast is not supported in IPv6, ARP should happen only with IPv4 address, and ARP responder flows
// only work for IPv4 addresses.
flows = append(flows, c.arpResponderFlow(peerGatewayIP, cookie.Node))
}
if c.encapMode.NeedsEncapToPeer(tunnelPeerIP, c.nodeConfig.NodeIPAddr) {
// tunnelPeerIP is the Node Internal Address. In a dual-stack setup, whether this address is an IPv4 address or an
// IPv6 one is decided by the address family of Node Internal Address.
flows = append(flows, c.l3FwdFlowToRemote(localGatewayMAC, *peerPodCIDR, tunnelPeerIP, cookie.Node))
} else {
flows = append(flows, c.l3FwdFlowToRemoteViaGW(localGatewayMAC, *peerPodCIDR, cookie.Node))
}
}
if ipsecTunOFPort != 0 {
// When IPSec tunnel is enabled, packets received from the remote Node are
// input from the Node's IPSec tunnel port, not the default tunnel port. So,
// add a separate tunnelClassifierFlow for the IPSec tunnel port.
flows = append(flows, c.tunnelClassifierFlow(ipsecTunOFPort, cookie.Node))
}
return c.addFlows(c.nodeFlowCache, hostname, flows)
}
func (c *client) UninstallNodeFlows(hostname string) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
return c.deleteFlows(c.nodeFlowCache, hostname)
}
func (c *client) InstallPodFlows(interfaceName string, podInterfaceIPs []net.IP, podInterfaceMAC net.HardwareAddr, ofPort uint32) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
localGatewayMAC := c.nodeConfig.GatewayConfig.MAC
flows := []binding.Flow{
c.podClassifierFlow(ofPort, cookie.Pod),
c.l2ForwardCalcFlow(podInterfaceMAC, ofPort, false, cookie.Pod),
}
// Add support for IPv4 ARP responder.
podInterfaceIPv4 := util.GetIPv4Addr(podInterfaceIPs)
if podInterfaceIPv4 != nil {
flows = append(flows, c.arpSpoofGuardFlow(podInterfaceIPv4, podInterfaceMAC, ofPort, cookie.Pod))
}
// Add IP SpoofGuard flows for all validate IPs.
flows = append(flows, c.podIPSpoofGuardFlow(podInterfaceIPs, podInterfaceMAC, ofPort, cookie.Pod)...)
// Add L3 Routing flows to rewrite Pod's dst MAC for all validate IPs.
flows = append(flows, c.l3FwdFlowToPod(localGatewayMAC, podInterfaceIPs, podInterfaceMAC, cookie.Pod)...)
if c.encapMode.IsNetworkPolicyOnly() {
// In policy-only mode, traffic to local Pod is routed based on destination IP.
flows = append(flows,
c.l3FwdFlowRouteToPod(podInterfaceIPs, podInterfaceMAC, cookie.Pod)...,
)
}
return c.addFlows(c.podFlowCache, interfaceName, flows)
}
func (c *client) UninstallPodFlows(interfaceName string) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
return c.deleteFlows(c.podFlowCache, interfaceName)
}
func (c *client) GetPodFlowKeys(interfaceName string) []string {
fCacheI, ok := c.podFlowCache.Load(interfaceName)
if !ok {
return nil
}
fCache := fCacheI.(flowCache)
flowKeys := make([]string, 0, len(fCache))
// ReplayFlows() could change Flow internal state. Although its current
// implementation does not impact Flow match string generation, we still
// acquire read lock of replayMutex here for logic cleanliness.
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
for _, flow := range fCache {
flowKeys = append(flowKeys, flow.MatchString())
}
return flowKeys
}
func (c *client) InstallServiceGroup(groupID binding.GroupIDType, withSessionAffinity bool, endpoints []proxy.Endpoint) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
group := c.serviceEndpointGroup(groupID, withSessionAffinity, endpoints...)
if err := group.Add(); err != nil {
return fmt.Errorf("error when installing Service Endpoints Group: %w", err)
}
c.groupCache.Store(groupID, group)
return nil
}
func (c *client) UninstallServiceGroup(groupID binding.GroupIDType) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
if !c.bridge.DeleteGroup(groupID) {
return fmt.Errorf("group %d delete failed", groupID)
}
c.groupCache.Delete(groupID)
return nil
}
func (c *client) InstallEndpointFlows(protocol binding.Protocol, endpoints []proxy.Endpoint, isIPv6 bool) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
parser := func(ipStr string) net.IP { return net.ParseIP(ipStr).To4() }
if isIPv6 {
parser = func(ipStr string) net.IP { return net.ParseIP(ipStr).To16() }
}
for _, endpoint := range endpoints {
var flows []binding.Flow
endpointPort, _ := endpoint.Port()
endpointIP := parser(endpoint.IP())
portVal := portToUint16(endpointPort)
cacheKey := fmt.Sprintf("Endpoints_%s_%d_%s", endpointIP, endpointPort, protocol)
flows = append(flows, c.endpointDNATFlow(endpointIP, portVal, protocol))
if endpoint.GetIsLocal() {
flows = append(flows, c.hairpinSNATFlow(endpointIP))
}
if err := c.addFlows(c.serviceFlowCache, cacheKey, flows); err != nil {
return err
}
}
return nil
}
func (c *client) UninstallEndpointFlows(protocol binding.Protocol, endpoint proxy.Endpoint) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
port, err := endpoint.Port()
if err != nil {
return fmt.Errorf("error when getting port: %w", err)
}
cacheKey := fmt.Sprintf("Endpoints_%s_%d_%s", endpoint.IP(), port, protocol)
return c.deleteFlows(c.serviceFlowCache, cacheKey)
}
func (c *client) InstallServiceFlows(groupID binding.GroupIDType, svcIP net.IP, svcPort uint16, protocol binding.Protocol, affinityTimeout uint16) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
var flows []binding.Flow
flows = append(flows, c.serviceLBFlow(groupID, svcIP, svcPort, protocol))
if affinityTimeout != 0 {
flows = append(flows, c.serviceLearnFlow(groupID, svcIP, svcPort, protocol, affinityTimeout))
}
cacheKey := fmt.Sprintf("Service_%s_%d_%s", svcIP, svcPort, protocol)
return c.addFlows(c.serviceFlowCache, cacheKey, flows)
}
func (c *client) UninstallServiceFlows(svcIP net.IP, svcPort uint16, protocol binding.Protocol) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
cacheKey := fmt.Sprintf("Service_%s_%d_%s", svcIP, svcPort, protocol)
return c.deleteFlows(c.serviceFlowCache, cacheKey)
}
func (c *client) InstallLoadBalancerServiceFromOutsideFlows(svcIP net.IP, svcPort uint16, protocol binding.Protocol) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
var flows []binding.Flow
flows = append(flows, c.loadBalancerServiceFromOutsideFlow(svcIP, svcPort, protocol))
cacheKey := fmt.Sprintf("LoadBalancerService_%s_%d_%s", svcIP, svcPort, protocol)
return c.addFlows(c.serviceFlowCache, cacheKey, flows)
}
func (c *client) UninstallLoadBalancerServiceFromOutsideFlows(svcIP net.IP, svcPort uint16, protocol binding.Protocol) error {
c.replayMutex.RLock()
defer c.replayMutex.RUnlock()
cacheKey := fmt.Sprintf("LoadBalancerService_%s_%d_%s", svcIP, svcPort, protocol)
return c.deleteFlows(c.serviceFlowCache, cacheKey)
}
func (c *client) InstallClusterServiceFlows() error {
flows := []binding.Flow{
c.serviceNeedLBFlow(),
c.sessionAffinityReselectFlow(),
c.l2ForwardOutputServiceHairpinFlow(),
}
if c.IsIPv4Enabled() {
flows = append(flows, c.serviceHairpinResponseDNATFlow(binding.ProtocolIP))
flows = append(flows, c.serviceLBBypassFlows(binding.ProtocolIP)...)
}
if c.IsIPv6Enabled() {
flows = append(flows, c.serviceHairpinResponseDNATFlow(binding.ProtocolIPv6))
flows = append(flows, c.serviceLBBypassFlows(binding.ProtocolIPv6)...)
}
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return err
}
c.defaultServiceFlows = flows
return nil
}
func (c *client) InstallClusterServiceCIDRFlows(serviceNets []*net.IPNet) error {
flows := c.serviceCIDRDNATFlows(serviceNets)
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return err
}
c.defaultServiceFlows = flows
return nil
}
func (c *client) InstallGatewayFlows() error {
gatewayConfig := c.nodeConfig.GatewayConfig
gatewayIPs := []net.IP{}
flows := []binding.Flow{
c.gatewayClassifierFlow(cookie.Default),
c.l2ForwardCalcFlow(gatewayConfig.MAC, config.HostGatewayOFPort, true, cookie.Default),
}
flows = append(flows, c.gatewayIPSpoofGuardFlows(cookie.Default)...)
// Add ARP SpoofGuard flow for local gateway interface.
if gatewayConfig.IPv4 != nil {
gatewayIPs = append(gatewayIPs, gatewayConfig.IPv4)
flows = append(flows, c.gatewayARPSpoofGuardFlow(gatewayConfig.IPv4, gatewayConfig.MAC, cookie.Default))
}
if gatewayConfig.IPv6 != nil {
gatewayIPs = append(gatewayIPs, gatewayConfig.IPv6)
}
// Add flow to ensure the liveness check packet could be forwarded correctly.
flows = append(flows, c.localProbeFlow(gatewayIPs, cookie.Default)...)
flows = append(flows, c.ctRewriteDstMACFlows(gatewayConfig.MAC, cookie.Default)...)
// In NoEncap , no traffic from tunnel port
if c.encapMode.SupportsEncap() {
flows = append(flows, c.l3FwdFlowToGateway(gatewayIPs, gatewayConfig.MAC, cookie.Default)...)
}
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return err
}
c.gatewayFlows = flows
return nil
}
func (c *client) InstallDefaultTunnelFlows() error {
flows := []binding.Flow{
c.tunnelClassifierFlow(config.DefaultTunOFPort, cookie.Default),
c.l2ForwardCalcFlow(globalVirtualMAC, config.DefaultTunOFPort, true, cookie.Default),
}
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return err
}
c.defaultTunnelFlows = flows
return nil
}
func (c *client) InstallBridgeUplinkFlows() error {
flows := c.hostBridgeUplinkFlows(*c.nodeConfig.PodIPv4CIDR, cookie.Default)
c.hostNetworkingFlows = flows
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return err
}
c.hostNetworkingFlows = flows
return nil
}
func (c *client) initialize() error {
if err := c.ofEntryOperations.AddAll(c.defaultFlows()); err != nil {
return fmt.Errorf("failed to install default flows: %v", err)
}
if err := c.ofEntryOperations.Add(c.arpNormalFlow(cookie.Default)); err != nil {
return fmt.Errorf("failed to install arp normal flow: %v", err)
}
if err := c.ofEntryOperations.AddAll(c.ipv6Flows(cookie.Default)); err != nil {
return fmt.Errorf("failed to install ipv6 flows: %v", err)
}
if err := c.ofEntryOperations.AddAll(c.decTTLFlows(cookie.Default)); err != nil {
return fmt.Errorf("failed to install dec TTL flow on source Node: %v", err)
}
if err := c.ofEntryOperations.AddAll(c.l2ForwardOutputFlows(cookie.Default)); err != nil {
return fmt.Errorf("failed to install L2 forward output flows: %v", err)
}
if err := c.ofEntryOperations.AddAll(c.connectionTrackFlows(cookie.Default)); err != nil {
return fmt.Errorf("failed to install connection track flows: %v", err)
}
if err := c.ofEntryOperations.AddAll(c.establishedConnectionFlows(cookie.Default)); err != nil {
return fmt.Errorf("failed to install flows to skip established connections: %v", err)
}
if c.encapMode.IsNetworkPolicyOnly() {
if err := c.setupPolicyOnlyFlows(); err != nil {
return fmt.Errorf("failed to setup policy only flows: %w", err)
}
}
return nil
}
func (c *client) Initialize(roundInfo types.RoundInfo, nodeConfig *config.NodeConfig, encapMode config.TrafficEncapModeType) (<-chan struct{}, error) {
c.nodeConfig = nodeConfig
c.encapMode = encapMode
if config.IsIPv4Enabled(nodeConfig, encapMode) {
c.ipProtocols = append(c.ipProtocols, binding.ProtocolIP)
}
if config.IsIPv6Enabled(nodeConfig, encapMode) {
c.ipProtocols = append(c.ipProtocols, binding.ProtocolIPv6)
}
// Initiate connections to target OFswitch, and create tables on the switch.
connCh := make(chan struct{})
if err := c.bridge.Connect(maxRetryForOFSwitch, connCh); err != nil {
return nil, err
}
// Ignore first notification, it is not a "reconnection".
<-connCh
c.roundInfo = roundInfo
c.cookieAllocator = cookie.NewAllocator(roundInfo.RoundNum)
// In the normal case, there should be no existing flows with the current round number. This
// is needed in case the agent was restarted before we had a chance to increment the round
// number (incrementing the round number happens once we are satisfied that stale flows from
// the previous round have been deleted).
if err := c.deleteFlowsByRoundNum(roundInfo.RoundNum); err != nil {
return nil, fmt.Errorf("error when deleting exiting flows for current round number: %v", err)
}
return connCh, c.initialize()
}
func (c *client) InstallExternalFlows() error {
nodeIP := c.nodeConfig.NodeIPAddr.IP
podSubnet := c.nodeConfig.PodIPv4CIDR
flows := c.uplinkSNATFlows(cookie.SNAT)
flows = append(flows, c.snatFlows(nodeIP, *podSubnet, cookie.SNAT)...)
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return fmt.Errorf("failed to install flows for external communication: %v", err)
}
c.hostNetworkingFlows = append(c.hostNetworkingFlows, flows...)
return nil
}
func (c *client) ReplayFlows() {
c.replayMutex.Lock()
defer c.replayMutex.Unlock()
if err := c.initialize(); err != nil {
klog.Errorf("Error during flow replay: %v", err)
}
addFixedFlows := func(flows []binding.Flow) {
for _, flow := range flows {
flow.Reset()
}
if err := c.ofEntryOperations.AddAll(flows); err != nil {
klog.Errorf("Error when replaying fixed flows: %v", err)
}
}
addFixedFlows(c.gatewayFlows)
addFixedFlows(c.defaultServiceFlows)
addFixedFlows(c.defaultTunnelFlows)
// hostNetworkingFlows is used only on Windows. Replay the flows only when there are flows in this cache.
if len(c.hostNetworkingFlows) > 0 {
addFixedFlows(c.hostNetworkingFlows)
}
installCachedFlows := func(key, value interface{}) bool {
fCache := value.(flowCache)
cachedFlows := make([]binding.Flow, 0)
for _, flow := range fCache {
flow.Reset()
cachedFlows = append(cachedFlows, flow)
}
if err := c.ofEntryOperations.AddAll(cachedFlows); err != nil {
klog.Errorf("Error when replaying cached flows: %v", err)
}
return true
}
c.groupCache.Range(func(id, gEntry interface{}) bool {
if err := gEntry.(binding.Group).Add(); err != nil {
klog.Errorf("Error when replaying cached group %d: %v", id, err)
}
return true
})
c.nodeFlowCache.Range(installCachedFlows)
c.podFlowCache.Range(installCachedFlows)
c.serviceFlowCache.Range(installCachedFlows)
c.replayPolicyFlows()
}
func (c *client) deleteFlowsByRoundNum(roundNum uint64) error {
cookieID, cookieMask := cookie.CookieMaskForRound(roundNum)
return c.bridge.DeleteFlowsByCookie(cookieID, cookieMask)
}
func (c *client) DeleteStaleFlows() error {
if c.roundInfo.PrevRoundNum == nil {
klog.V(2).Info("Previous round number is unset, no flows to delete")
return nil
}
return c.deleteFlowsByRoundNum(*c.roundInfo.PrevRoundNum)
}
func (c *client) setupPolicyOnlyFlows() error {
// Rewrites MAC to gw port if the packet received is unmatched by local Pod flows.
flows := c.l3FwdFlowRouteToGW(c.nodeConfig.GatewayConfig.MAC, cookie.Default)
// If IPv6 is enabled, this flow will never get hit.
flows = append(flows,
// Replies any ARP request with the same global virtual MAC.
c.arpResponderStaticFlow(cookie.Default),
)
if err := c.ofEntryOperations.AddAll(flows); err != nil {
return fmt.Errorf("failed to setup policy-only flows: %w", err)
}
return nil
}
func (c *client) SubscribePacketIn(reason uint8, ch chan *ofctrl.PacketIn) error {
return c.bridge.SubscribePacketIn(reason, ch)
}
func (c *client) SendTraceflowPacket(
dataplaneTag uint8,
srcMAC string,
dstMAC string,
srcIP string,
dstIP string,
IPProtocol uint8,
ttl uint8,
IPFlags uint16,
TCPSrcPort uint16,
TCPDstPort uint16,
TCPFlags uint8,
UDPSrcPort uint16,
UDPDstPort uint16,
ICMPType uint8,
ICMPCode uint8,
ICMPID uint16,
ICMPSequence uint16,
inPort uint32,
outPort int32) error {
packetOutBuilder := c.bridge.BuildPacketOut()
parsedSrcMAC, err := net.ParseMAC(srcMAC)
if err != nil {
return err
}
var parsedDstMAC net.HardwareAddr
if dstMAC == "" {
parsedDstMAC = c.nodeConfig.GatewayConfig.MAC
} else {
parsedDstMAC, err = net.ParseMAC(dstMAC)
if err != nil {
return err
}
}
parsedSrcIP := net.ParseIP(srcIP)
parsedDstIP := net.ParseIP(dstIP)
if parsedSrcIP == nil || parsedDstIP == nil {
return errors.New("invalid IP")
}
isIPv6 := parsedSrcIP.To4() == nil
if isIPv6 != (parsedDstIP.To4() == nil) {
return errors.New("IP version mismatch")
}
// Set ethernet header
packetOutBuilder = packetOutBuilder.SetSrcMAC(parsedSrcMAC)
packetOutBuilder = packetOutBuilder.SetDstMAC(parsedDstMAC)
// Set IP header
packetOutBuilder = packetOutBuilder.SetSrcIP(parsedSrcIP)
packetOutBuilder = packetOutBuilder.SetDstIP(parsedDstIP)
if ttl == 0 {
packetOutBuilder = packetOutBuilder.SetTTL(128)
} else {
packetOutBuilder = packetOutBuilder.SetTTL(ttl)
}
if !isIPv6 {
packetOutBuilder = packetOutBuilder.SetIPFlags(IPFlags)
}
// Set transport header
switch IPProtocol {
case protocol.Type_ICMP:
if isIPv6 {
return errors.New("cannot set protocol ICMP in IPv6 packet")
}
packetOutBuilder = packetOutBuilder.SetIPProtocol(binding.ProtocolICMP)
packetOutBuilder = packetOutBuilder.SetICMPType(ICMPType)
packetOutBuilder = packetOutBuilder.SetICMPCode(ICMPCode)
packetOutBuilder = packetOutBuilder.SetICMPID(ICMPID)
packetOutBuilder = packetOutBuilder.SetICMPSequence(ICMPSequence)
case protocol.Type_IPv6ICMP:
if !isIPv6 {
return errors.New("cannot set protocol ICMPv6 in IPv4 packet")
}
packetOutBuilder = packetOutBuilder.SetIPProtocol(binding.ProtocolICMPv6)
packetOutBuilder = packetOutBuilder.SetICMPType(ICMPType)
packetOutBuilder = packetOutBuilder.SetICMPCode(ICMPCode)
packetOutBuilder = packetOutBuilder.SetICMPID(ICMPID)
packetOutBuilder = packetOutBuilder.SetICMPSequence(ICMPSequence)
case protocol.Type_TCP:
if isIPv6 {
packetOutBuilder = packetOutBuilder.SetIPProtocol(binding.ProtocolTCPv6)
} else {
packetOutBuilder = packetOutBuilder.SetIPProtocol(binding.ProtocolTCP)
}
if TCPSrcPort == 0 {
// #nosec G404: random number generator not used for security purposes.
TCPSrcPort = uint16(rand.Uint32())
}
packetOutBuilder = packetOutBuilder.SetTCPSrcPort(TCPSrcPort)
packetOutBuilder = packetOutBuilder.SetTCPDstPort(TCPDstPort)
packetOutBuilder = packetOutBuilder.SetTCPFlags(TCPFlags)
case protocol.Type_UDP:
if isIPv6 {
packetOutBuilder = packetOutBuilder.SetIPProtocol(binding.ProtocolUDPv6)
} else {
packetOutBuilder = packetOutBuilder.SetIPProtocol(binding.ProtocolUDP)
}
packetOutBuilder = packetOutBuilder.SetUDPSrcPort(UDPSrcPort)
packetOutBuilder = packetOutBuilder.SetUDPDstPort(UDPDstPort)
}
packetOutBuilder = packetOutBuilder.SetInport(inPort)
if outPort != -1 {
packetOutBuilder = packetOutBuilder.SetOutport(uint32(outPort))
}
packetOutBuilder = packetOutBuilder.AddLoadAction(binding.NxmFieldIPToS, uint64(dataplaneTag), traceflowTagToSRange)
packetOutObj := packetOutBuilder.Done()
return c.bridge.SendPacketOut(packetOutObj)
}
func (c *client) InstallTraceflowFlows(dataplaneTag uint8) error {
flows := c.traceflowL2ForwardOutputFlows(dataplaneTag, cookie.Default)
if err := c.AddAll(flows); err != nil {
return err
}
flow := c.traceflowConnectionTrackFlows(dataplaneTag, cookie.Default)
if err := c.Add(flow); err != nil {
return err
}
flows = []binding.Flow{}
c.conjMatchFlowLock.Lock()
defer c.conjMatchFlowLock.Unlock()
// Copy default drop rules.
for _, ctx := range c.globalConjMatchFlowCache {
if ctx.dropFlow != nil {
copyFlowBuilder := ctx.dropFlow.CopyToBuilder(priorityNormal+2, false)
if ctx.dropFlow.FlowProtocol() == "" {
copyFlowBuilderIPv6 := ctx.dropFlow.CopyToBuilder(priorityNormal+2, false)
copyFlowBuilderIPv6 = copyFlowBuilderIPv6.MatchProtocol(binding.ProtocolIPv6)
flows = append(
flows, copyFlowBuilderIPv6.MatchIPDscp(dataplaneTag).
SetHardTimeout(300).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
copyFlowBuilder = copyFlowBuilder.MatchProtocol(binding.ProtocolIP)
}
flows = append(
flows, copyFlowBuilder.MatchIPDscp(dataplaneTag).
SetHardTimeout(300).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
}
}
// Copy Antrea NetworkPolicy drop rules.
for _, conj := range c.policyCache.List() {
for _, flow := range conj.(*policyRuleConjunction).metricFlows {
if flow.IsDropFlow() {
copyFlowBuilder := flow.CopyToBuilder(priorityNormal+2, false)
// Generate both IPv4 and IPv6 flows if the original drop flow doesn't match IP/IPv6.
// DSCP field is in IP/IPv6 headers so IP/IPv6 match is required in a flow.
if flow.FlowProtocol() == "" {
copyFlowBuilderIPv6 := flow.CopyToBuilder(priorityNormal+2, false)
copyFlowBuilderIPv6 = copyFlowBuilderIPv6.MatchProtocol(binding.ProtocolIPv6)
flows = append(
flows, copyFlowBuilderIPv6.MatchIPDscp(dataplaneTag).
SetHardTimeout(300).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
copyFlowBuilder = copyFlowBuilder.MatchProtocol(binding.ProtocolIP)
}
flows = append(
flows, copyFlowBuilder.MatchIPDscp(dataplaneTag).
SetHardTimeout(300).
Action().SendToController(uint8(PacketInReasonTF)).
Done())
}
}
}
return c.AddAll(flows)
}
// Add TLV map optClass 0x0104, optType 0x80 optLength 4 tunMetadataIndex 0 to store data plane tag
// in tunnel. Data plane tag will be stored to NXM_NX_TUN_METADATA0[28..31] when packet get encapsulated
// into geneve, and will be stored back to NXM_NX_REG9[28..31] when packet get decapsulated.
func (c *client) InitialTLVMap() error {
return c.bridge.AddTLVMap(0x0104, 0x80, 4, 0)
}
func (c *client) IsIPv4Enabled() bool {
return config.IsIPv4Enabled(c.nodeConfig, c.encapMode)
}
func (c *client) IsIPv6Enabled() bool {
return config.IsIPv6Enabled(c.nodeConfig, c.encapMode)
}
| 1 | 27,809 | Should we add the flows only when NodePort is enabled? | antrea-io-antrea | go |
@@ -2,12 +2,12 @@ AC_PREREQ([2.63])
dnl To do a release: follow the instructions to update libostree-released.sym from
dnl libostree-devel.sym, update the checksum in test-symbols.sh, set is_release_build=yes
dnl below. Then make another post-release commit to bump the version and set
-dnl is_release_build=no
+dnl is_release_build=yes
dnl Seed the release notes with `git-shortlog-with-prs <previous-release>..`. Then use
dnl `git-evtag` to create the tag and push it. Finally, create a GitHub release and attach
dnl the tarball from `make dist`.
m4_define([year_version], [2020])
-m4_define([release_version], [2])
+m4_define([release_version], [3])
m4_define([package_version], [year_version.release_version])
AC_INIT([libostree], [package_version], [[email protected]])
is_release_build=no | 1 | AC_PREREQ([2.63])
dnl To do a release: follow the instructions to update libostree-released.sym from
dnl libostree-devel.sym, update the checksum in test-symbols.sh, set is_release_build=yes
dnl below. Then make another post-release commit to bump the version and set
dnl is_release_build=no
dnl Seed the release notes with `git-shortlog-with-prs <previous-release>..`. Then use
dnl `git-evtag` to create the tag and push it. Finally, create a GitHub release and attach
dnl the tarball from `make dist`.
m4_define([year_version], [2020])
m4_define([release_version], [2])
m4_define([package_version], [year_version.release_version])
AC_INIT([libostree], [package_version], [[email protected]])
is_release_build=no
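dnl Flipped to "yes" only in the release commit itself, per the process above.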
AC_CONFIG_HEADER([config.h])
AC_CONFIG_MACRO_DIR([buildutil])
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([1.13 -Wno-portability foreign no-define tar-ustar no-dist-gzip dist-xz
color-tests subdir-objects])
AM_MAINTAINER_MODE([enable])
AM_SILENT_RULES([yes])
AC_USE_SYSTEM_EXTENSIONS
AC_SYS_LARGEFILE
AC_PROG_CC
AM_PROG_CC_C_O
AC_PROG_YACC
dnl Versioning information
AC_SUBST([YEAR_VERSION], [year_version])
AC_SUBST([RELEASE_VERSION], [release_version])
AC_SUBST([PACKAGE_VERSION], [package_version])
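dnl Skip the warning setup entirely when the caller already forces -Werror in
dnl CFLAGS, presumably so an externally-imposed -Werror build is not broken by
dnl the extra -Werror=... promotions added below.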
AS_IF([echo "$CFLAGS" | grep -q -E -e '-Werror($| )'], [], [
CC_CHECK_FLAGS_APPEND([WARN_CFLAGS], [CFLAGS], [\
-pipe \
-Wall \
-Werror=empty-body \
-Werror=strict-prototypes \
-Werror=missing-prototypes \
-Werror=implicit-function-declaration \
"-Werror=format=2 -Werror=format-security -Werror=format-nonliteral" \
-Werror=pointer-arith -Werror=init-self \
-Werror=missing-declarations \
-Werror=return-type \
-Werror=switch \
-Werror=overflow \
-Werror=int-conversion \
-Werror=parentheses \
-Werror=undef \
-Werror=incompatible-pointer-types \
-Werror=misleading-indentation \
-Werror=missing-include-dirs -Werror=aggregate-return \
-Wstrict-aliasing=2 \
-Werror=unused-result \
])])
AC_SUBST(WARN_CFLAGS)
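dnl Detect sanitizer builds by grepping CFLAGS; the conditionals and defines
dnl below let the build (and tests) adjust behavior under ASAN/TSAN.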
AC_MSG_CHECKING([for -fsanitize=address in CFLAGS])
if echo $CFLAGS | grep -q -e -fsanitize=address; then
AC_MSG_RESULT([yes])
using_asan=yes
else
AC_MSG_RESULT([no])
fi
AM_CONDITIONAL(BUILDOPT_ASAN, [test x$using_asan = xyes])
AM_COND_IF([BUILDOPT_ASAN],
[AC_DEFINE([BUILDOPT_ASAN], 1, [Define if we are building with -fsanitize=address])])
AC_MSG_CHECKING([for -fsanitize=thread in CFLAGS])
if echo $CFLAGS | grep -q -e -fsanitize=thread; then
AC_MSG_RESULT([yes])
using_tsan=yes
else
AC_MSG_RESULT([no])
fi
AM_CONDITIONAL(BUILDOPT_TSAN, [test x$using_tsan = xyes])
AM_COND_IF([BUILDOPT_TSAN],
[AC_DEFINE([BUILDOPT_TSAN], 1, [Define if we are building with -fsanitize=thread])])
# Initialize libtool
LT_PREREQ([2.2.4])
LT_INIT([disable-static])
OSTREE_FEATURES=""
AC_SUBST([OSTREE_FEATURES])
GLIB_TESTS
LIBGLNX_CONFIGURE
dnl These bits attempt to mirror https://github.com/coreutils/gnulib/blob/e369b04cca4da1534c98628b8ee4648bfca2bb3a/m4/parse-datetime.m4#L27
AC_CHECK_FUNCS([nanotime clock_gettime])
AC_STRUCT_TIMEZONE
AC_CHECK_HEADER([sys/xattr.h],,[AC_MSG_ERROR([You must have sys/xattr.h from glibc])])
AS_IF([test "$YACC" != "bison -y"], [AC_MSG_ERROR([bison not found but required])])
PKG_PROG_PKG_CONFIG
# PKG_CHECK_VAR added to pkg-config 0.28
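# m4_define_default is a no-op when the macro already exists, so this only
# provides a fallback implementation for older pkg-config installs.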
m4_define_default(
[PKG_CHECK_VAR],
[AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])
AS_IF([test -z "$$1"], [$1=`$PKG_CONFIG --variable="$3" "$2"`])
AS_IF([test -n "$$1"], [$4], [$5])])
PKG_CHECK_VAR(BASH_COMPLETIONSDIR, [bash-completion], [completionsdir], ,
BASH_COMPLETIONSDIR="${datadir}/bash-completion/completions")
AC_SUBST(BASH_COMPLETIONSDIR)
AM_PATH_GLIB_2_0(,,AC_MSG_ERROR([GLib not found]))
dnl When bumping the gio-unix-2.0 dependency (or glib-2.0 in general),
dnl remember to bump GLIB_VERSION_MIN_REQUIRED and
dnl GLIB_VERSION_MAX_ALLOWED in Makefile.am
GIO_DEPENDENCY="gio-unix-2.0 >= 2.40.0"
PKG_CHECK_MODULES(OT_DEP_GIO_UNIX, $GIO_DEPENDENCY)
dnl 5.0.5 is an arbitrary version here
PKG_CHECK_MODULES(OT_DEP_LZMA, liblzma >= 5.0.5)
dnl Needed for rollsum
PKG_CHECK_MODULES(OT_DEP_ZLIB, zlib)
dnl We're not actually linking to this, just using the header
PKG_CHECK_MODULES(OT_DEP_E2P, e2p)
dnl Arbitrary version that's in CentOS 7.2 now
CURL_DEPENDENCY=7.29.0
AC_ARG_WITH(curl,
AS_HELP_STRING([--with-curl], [Use libcurl @<:@default=no@:>@]),
[], [with_curl=no])
AS_IF([test x$with_curl != xno ], [
PKG_CHECK_MODULES(OT_DEP_CURL, libcurl >= $CURL_DEPENDENCY)
with_curl=yes
AC_DEFINE([HAVE_LIBCURL], 1, [Define if we have libcurl.pc])
dnl Currently using libcurl requires soup for trivial-httpd for tests
with_soup_default=yes
], [with_soup_default=check])
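dnl Net effect: building with curl flips the libsoup default to "yes", since
dnl the test suite's trivial-httpd still needs soup; otherwise soup is merely
dnl probed for ("check") rather than required.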
AM_CONDITIONAL(USE_CURL, test x$with_curl != xno)
if test x$with_curl = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES libcurl"; fi
AC_ARG_ENABLE(http2,
AS_HELP_STRING([--disable-http2],
[Disable use of http2 (default: no)]),,
[enable_http2=yes])
AS_IF([test x$enable_http2 != xno ], [
AC_DEFINE([BUILDOPT_HTTP2], 1, [Define if we enable http2 by default])
], [
OSTREE_FEATURES="$OSTREE_FEATURES no-http2"
])
dnl When bumping the libsoup-2.4 dependency, remember to bump
dnl SOUP_VERSION_MIN_REQUIRED and SOUP_VERSION_MAX_ALLOWED in
dnl Makefile.am
SOUP_DEPENDENCY="libsoup-2.4 >= 2.39.1"
AC_ARG_WITH(soup,
AS_HELP_STRING([--with-soup], [Use libsoup @<:@default=yes@:>@]),
[], [with_soup=$with_soup_default])
AS_IF([test x$with_soup != xno], [
AC_ARG_ENABLE(libsoup_client_certs,
AS_HELP_STRING([--enable-libsoup-client-certs],
[Require availability of new enough libsoup TLS client cert API (default: auto)]),,
[enable_libsoup_client_certs=auto])
AC_MSG_CHECKING([for $SOUP_DEPENDENCY])
PKG_CHECK_EXISTS($SOUP_DEPENDENCY, have_soup=yes, have_soup=no)
AC_MSG_RESULT([$have_soup])
AS_IF([ test x$have_soup = xno && test x$with_soup != xcheck], [
AC_MSG_ERROR([libsoup is enabled but could not be found])
])
AS_IF([test x$have_soup = xyes], [
PKG_CHECK_MODULES(OT_DEP_SOUP, $SOUP_DEPENDENCY)
AC_DEFINE([HAVE_LIBSOUP], 1, [Define if we have libsoup.pc])
with_soup=yes
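dnl Point CFLAGS at soup's include flags temporarily so the AC_CHECK_DECL
dnl probe below compiles against <libsoup/soup.h>.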
save_CFLAGS=$CFLAGS
CFLAGS=$OT_DEP_SOUP_CFLAGS
have_libsoup_client_certs=no
AC_CHECK_DECL([SOUP_SESSION_TLS_INTERACTION], [
AC_DEFINE([HAVE_LIBSOUP_CLIENT_CERTS], 1, [Define if we have libsoup client certs])
have_libsoup_client_certs=yes
], [], [#include <libsoup/soup.h>])
AS_IF([test x$enable_libsoup_client_certs = xyes && test x$have_libsoup_client_certs != xyes], [
AC_MSG_ERROR([libsoup client certs explicitly requested but not found])
])
CFLAGS=$save_CFLAGS
], [
with_soup=no
])
], [ with_soup=no ])
if test x$with_soup != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libsoup"; fi
AM_CONDITIONAL(USE_LIBSOUP, test x$with_soup != xno)
AM_CONDITIONAL(HAVE_LIBSOUP_CLIENT_CERTS, test x$have_libsoup_client_certs = xyes)
AC_ARG_ENABLE(trivial-httpd-cmdline,
[AS_HELP_STRING([--enable-trivial-httpd-cmdline],
[Continue to support "ostree trivial-httpd" [default=no]])],,
enable_trivial_httpd_cmdline=no)
AS_IF([test x$enable_trivial_httpd_cmdline = xyes],
[AC_DEFINE([BUILDOPT_ENABLE_TRIVIAL_HTTPD_CMDLINE], 1, [Define if we are enabling ostree trivial-httpd entrypoint])]
)
AS_IF([test x$with_curl = xyes && test x$with_soup = xno], [
AC_MSG_WARN([Curl enabled, but libsoup is not; libsoup is needed for tests (make check, etc.)])
])
AM_CONDITIONAL(USE_CURL_OR_SOUP, test x$with_curl != xno || test x$with_soup != xno)
AS_IF([test x$with_curl != xno || test x$with_soup != xno],
[AC_DEFINE([HAVE_LIBCURL_OR_LIBSOUP], 1, [Define if we have soup or curl])])
AS_IF([test x$with_curl = xyes], [fetcher_backend=curl], [test x$with_soup = xyes], [fetcher_backend=libsoup], [fetcher_backend=none])
m4_ifdef([GOBJECT_INTROSPECTION_CHECK], [
GOBJECT_INTROSPECTION_CHECK([1.34.0])
])
AM_CONDITIONAL(BUILDOPT_INTROSPECTION, test "x$found_introspection" = xyes)
LIBGPGME_DEPENDENCY="1.1.8"
AC_ARG_WITH(gpgme,
AS_HELP_STRING([--with-gpgme], [Use gpgme @<:@default=yes@:>@]),
[], [with_gpgme=yes])
AS_IF([test x$with_gpgme != xno], [
PKG_CHECK_MODULES(OT_DEP_GPGME, gpgme-pthread >= $LIBGPGME_DEPENDENCY, have_gpgme=yes, [
m4_ifdef([AM_PATH_GPGME_PTHREAD], [
AM_PATH_GPGME_PTHREAD($LIBGPGME_DEPENDENCY, have_gpgme=yes, have_gpgme=no)
],[ have_gpgme=no ])
])
AS_IF([ test x$have_gpgme = xno ], [
AC_MSG_ERROR([Need GPGME_PTHREAD version $LIBGPGME_DEPENDENCY or later])
])
OSTREE_FEATURES="$OSTREE_FEATURES gpgme"
PKG_CHECK_MODULES(OT_DEP_GPG_ERROR, [gpg-error], [], [
dnl This apparently doesn't ship a pkg-config file either, and we need
dnl to link to it directly.
AC_PATH_PROG(GPG_ERROR_CONFIG, [gpg-error-config], [AC_MSG_ERROR([Missing gpg-error-config])])
OT_DEP_GPG_ERROR_CFLAGS="$( $GPG_ERROR_CONFIG --cflags )"
OT_DEP_GPG_ERROR_LIBS="$( $GPG_ERROR_CONFIG --libs )"
])
OT_DEP_GPGME_CFLAGS="${OT_DEP_GPGME_CFLAGS} ${OT_DEP_GPG_ERROR_CFLAGS}"
OT_DEP_GPGME_LIBS="${OT_DEP_GPGME_LIBS} ${OT_DEP_GPG_ERROR_LIBS}"
],
[
AC_DEFINE([OSTREE_DISABLE_GPGME], 1, [Define to disable internal GPGME support])
with_gpgme=no
]
)
AM_CONDITIONAL(USE_GPGME, test "x$have_gpgme" = xyes)
LIBARCHIVE_DEPENDENCY="libarchive >= 2.8.0"
# What's in RHEL7.2.
FUSE_DEPENDENCY="fuse >= 2.9.2"
AC_CHECK_HEADERS([linux/fsverity.h])
AS_IF([test x$ac_cv_header_linux_fsverity_h = xyes ],
[OSTREE_FEATURES="$OSTREE_FEATURES ex-fsverity"])
# check for gtk-doc
m4_ifdef([GTK_DOC_CHECK], [
GTK_DOC_CHECK([1.15], [--flavour no-tmpl])
],[
enable_gtk_doc=no
AM_CONDITIONAL([ENABLE_GTK_DOC], false)
])
AC_ARG_ENABLE(man,
[AS_HELP_STRING([--enable-man],
[generate man pages [default=auto]])],,
enable_man=maybe)
AS_IF([test "$enable_man" != no], [
AC_PATH_PROG([XSLTPROC], [xsltproc])
AS_IF([test -z "$XSLTPROC"], [
AS_IF([test "$enable_man" = yes], [
AC_MSG_ERROR([xsltproc is required for --enable-man])
])
enable_man=no
],[
enable_man=yes
])
])
AM_CONDITIONAL(ENABLE_MAN, test "$enable_man" != no)
AC_ARG_ENABLE(rust,
[AS_HELP_STRING([--enable-rust],
[Compile Rust code instead of C [default=no]])],,
[enable_rust=no; rust_debug_release=no])
AS_IF([test "$enable_rust" = yes], [
AC_PATH_PROG([cargo], [cargo])
AS_IF([test -z "$cargo"], [AC_MSG_ERROR([cargo is required for --enable-rust])])
AC_PATH_PROG([rustc], [rustc])
AS_IF([test -z "$rustc"], [AC_MSG_ERROR([rustc is required for --enable-rust])])
dnl These bits based on gnome:librsvg/configure.ac
dnl By default, we build in public release mode.
AC_ARG_ENABLE(rust-debug,
AS_HELP_STRING([--enable-rust-debug],
[Build Rust code with debugging information [default=no]]),
[rust_debug_release=$enableval],
[rust_debug_release=release])
AC_MSG_CHECKING(whether to build Rust code with debugging information)
if test "x$rust_debug_release" = "xyes" ; then
rust_debug_release=debug
AC_MSG_RESULT(yes)
else
AC_MSG_RESULT(no)
fi
RUST_TARGET_SUBDIR=${rust_debug_release}
AC_SUBST([RUST_TARGET_SUBDIR])
])
AM_CONDITIONAL(RUST_DEBUG, [test "x$rust_debug_release" = "xdebug"])
AM_CONDITIONAL(ENABLE_RUST, [test "$enable_rust" != no])
AC_ARG_WITH(libarchive,
AS_HELP_STRING([--without-libarchive], [Do not use libarchive]),
:, with_libarchive=maybe)
AS_IF([ test x$with_libarchive != xno ], [
AC_MSG_CHECKING([for $LIBARCHIVE_DEPENDENCY])
PKG_CHECK_EXISTS($LIBARCHIVE_DEPENDENCY, have_libarchive=yes, have_libarchive=no)
AC_MSG_RESULT([$have_libarchive])
AS_IF([ test x$have_libarchive = xno && test x$with_libarchive != xmaybe ], [
AC_MSG_ERROR([libarchive is enabled but could not be found])
])
AS_IF([ test x$have_libarchive = xyes], [
AC_DEFINE([HAVE_LIBARCHIVE], 1, [Define if we have libarchive.pc])
PKG_CHECK_MODULES(OT_DEP_LIBARCHIVE, $LIBARCHIVE_DEPENDENCY)
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBARCHIVE_LIBS
AC_CHECK_FUNCS(archive_read_support_filter_all)
LIBS=$save_LIBS
with_libarchive=yes
], [
with_libarchive=no
])
], [ with_libarchive=no ])
if test x$with_libarchive != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libarchive"; fi
AM_CONDITIONAL(USE_LIBARCHIVE, test $with_libarchive != no)
dnl This is what is in RHEL7 anyways
SELINUX_DEPENDENCY="libselinux >= 2.1.13"
AC_ARG_WITH(selinux,
AS_HELP_STRING([--without-selinux], [Do not use SELinux]),
:, with_selinux=maybe)
AS_IF([ test x$with_selinux != xno ], [
AC_MSG_CHECKING([for $SELINUX_DEPENDENCY])
PKG_CHECK_EXISTS($SELINUX_DEPENDENCY, have_selinux=yes, have_selinux=no)
AC_MSG_RESULT([$have_selinux])
AS_IF([ test x$have_selinux = xno && test x$with_selinux != xmaybe ], [
AC_MSG_ERROR([SELinux is enabled but could not be found])
])
AS_IF([ test x$have_selinux = xyes], [
AC_DEFINE([HAVE_SELINUX], 1, [Define if we have libselinux.pc])
PKG_CHECK_MODULES(OT_DEP_SELINUX, $SELINUX_DEPENDENCY)
with_selinux=yes
], [
with_selinux=no
])
], [ with_selinux=no ])
if test x$with_selinux != xno; then OSTREE_FEATURES="$OSTREE_FEATURES selinux"; fi
AM_CONDITIONAL(USE_SELINUX, test $with_selinux != no)
AC_ARG_WITH(smack,
AS_HELP_STRING([--with-smack], [Enable smack]),
:, with_smack=no)
AS_IF([ test x$with_smack = xyes], [
AC_DEFINE([WITH_SMACK], 1, [Define if we have smack.pc])
])
AM_CONDITIONAL(USE_SMACK, test $with_smack != no)
dnl crypto
AC_ARG_WITH(crypto,
AS_HELP_STRING([--with-crypto], [Choose library for checksums, one of glib, openssl, gnutls (default: glib)]),
:, with_crypto=glib)
AS_IF([test $with_crypto = glib],
[],
[test $with_crypto = openssl],
[with_openssl=yes],
[test $with_crypto = gnutls],
[],
[AC_MSG_ERROR([Invalid --with-crypto $with_crypto])]
)
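dnl For illustration only: typical invocations of this option would be
dnl   ./configure --with-crypto=openssl
dnl   ./configure --with-crypto=gnutls
dnl while omitting it keeps the default glib checksum implementation.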
dnl begin openssl (really just libcrypto right now)
dnl Note this option is now deprecated in favor of --with-crypto=openssl
OPENSSL_DEPENDENCY="libcrypto >= 1.0.1"
AC_ARG_WITH(openssl,
AS_HELP_STRING([--with-openssl], [Enable use of OpenSSL libcrypto (checksums)]),with_openssl=$withval,with_openssl=no)
AS_IF([ test x$with_openssl != xno ], [
PKG_CHECK_MODULES(OT_DEP_CRYPTO, $OPENSSL_DEPENDENCY)
AC_DEFINE([HAVE_OPENSSL], 1, [Define if we have openssl])
with_crypto=openssl
with_openssl=yes
], [
with_openssl=no
])
if test x$with_openssl != xno; then OSTREE_FEATURES="$OSTREE_FEATURES openssl"; fi
AM_CONDITIONAL(USE_OPENSSL, test $with_openssl != no)
dnl end openssl
dnl begin gnutls; in contrast to openssl this one only
dnl supports --with-crypto=gnutls
GNUTLS_DEPENDENCY="gnutls >= 3.5.0"
AS_IF([ test $with_crypto = gnutls ], [
PKG_CHECK_MODULES(OT_DEP_CRYPTO, $GNUTLS_DEPENDENCY)
AC_DEFINE([HAVE_GNUTLS], 1, [Define if we have gnutls])
OSTREE_FEATURES="$OSTREE_FEATURES gnutls"
])
AM_CONDITIONAL(USE_GNUTLS, test $with_crypto = gnutls)
dnl end gnutls
dnl Avahi dependency for finding repos
AVAHI_DEPENDENCY="avahi-client >= 0.6.31 avahi-glib >= 0.6.31"
AC_ARG_WITH(avahi,
AS_HELP_STRING([--without-avahi], [Do not use Avahi]),
:, with_avahi=maybe)
AS_IF([ test x$with_avahi != xno ], [
AC_MSG_CHECKING([for $AVAHI_DEPENDENCY])
PKG_CHECK_EXISTS($AVAHI_DEPENDENCY, have_avahi=yes, have_avahi=no)
AC_MSG_RESULT([$have_avahi])
AS_IF([ test x$have_avahi = xno && test x$with_avahi != xmaybe ], [
AC_MSG_ERROR([Avahi is enabled but could not be found])
])
AS_IF([ test x$have_avahi = xyes], [
AC_DEFINE([HAVE_AVAHI], 1, [Define if we have avahi-client.pc and avahi-glib.pc])
PKG_CHECK_MODULES(OT_DEP_AVAHI, $AVAHI_DEPENDENCY)
with_avahi=yes
], [
with_avahi=no
])
], [ with_avahi=no ])
if test x$with_avahi != xno; then OSTREE_FEATURES="$OSTREE_FEATURES avahi"; fi
AM_CONDITIONAL(USE_AVAHI, test $with_avahi != no)
dnl This is what is in RHEL7.2 right now, picking it arbitrarily
LIBMOUNT_DEPENDENCY="mount >= 2.23.0"
AC_ARG_WITH(libmount,
AS_HELP_STRING([--without-libmount], [Do not use libmount]),
:, with_libmount=maybe)
AS_IF([ test x$with_libmount != xno ], [
AC_MSG_CHECKING([for $LIBMOUNT_DEPENDENCY])
PKG_CHECK_EXISTS($LIBMOUNT_DEPENDENCY, have_libmount=yes, have_libmount=no)
AC_MSG_RESULT([$have_libmount])
AS_IF([ test x$have_libmount = xno && test x$with_libmount != xmaybe ], [
AC_MSG_ERROR([libmount is enabled but could not be found])
])
AS_IF([ test x$have_libmount = xyes], [
AC_DEFINE([HAVE_LIBMOUNT], 1, [Define if we have libmount.pc])
PKG_CHECK_MODULES(OT_DEP_LIBMOUNT, $LIBMOUNT_DEPENDENCY)
with_libmount=yes
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBMOUNT_LIBS
AC_CHECK_FUNCS(mnt_unref_cache)
LIBS=$save_LIBS
], [
with_libmount=no
])
], [ with_libmount=no ])
if test x$with_libmount != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libmount"; fi
AM_CONDITIONAL(USE_LIBMOUNT, test $with_libmount != no)
# Enabled by default because I think people should use it.
AC_ARG_ENABLE(rofiles-fuse,
[AS_HELP_STRING([--enable-rofiles-fuse],
[generate rofiles-fuse helper [default=yes]])],,
enable_rofiles_fuse=yes)
AS_IF([ test x$enable_rofiles_fuse != xno ], [
PKG_CHECK_MODULES(BUILDOPT_FUSE, $FUSE_DEPENDENCY)
], [enable_rofiles_fuse=no])
AM_CONDITIONAL(BUILDOPT_FUSE, test x$enable_rofiles_fuse = xyes)
AC_ARG_WITH(dracut,
AS_HELP_STRING([--with-dracut],
[Install dracut module (default: no)]),,
[with_dracut=no])
case x$with_dracut in
xno) ;;
xyes) ;;
xyesbutnoconf) ;;
*) AC_MSG_ERROR([Unknown --with-dracut value $with_dracut])
esac
AM_CONDITIONAL(BUILDOPT_DRACUT, test x$with_dracut = xyes || test x$with_dracut = xyesbutnoconf)
AM_CONDITIONAL(BUILDOPT_DRACUT_CONF, test x$with_dracut = xyes)
AC_ARG_WITH(mkinitcpio,
AS_HELP_STRING([--with-mkinitcpio],
[Install mkinitcpio module (default: no)]),,
[with_mkinitcpio=no])
AM_CONDITIONAL(BUILDOPT_MKINITCPIO, test x$with_mkinitcpio = xyes)
dnl We have separate checks for libsystemd and the unit dir for historical reasons
AC_ARG_WITH(libsystemd,
AS_HELP_STRING([--without-libsystemd], [Do not use libsystemd]),
:, with_libsystemd=maybe)
AS_IF([ test x$with_libsystemd != xno ], [
AC_MSG_CHECKING([for libsystemd])
PKG_CHECK_EXISTS(libsystemd, have_libsystemd=yes, have_libsystemd=no)
AC_MSG_RESULT([$have_libsystemd])
AS_IF([ test x$have_libsystemd = xno && test x$with_libsystemd != xmaybe ], [
AC_MSG_ERROR([libsystemd is enabled but could not be found])
])
AS_IF([ test x$have_libsystemd = xyes], [
AC_DEFINE([HAVE_LIBSYSTEMD], 1, [Define if we have libsystemd.pc])
PKG_CHECK_MODULES([LIBSYSTEMD], [libsystemd])
with_libsystemd=yes
], [
with_libsystemd=no
])
], [ with_libsystemd=no ])
AS_IF([test "x$with_libsystemd" = "xyes"], [
AC_ARG_WITH([systemdsystemunitdir],
AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files]),
[],
[with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)])
AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [
AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])
])
AC_ARG_WITH([systemdsystemgeneratordir],
AS_HELP_STRING([--with-systemdsystemgeneratordir=DIR], [Directory for systemd generators]),
[],
[with_systemdsystemgeneratordir=$($PKG_CONFIG --variable=systemdsystemgeneratordir systemd)])
AS_IF([test "x$with_systemdsystemgeneratordir" != "xno"], [
AC_SUBST([systemdsystemgeneratordir], [$with_systemdsystemgeneratordir])
])
])
AM_CONDITIONAL(BUILDOPT_SYSTEMD, test x$with_libsystemd = xyes)
dnl If we have both, we use the "new /var" model with ostree-system-generator
AM_CONDITIONAL(BUILDOPT_SYSTEMD_AND_LIBMOUNT,[test x$with_libsystemd = xyes && test x$with_libmount = xyes])
AM_COND_IF(BUILDOPT_SYSTEMD_AND_LIBMOUNT,
AC_DEFINE([BUILDOPT_LIBSYSTEMD_AND_LIBMOUNT], 1, [Define if systemd and libmount]))
if test x$with_libsystemd = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES systemd"; fi
AC_ARG_WITH(builtin-grub2-mkconfig,
AS_HELP_STRING([--with-builtin-grub2-mkconfig],
[Use a builtin minimal grub2-mkconfig to generate a GRUB2 configuration file (default: no)]),,
[with_builtin_grub2_mkconfig=no])
AM_CONDITIONAL(BUILDOPT_BUILTIN_GRUB2_MKCONFIG, test x$with_builtin_grub2_mkconfig = xyes)
AM_COND_IF(BUILDOPT_BUILTIN_GRUB2_MKCONFIG,
AC_DEFINE([USE_BUILTIN_GRUB2_MKCONFIG], 1, [Define if using internal ostree-grub-generator]))
AC_ARG_WITH(grub2-mkconfig-path,
AS_HELP_STRING([--with-grub2-mkconfig-path],
[Path to grub2-mkconfig]))
AS_IF([test x$with_grub2_mkconfig_path = x], [
dnl Otherwise, look for the path to the system generator. On some
dnl distributions the GRUB2 *-mkconfig executable has a 'grub2' prefix
dnl and on others 'grub'. We default to grub2-mkconfig.
AC_CHECK_PROGS(GRUB2_MKCONFIG, [grub2-mkconfig grub-mkconfig], [grub2-mkconfig])
],[GRUB2_MKCONFIG=$with_grub2_mkconfig_path])
AC_DEFINE_UNQUOTED([GRUB2_MKCONFIG_PATH], ["$GRUB2_MKCONFIG"], [The system grub2-mkconfig executable name])
AC_ARG_WITH(static-compiler,
AS_HELP_STRING([--with-static-compiler],
[Use the given compiler to build ostree-prepare-root statically linked (default: no)]),,
[with_static_compiler=no])
AM_CONDITIONAL(BUILDOPT_USE_STATIC_COMPILER, test "x$with_static_compiler" != xno)
AC_SUBST(STATIC_COMPILER, $with_static_compiler)
dnl for tests (but we can't use asan with gjs or any introspection,
dnl see https://github.com/google/sanitizers/wiki/AddressSanitizerAsDso for more info)
AS_IF([test "x$found_introspection" = xyes && test x$using_asan != xyes], [
AC_PATH_PROG(GJS, [gjs])
if test -n "$GJS"; then
have_gjs=yes
else
have_gjs=no
fi
], [have_gjs=no])
AM_CONDITIONAL(BUILDOPT_GJS, test x$have_gjs = xyes)
# Do we enable building experimental (non-stable) API?
# The OSTREE_ENABLE_EXPERIMENTAL_API #define is used internally and in public
# headers, so any consumer of libostree who wants to use experimental API must
# #define OSTREE_ENABLE_EXPERIMENTAL_API 1
# before including libostree headers. This means the name in the AC_DEFINE below
# is public API.
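# For illustration (hypothetical consumer code, not part of this script), a
# libostree user opting in to the experimental API would write:
#   #define OSTREE_ENABLE_EXPERIMENTAL_API 1
#   #include <ostree.h>
# at the top of each translation unit that uses unstable symbols.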
AC_ARG_ENABLE([experimental-api],
[AS_HELP_STRING([--enable-experimental-api],
[Enable unstable experimental API in libostree [default=no]])],,
[enable_experimental_api=no])
AS_IF([test x$enable_experimental_api = xyes],
[AC_DEFINE([OSTREE_ENABLE_EXPERIMENTAL_API],[1],[Define if experimental API should be enabled])
OSTREE_FEATURES="$OSTREE_FEATURES experimental"]
)
AM_CONDITIONAL([ENABLE_EXPERIMENTAL_API],[test x$enable_experimental_api = xyes])
AM_CONDITIONAL([BUILDOPT_IS_DEVEL_BUILD],[test x$is_release_build != xyes])
AM_COND_IF([BUILDOPT_IS_DEVEL_BUILD],
AC_DEFINE([BUILDOPT_IS_DEVEL_BUILD], [1], [Define if doing a development build])
release_build_type=devel,
release_build_type=release)
OSTREE_FEATURES="$OSTREE_FEATURES $release_build_type"
# P2P API is public in OSTree >= 2018.6
OSTREE_FEATURES="$OSTREE_FEATURES p2p"
# Strip leading whitespace
OSTREE_FEATURES=$(echo ${OSTREE_FEATURES})
AC_CONFIG_FILES([
Makefile
apidoc/Makefile
src/libostree/ostree-1.pc
src/libostree/ostree-version.h
])
AC_OUTPUT
echo "
libostree $VERSION ($release_build_type)
features: $OSTREE_FEATURES
===============
introspection: $found_introspection
Rust (internal oxidation): $rust_debug_release
rofiles-fuse: $enable_rofiles_fuse
HTTP backend: $fetcher_backend
\"ostree trivial-httpd\": $enable_trivial_httpd_cmdline
SELinux: $with_selinux
fs-verity: $ac_cv_header_linux_fsverity_h
cryptographic checksums: $with_crypto
systemd: $with_libsystemd
libmount: $with_libmount
libarchive (parse tar files directly): $with_libarchive
static deltas: yes (always enabled now)
O_TMPFILE: $enable_otmpfile
wrpseudo-compat: $enable_wrpseudo_compat
man pages (xsltproc): $enable_man
api docs (gtk-doc): $enable_gtk_doc
installed tests: $enable_installed_tests
gjs-based tests: $have_gjs
dracut: $with_dracut
mkinitcpio: $with_mkinitcpio
Static compiler for ostree-prepare-root: $with_static_compiler
Experimental API: $enable_experimental_api"
AS_IF([test x$with_builtin_grub2_mkconfig = xyes], [
echo " builtin grub2-mkconfig (instead of system): $with_builtin_grub2_mkconfig"
], [
echo " grub2-mkconfig path: $GRUB2_MKCONFIG"
])
echo ""
| 1 | 17,879 | ITYM to flip this one... | ostreedev-ostree | c |
@@ -7,7 +7,9 @@ module.exports = {
},
extends: [
'airbnb',
- 'plugin:@typescript-eslint/recommended',
+ "eslint:recommended",
+ "plugin:@typescript-eslint/eslint-recommended",
+ "plugin:@typescript-eslint/recommended",
'prettier',
'prettier/@typescript-eslint',
'plugin:prettier/recommended', | 1 | module.exports = {
ignorePatterns: ['commitlint.config.js', 'jest.config.js'],
env: {
browser: true,
es6: true,
'jest/globals': true,
},
extends: [
'airbnb',
'plugin:@typescript-eslint/recommended',
'prettier',
'prettier/@typescript-eslint',
'plugin:prettier/recommended',
'eslint-config-prettier',
],
globals: {
Atomics: 'readonly',
SharedArrayBuffer: 'readonly',
},
parser: '@typescript-eslint/parser',
parserOptions: {
project: './tsconfig.json',
tsconfigRootDir: './',
},
settings: {
'import/resolver': {
node: {
extensions: ['.js', '.jsx', '.ts', '.tsx'],
},
},
},
plugins: ['react', '@typescript-eslint', 'prettier', 'jest'],
rules: {
'prettier/prettier': 'error',
'@typescript-eslint/member-delimiter-style': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-unused-vars': 'off',
'@typescript-eslint/unified-signatures': 'error',
'@typescript-eslint/no-inferrable-types': ['error', { ignoreParameters: true }],
'react/jsx-filename-extension': ['error', { extensions: ['.tsx'] }],
'react/jsx-one-expression-per-line': 'off',
'react/jsx-wrap-multilines': 'off',
'react/jsx-props-no-spreading': 'off',
'arrow-body-style': ['warn', 'as-needed'],
'no-param-reassign': ['error', { props: false }],
'import/prefer-default-export': 'off',
'no-console': 'off',
'eol-last': ['error', 'always'],
'no-debugger': 'error',
'no-nested-ternary': 'off',
'import/no-unresolved': 'off',
'import/extensions': ['error', 'never'],
curly: ['error', 'all'],
},
}
| 1 | 15,120 | What's the difference between typescript-eslint/eslint-recommended and typescript-eslint/recommended? I really can't get it. | HospitalRun-hospitalrun-frontend | js |
@@ -223,6 +223,14 @@ type Config struct {
// If the timeout is exceeded, the connection is closed.
// If this value is zero, the timeout is set to 30 seconds.
IdleTimeout time.Duration
+ // AttackTimeout is the maximum duration that may pass after a suspicious packet is
+ // received without any incoming network activity, used for experimental mitigation
+ // against injection attacks. If the timeout is exceeded, the connection is closed.
+ // This timeout should be longer than any reasonable round trip time, but excessively
+ // long timeouts will delay reporting of rejected connections.
+ // If this value is zero, the default mitigation behavior will be used (currently off).
+ // If this value is negative, mitigation will be disabled.
+ AttackTimeout time.Duration
// AcceptToken determines if a Token is accepted.
// It is called with token = nil if the client didn't send a token.
// If not set, a default verification function is used: | 1 | package quic
import (
"context"
"crypto/tls"
"io"
"net"
"time"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/quictrace"
)
// The StreamID is the ID of a QUIC stream.
type StreamID = protocol.StreamID
// A VersionNumber is a QUIC version number.
type VersionNumber = protocol.VersionNumber
// A Token can be used to verify the ownership of the client address.
type Token struct {
// IsRetryToken encodes how the client received the token. There are two ways:
// * In a Retry packet sent when trying to establish a new connection.
// * In a NEW_TOKEN frame on a previous connection.
IsRetryToken bool
RemoteAddr string
SentTime time.Time
}
// A ClientToken is a token received by the client.
// It can be used to skip address validation on future connection attempts.
type ClientToken struct {
data []byte
}
type TokenStore interface {
// Pop searches for a ClientToken associated with the given key.
// Since tokens are not supposed to be reused, it must remove the token from the cache.
// It returns nil when no token is found.
Pop(key string) (token *ClientToken)
// Put adds a token to the cache with the given key. It might get called
// multiple times in a connection.
Put(key string, token *ClientToken)
}
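// For illustration only (hypothetical code, not part of this file): a minimal
// in-memory TokenStore sketch honoring the contract above. Pop removes the
// token so it is never reused; Put overwrites any previous token for the key.
//
//	type memTokenStore struct{ m map[string]*ClientToken }
//
//	func (s *memTokenStore) Pop(key string) *ClientToken {
//		t := s.m[key]
//		delete(s.m, key) // tokens must not be reused
//		return t
//	}
//
//	func (s *memTokenStore) Put(key string, t *ClientToken) { s.m[key] = t }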
// An ErrorCode is an application-defined error code.
// Valid values range between 0 and MAX_UINT62.
type ErrorCode = protocol.ApplicationErrorCode
// Stream is the interface implemented by QUIC streams
type Stream interface {
// StreamID returns the stream ID.
StreamID() StreamID
// Read reads data from the stream.
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
// If the stream was canceled by the peer, the error implements the StreamError
// interface, and Canceled() == true.
// If the session was closed due to a timeout, the error satisfies
// the net.Error interface, and Timeout() will be true.
io.Reader
// Write writes data to the stream.
// Write can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetWriteDeadline.
// If the stream was canceled by the peer, the error implements the StreamError
// interface, and Canceled() == true.
// If the session was closed due to a timeout, the error satisfies
// the net.Error interface, and Timeout() will be true.
io.Writer
// Close closes the write-direction of the stream.
// Future calls to Write are not permitted after calling Close.
// It must not be called concurrently with Write.
// It must not be called after calling CancelWrite.
io.Closer
// CancelWrite aborts sending on this stream.
// Data already written, but not yet delivered to the peer is not guaranteed to be delivered reliably.
// Write will unblock immediately, and future calls to Write will fail.
// When called multiple times or after closing the stream it is a no-op.
CancelWrite(ErrorCode)
// CancelRead aborts receiving on this stream.
// It will ask the peer to stop transmitting stream data.
// Read will unblock immediately, and future Read calls will fail.
// When called multiple times or after reading the io.EOF it is a no-op.
CancelRead(ErrorCode)
// The context is canceled as soon as the write-side of the stream is closed.
// This happens when Close() or CancelWrite() is called, or when the peer
// cancels the read-side of their stream.
// Warning: This API should not be considered stable and might change soon.
Context() context.Context
// SetReadDeadline sets the deadline for future Read calls and
// any currently-blocked Read call.
// A zero value for t means Read will not time out.
SetReadDeadline(t time.Time) error
// SetWriteDeadline sets the deadline for future Write calls
// and any currently-blocked Write call.
// Even if write times out, it may return n > 0, indicating that
// some of the data was successfully written.
// A zero value for t means Write will not time out.
SetWriteDeadline(t time.Time) error
// SetDeadline sets the read and write deadlines associated
// with the connection. It is equivalent to calling both
// SetReadDeadline and SetWriteDeadline.
SetDeadline(t time.Time) error
}
// A ReceiveStream is a unidirectional Receive Stream.
type ReceiveStream interface {
// see Stream.StreamID
StreamID() StreamID
// see Stream.Read
io.Reader
// see Stream.CancelRead
CancelRead(ErrorCode)
// see Stream.SetReadDeadline
SetReadDeadline(t time.Time) error
}
// A SendStream is a unidirectional Send Stream.
type SendStream interface {
// see Stream.StreamID
StreamID() StreamID
// see Stream.Write
io.Writer
// see Stream.Close
io.Closer
// see Stream.CancelWrite
CancelWrite(ErrorCode)
// see Stream.Context
Context() context.Context
// see Stream.SetWriteDeadline
SetWriteDeadline(t time.Time) error
}
// StreamError is returned by Read and Write when the peer cancels the stream.
type StreamError interface {
error
Canceled() bool
ErrorCode() ErrorCode
}
// A Session is a QUIC connection between two peers.
type Session interface {
// AcceptStream returns the next stream opened by the peer, blocking until one is available.
// If the session was closed due to a timeout, the error satisfies
// the net.Error interface, and Timeout() will be true.
AcceptStream(context.Context) (Stream, error)
// AcceptUniStream returns the next unidirectional stream opened by the peer, blocking until one is available.
// If the session was closed due to a timeout, the error satisfies
// the net.Error interface, and Timeout() will be true.
AcceptUniStream(context.Context) (ReceiveStream, error)
// OpenStream opens a new bidirectional QUIC stream.
// There is no signaling to the peer about new streams:
// The peer can only accept the stream after data has been sent on the stream.
// If the error is non-nil, it satisfies the net.Error interface.
// When reaching the peer's stream limit, err.Temporary() will be true.
// If the session was closed due to a timeout, Timeout() will be true.
OpenStream() (Stream, error)
// OpenStreamSync opens a new bidirectional QUIC stream.
// It blocks until a new stream can be opened.
// If the error is non-nil, it satisfies the net.Error interface.
// If the session was closed due to a timeout, Timeout() will be true.
OpenStreamSync(context.Context) (Stream, error)
// OpenUniStream opens a new outgoing unidirectional QUIC stream.
// If the error is non-nil, it satisfies the net.Error interface.
// When reaching the peer's stream limit, Temporary() will be true.
// If the session was closed due to a timeout, Timeout() will be true.
OpenUniStream() (SendStream, error)
// OpenUniStreamSync opens a new outgoing unidirectional QUIC stream.
// It blocks until a new stream can be opened.
// If the error is non-nil, it satisfies the net.Error interface.
// If the session was closed due to a timeout, Timeout() will be true.
OpenUniStreamSync(context.Context) (SendStream, error)
// LocalAddr returns the local address.
LocalAddr() net.Addr
// RemoteAddr returns the address of the peer.
RemoteAddr() net.Addr
// Close the connection.
io.Closer
// Close the connection with an error.
// The error string will be sent to the peer.
CloseWithError(ErrorCode, string) error
// The context is canceled when the session is closed.
// Warning: This API should not be considered stable and might change soon.
Context() context.Context
// ConnectionState returns basic details about the QUIC connection.
// Warning: This API should not be considered stable and might change soon.
ConnectionState() tls.ConnectionState
}
// An EarlySession is a session that is handshaking.
// Data sent during the handshake is encrypted using the forward secure keys.
// When using client certificates, the client's identity is only verified
// after completion of the handshake.
type EarlySession interface {
Session
// Blocks until the handshake completes (or fails).
// Data sent before completion of the handshake is encrypted with 1-RTT keys.
// Note that the client's identity hasn't been verified yet.
HandshakeComplete() context.Context
}
// Config contains all configuration data needed for a QUIC server or client.
type Config struct {
// The QUIC versions that can be negotiated.
// If not set, it uses all versions available.
// Warning: This API should not be considered stable and will change soon.
Versions []VersionNumber
// The length of the connection ID in bytes.
// It can be 0, or any value between 4 and 18.
// If not set, the interpretation depends on where the Config is used:
// If used for dialing an address, a 0 byte connection ID will be used.
// If used for a server, or dialing on a packet conn, a 4 byte connection ID will be used.
// When dialing on a packet conn, the ConnectionIDLength value must be the same for every Dial call.
ConnectionIDLength int
// HandshakeTimeout is the maximum duration that the cryptographic handshake may take.
// If the timeout is exceeded, the connection is closed.
// If this value is zero, the timeout is set to 10 seconds.
HandshakeTimeout time.Duration
// IdleTimeout is the maximum duration that may pass without any incoming network activity.
// This value only applies after the handshake has completed.
// If the timeout is exceeded, the connection is closed.
// If this value is zero, the timeout is set to 30 seconds.
IdleTimeout time.Duration
// AcceptToken determines if a Token is accepted.
// It is called with token = nil if the client didn't send a token.
// If not set, a default verification function is used:
// * it verifies that the address matches, and
// * if the token is a retry token, that it was issued within the last 5 seconds
// * else, that it was issued within the last 24 hours.
// This option is only valid for the server.
AcceptToken func(clientAddr net.Addr, token *Token) bool
// The TokenStore stores tokens received from the server.
// Tokens are used to skip address validation on future connection attempts.
// The key used to store tokens is the ServerName from the tls.Config, if set
// otherwise the token is associated with the server's IP address.
TokenStore TokenStore
// MaxReceiveStreamFlowControlWindow is the maximum stream-level flow control window for receiving data.
// If this value is zero, it will default to 1 MB for the server and 6 MB for the client.
MaxReceiveStreamFlowControlWindow uint64
// MaxReceiveConnectionFlowControlWindow is the connection-level flow control window for receiving data.
// If this value is zero, it will default to 1.5 MB for the server and 15 MB for the client.
MaxReceiveConnectionFlowControlWindow uint64
// MaxIncomingStreams is the maximum number of concurrent bidirectional streams that a peer is allowed to open.
// If not set, it will default to 100.
// If set to a negative value, it doesn't allow any bidirectional streams.
MaxIncomingStreams int
// MaxIncomingUniStreams is the maximum number of concurrent unidirectional streams that a peer is allowed to open.
// If not set, it will default to 100.
// If set to a negative value, it doesn't allow any unidirectional streams.
MaxIncomingUniStreams int
// The StatelessResetKey is used to generate stateless reset tokens.
// If no key is configured, sending of stateless resets is disabled.
StatelessResetKey []byte
// KeepAlive defines whether this peer will periodically send a packet to keep the connection alive.
KeepAlive bool
// QUIC Event Tracer.
// Warning: Experimental. This API should not be considered stable and will change soon.
QuicTracer quictrace.Tracer
}
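// For illustration only (hypothetical caller code, not part of this file):
// a minimal sketch of filling in the timeout-related fields documented above.
// Zero values fall back to the documented defaults (10s handshake, 30s idle).
//
//	cfg := &Config{
//		HandshakeTimeout: 10 * time.Second, // 0 would mean the 10s default
//		IdleTimeout:      30 * time.Second, // 0 would mean the 30s default
//		KeepAlive:        true,             // send periodic packets to keep the connection alive
//	}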
// A Listener for incoming QUIC connections
type Listener interface {
// Close the server. All active sessions will be closed.
Close() error
// Addr returns the local network addr that the server is listening on.
Addr() net.Addr
// Accept returns new sessions. It should be called in a loop.
Accept(context.Context) (Session, error)
}
// An EarlyListener listens for incoming QUIC connections,
// and returns them before the handshake completes.
type EarlyListener interface {
// Close the server. All active sessions will be closed.
Close() error
// Addr returns the local network addr that the server is listening on.
Addr() net.Addr
// Accept returns new early sessions. It should be called in a loop.
Accept(context.Context) (EarlySession, error)
}
| 1 | 8,407 | Thinking: Maybe it would be more useful to define this in terms of RTTs. That would also make it easier to switch this to a bool, since we could then pick a reasonable default value. Unless of course an attacker could influence our RTT estimate. Is that the case? | lucas-clemente-quic-go | go |
@@ -1,4 +1,3 @@
-// Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package packagemanagement
import (
"context"
"fmt"
"log"
"os/exec"
"path"
"regexp"
"sync"
"time"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/junitxml"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/utils"
"github.com/kylelemons/godebug/pretty"
api "google.golang.org/api/compute/v1"
osconfigpb "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/google.golang.org/genproto/googleapis/cloud/osconfig/v1alpha1"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
osconfigserver "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/osconfig_server"
)
const testSuiteName = "PackageManagementTests"
// TODO: Should these be configurable via flags?
const testProject = "compute-image-test-pool-001"
const testZone = "us-central1-c"
var (
// dpkg-query
dpkgquery = "/usr/bin/dpkg-query"
dpkgqueryArgs = []string{"-W"}
serviceAccountEmail = "[email protected]"
serviceAccountScopes = []string{
"https://www.googleapis.com/auth/cloud-platform",
}
writeToSerialConsole = " | sudo tee /dev/ttyS0"
dump = &pretty.Config{IncludeUnexported: true}
)
type packageManagementTestSetup struct {
image string
name string
packageType []string
shortName string
startup *api.MetadataItems
}
// TestSuite is a PackageManagementTests test suite.
func TestSuite(ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite, logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp) {
defer tswg.Done()
if testSuiteRegex != nil && !testSuiteRegex.MatchString(testSuiteName) {
return
}
testSuite := junitxml.NewTestSuite(testSuiteName)
defer testSuite.Finish(testSuites)
logger.Printf("Running TestSuite %q", testSuite.Name)
testSetup := []*packageManagementTestSetup{
// Debian images.
&packageManagementTestSetup{
image: "projects/debian-cloud/global/images/family/debian-9",
packageType: []string{"deb"},
shortName: "debian",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &utils.InstallOSConfigDeb,
},
},
}
var wg sync.WaitGroup
tests := make(chan *junitxml.TestCase)
for _, setup := range testSetup {
wg.Add(1)
go packageManagementTestCase(ctx, setup, tests, &wg, logger, testCaseRegex)
}
go func() {
wg.Wait()
close(tests)
}()
for ret := range tests {
testSuite.TestCase = append(testSuite.TestCase, ret)
}
logger.Printf("Finished TestSuite %q", testSuite.Name)
}
func runCreateOsConfigTest(ctx context.Context, testCase *junitxml.TestCase, testSetup *packageManagementTestSetup, logger *log.Logger) {
oc := &osconfigserver.OsConfig{
&osconfigpb.OsConfig{
Name: "createosconfig-test-osconfig",
},
}
logger.Printf("create osconfig request:\n%s\n\n", dump.Sprint(oc))
parent := fmt.Sprintf("projects/%s", testProject)
res, err := osconfigserver.CreateOsConfig(ctx, logger, oc, parent)
defer cleanuposconfig(ctx, testCase, logger, oc)
if err != nil {
testCase.WriteFailure("error while creating osconfig:\n%s\n\n", err)
return
}
logger.Printf("CreateOsConfig response:\n%s\n\n", dump.Sprint(res))
}
func runPackageInstallTest(ctx context.Context, testCase *junitxml.TestCase, testSetup *packageManagementTestSetup, logger *log.Logger) {
osConfig, err := osconfigserver.JsonToOsConfig(packageInstallTestOsConfigString, logger)
if err != nil {
testCase.WriteFailure("error while converting json to osconfig: \n%s\n", err)
return
}
oc := &osconfigserver.OsConfig{OsConfig: osConfig}
parent := fmt.Sprintf("projects/%s", testProject)
_, err = osconfigserver.CreateOsConfig(ctx, logger, oc, parent)
if err != nil {
testCase.WriteFailure("error while creating osconfig: \n%s\n", err)
return
}
defer cleanuposconfig(ctx, testCase, logger, oc)
assignment, err := osconfigserver.JsonToAssignment(packageInstallTestAssignmentString, logger)
if err != nil {
testCase.WriteFailure("error while converting json to assignment: \n%s\n", err)
return
}
assign := &osconfigserver.Assignment{Assignment: assignment}
res, err := osconfigserver.CreateAssignment(ctx, logger, assign, parent)
if err != nil {
testCase.WriteFailure("error while creating assignment: \n%s\n", err)
return
}
_ = res
defer cleanupassignment(ctx, testCase, logger, assign)
client, err := daisyCompute.NewClient(ctx)
if err != nil {
testCase.WriteFailure("error creating client: %v", err)
return
}
testCase.Logf("Creating instance with image %q", testSetup.image)
testSetup.name = fmt.Sprintf("osconfig-test-%s-%s", path.Base(testSetup.image), "packageinstalltest")
i := &api.Instance{
Name: testSetup.name,
MachineType: fmt.Sprintf("projects/%s/zones/%s/machineTypes/n1-standard-1", testProject, testZone),
NetworkInterfaces: []*api.NetworkInterface{
&api.NetworkInterface{
Network: "global/networks/default",
AccessConfigs: []*api.AccessConfig{
&api.AccessConfig{
Type: "ONE_TO_ONE_NAT",
},
},
},
},
Metadata: &api.Metadata{
Items: []*api.MetadataItems{
testSetup.startup,
},
},
Disks: []*api.AttachedDisk{
&api.AttachedDisk{
AutoDelete: true,
Boot: true,
InitializeParams: &api.AttachedDiskInitializeParams{
SourceImage: testSetup.image,
},
},
},
ServiceAccounts: []*api.ServiceAccount{
&api.ServiceAccount{
Email: serviceAccountEmail,
Scopes: serviceAccountScopes,
},
},
}
inst, err := compute.CreateInstance(client, testProject, testZone, i)
if err != nil {
testCase.WriteFailure("Error creating instance: %v", err)
return
}
defer inst.Cleanup()
testCase.Logf("Waiting for agent install to complete")
if err := inst.WaitForSerialOutput("osconfig install done", 1, 5*time.Second, 5*time.Minute); err != nil {
testCase.WriteFailure("Error waiting for osconfig agent install: %v", err)
return
}
testCase.Logf("Agent installed successfully")
// allow agent to make the lookupconfig call and install the package
time.Sleep(1 * time.Minute)
// TODO refactor to remove hardcoding of package name
listPkgCmd := getDebListCmd()
sshCmd := getGcloudSshCmd(testZone, testSetup.name, listPkgCmd, logger)
out, err := run(sshCmd, logger)
if err != nil {
testCase.WriteFailure("Error running verification command: %v", err)
return
}
_ = out
if err = inst.WaitForSerialOutput("cowsay", 1, 5*time.Second, 5*time.Minute); err != nil {
testCase.WriteFailure("Error waiting for assertion: %v", err)
return
}
}
func getGcloudSshCmd(zone string, instance string, pkgManagerCommand string, logger *log.Logger) *exec.Cmd {
return exec.Command("gcloud", []string{"compute", "ssh", "--zone", fmt.Sprintf("%s", zone), instance, "--command", fmt.Sprintf("%s %s\n", pkgManagerCommand, writeToSerialConsole)}...)
}
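// For illustration, with zone "us-central1-c", an instance named "inst" and the
// dpkg list command from getDebListCmd, this builds approximately:
//   gcloud compute ssh --zone us-central1-c inst --command "/usr/bin/dpkg-query -W | sudo tee /dev/ttyS0"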
//TODO move this to common library
func run(cmd *exec.Cmd, logger *log.Logger) ([]byte, error) {
logger.Printf("Running %q with args %q\n", cmd.Path, cmd.Args[1:])
out, err := cmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("error running %q with args %q: %v, stdout: %s", cmd.Path, cmd.Args, err, out)
}
return out, nil
}
func packageManagementTestCase(ctx context.Context, testSetup *packageManagementTestSetup, tests chan *junitxml.TestCase, wg *sync.WaitGroup, logger *log.Logger, regex *regexp.Regexp) {
defer wg.Done()
createOsConfigTest := junitxml.NewTestCase(testSuiteName, "[CreateOsConfig] Create OsConfig")
packageInstallTest := junitxml.NewTestCase(testSuiteName, "[PackageInstall] Package installation")
for tc, f := range map[*junitxml.TestCase]func(context.Context, *junitxml.TestCase, *packageManagementTestSetup, *log.Logger){
createOsConfigTest: runCreateOsConfigTest,
packageInstallTest: runPackageInstallTest,
} {
if tc.FilterTestCase(regex) {
tc.Finish(tests)
} else {
logger.Printf("Running TestCase %s.%q", tc.Classname, tc.Name)
f(ctx, tc, testSetup, logger)
tc.Finish(tests)
logger.Printf("TestCase %s.%q finished in %fs", tc.Classname, tc.Name, tc.Time)
}
}
}
func cleanuposconfig(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, oc *osconfigserver.OsConfig) {
err := oc.Cleanup(ctx, logger)
if err != nil {
testCase.WriteFailure(fmt.Sprintf("error while deleting osconfig: %s", err))
}
}
func cleanupassignment(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, assignment *osconfigserver.Assignment) {
err := assignment.Cleanup(ctx, logger)
if err != nil {
testCase.WriteFailure(fmt.Sprintf("error while deleting assignment: %s", err))
}
}
func getDebListCmd() string {
cmd := dpkgquery
for _, arg := range dpkgqueryArgs {
cmd = fmt.Sprintf("%s %s", cmd, arg)
}
return cmd
}
| 1 | 8,235 | Why is this line deleted? | GoogleCloudPlatform-compute-image-tools | go |
@@ -115,9 +115,9 @@ void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> &
std::deque<std::shared_ptr<nano::block>> rebroadcast_bundle;
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> confirm_req_bundle;
- // Confirm frontiers when there aren't many confirmations already pending
+ // Confirm frontiers when there aren't many confirmations already pending and the node has finished initial bootstrap
lock_a.unlock ();
- if (node.pending_confirmation_height.size () < confirmed_frontiers_max_pending_cut_off)
+ if (node.pending_confirmation_height.size () < confirmed_frontiers_max_pending_cut_off && node.store.block_count (transaction).sum () >= node.ledger.bootstrap_weight_max_blocks)
{
confirm_frontiers (transaction);
} | 1 | #include <nano/node/active_transactions.hpp>
#include <nano/node/node.hpp>
#include <boost/pool/pool_alloc.hpp>
#include <numeric>
size_t constexpr nano::active_transactions::max_broadcast_queue;
using namespace std::chrono;
nano::active_transactions::active_transactions (nano::node & node_a) :
node (node_a),
multipliers_cb (20, 1.),
trended_active_difficulty (node.network_params.network.publish_threshold),
next_frontier_check (steady_clock::now () + (node_a.flags.delay_frontier_confirmation_height_updating ? 60s : 0s)),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::request_loop);
request_loop ();
})
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
}
nano::active_transactions::~active_transactions ()
{
stop ();
}
void nano::active_transactions::confirm_frontiers (nano::transaction const & transaction_a)
{
// Limit maximum count of elections to start
bool representative (node.config.enable_voting && node.wallets.reps_count > 0);
/* Check less frequently for non-representative nodes */
auto representative_factor = representative ? 3min : 15min;
// Decrease check time for test network
auto is_test_network = node.network_params.network.is_test_network ();
int test_network_factor = is_test_network ? 1000 : 1;
auto roots_size = size ();
auto max_elections = (max_broadcast_queue / 4);
std::unique_lock<std::mutex> lk (mutex);
auto check_time_exceeded = std::chrono::steady_clock::now () >= next_frontier_check;
lk.unlock ();
auto low_active_elections = roots_size < max_elections;
// To minimise dropping real-time transactions, set the maximum number of elections
// for cementing frontiers to half the total active election maximum.
const auto max_active = node.config.active_elections_size / 2;
if (roots_size <= max_active && (check_time_exceeded || (!is_test_network && low_active_elections)))
{
// When the number of active elections is low, increase the max number of elections for setting confirmation height.
if (max_active > roots_size + max_elections)
{
max_elections = max_active - roots_size;
}
// Spend time prioritizing accounts to reduce voting traffic
auto time_spent_prioritizing_ledger_accounts = (frontiers_fully_confirmed ? std::chrono::milliseconds (200) : std::chrono::seconds (2));
auto time_spent_prioritizing_wallet_accounts = std::chrono::milliseconds (50);
prioritize_frontiers_for_confirmation (transaction_a, is_test_network ? std::chrono::milliseconds (50) : time_spent_prioritizing_ledger_accounts, time_spent_prioritizing_wallet_accounts);
size_t elections_count (0);
lk.lock ();
auto start_elections_for_prioritized_frontiers = [&transaction_a, &elections_count, max_elections, &lk, &representative, this](prioritize_num_uncemented & cementable_frontiers) {
while (!cementable_frontiers.empty () && !this->stopped && elections_count < max_elections)
{
auto cementable_account_front_it = cementable_frontiers.get<1> ().begin ();
auto cementable_account = *cementable_account_front_it;
cementable_frontiers.get<1> ().erase (cementable_account_front_it);
lk.unlock ();
nano::account_info info;
auto error = node.store.account_get (transaction_a, cementable_account.account, info);
release_assert (!error);
uint64_t confirmation_height;
error = node.store.confirmation_height_get (transaction_a, cementable_account.account, confirmation_height);
release_assert (!error);
if (info.block_count > confirmation_height && !this->node.pending_confirmation_height.is_processing_block (info.head))
{
auto block (this->node.store.block_get (transaction_a, info.head));
if (!this->start (block))
{
++elections_count;
// Calculate votes for local representatives
if (representative)
{
this->node.block_processor.generator.add (block->hash ());
}
}
}
lk.lock ();
}
};
start_elections_for_prioritized_frontiers (priority_cementable_frontiers);
start_elections_for_prioritized_frontiers (priority_wallet_cementable_frontiers);
frontiers_fully_confirmed = (elections_count < max_elections);
// 4 times slower check if all frontiers were confirmed
auto fully_confirmed_factor = frontiers_fully_confirmed ? 4 : 1;
// Calculate next check time
next_frontier_check = steady_clock::now () + (representative_factor * fully_confirmed_factor / test_network_factor);
}
}
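// Illustration of the interval computed above: a non-representative node on the
// live network with all frontiers confirmed waits 15min * 4 = 60min before the
// next pass, while a voting representative with unconfirmed frontiers waits only
// 3min. On the test network both figures are further divided by 1000.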
void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> & lock_a)
{
std::unordered_set<nano::qualified_root> inactive;
auto transaction (node.store.tx_begin_read ());
unsigned unconfirmed_count (0);
unsigned unconfirmed_request_count (0);
unsigned could_fit_delay = node.network_params.network.is_test_network () ? high_confirmation_request_count - 1 : 1;
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::vector<std::pair<nano::block_hash, nano::block_hash>>> requests_bundle;
std::deque<std::shared_ptr<nano::block>> rebroadcast_bundle;
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> confirm_req_bundle;
// Confirm frontiers when there aren't many confirmations already pending
lock_a.unlock ();
if (node.pending_confirmation_height.size () < confirmed_frontiers_max_pending_cut_off)
{
confirm_frontiers (transaction);
}
lock_a.lock ();
auto roots_size (roots.size ());
for (auto i (roots.get<1> ().begin ()), n (roots.get<1> ().end ()); i != n; ++i)
{
auto root (i->root);
auto election_l (i->election);
if ((election_l->confirmed || election_l->stopped) && election_l->confirmation_request_count >= minimum_confirmation_request_count - 1)
{
if (election_l->confirmed)
{
add_confirmed (election_l->status, root);
}
inactive.insert (root);
}
else
{
if (election_l->confirmation_request_count > high_confirmation_request_count)
{
++unconfirmed_count;
unconfirmed_request_count += election_l->confirmation_request_count;
// Log votes for very long unconfirmed elections
if (election_l->confirmation_request_count % 50 == 1)
{
auto tally_l (election_l->tally (transaction));
election_l->log_votes (tally_l);
}
/* Escalation for long unconfirmed elections
Start new elections for previous block & source
if there are less than 100 active elections */
if (election_l->confirmation_request_count % high_confirmation_request_count == 1 && roots_size < 100 && !node.network_params.network.is_test_network ())
{
bool escalated (false);
std::shared_ptr<nano::block> previous;
auto previous_hash (election_l->status.winner->previous ());
if (!previous_hash.is_zero ())
{
previous = node.store.block_get (transaction, previous_hash);
if (previous != nullptr && blocks.find (previous_hash) == blocks.end () && !node.block_confirmed_or_being_confirmed (transaction, previous_hash))
{
add (std::move (previous));
escalated = true;
}
}
/* If the previous block doesn't exist or isn't committed yet, block_source can cause a segfault for state blocks,
so the source check can only be done if previous != nullptr or previous is 0 (open account) */
if (previous_hash.is_zero () || previous != nullptr)
{
auto source_hash (node.ledger.block_source (transaction, *election_l->status.winner));
if (!source_hash.is_zero () && source_hash != previous_hash && blocks.find (source_hash) == blocks.end ())
{
auto source (node.store.block_get (transaction, source_hash));
if (source != nullptr && !node.block_confirmed_or_being_confirmed (transaction, source_hash))
{
add (std::move (source));
escalated = true;
}
}
}
if (escalated)
{
election_l->update_dependent ();
}
}
}
if (election_l->confirmation_request_count < high_confirmation_request_count || election_l->confirmation_request_count % high_confirmation_request_count == could_fit_delay)
{
if (node.ledger.could_fit (transaction, *election_l->status.winner))
{
// Broadcast winner
if (rebroadcast_bundle.size () < max_broadcast_queue)
{
rebroadcast_bundle.push_back (election_l->status.winner);
}
}
else
{
if (election_l->confirmation_request_count != 0)
{
election_l->stop ();
inactive.insert (root);
}
}
}
auto rep_channels (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
auto reps (node.rep_crawler.representatives (std::numeric_limits<size_t>::max ()));
// Add all rep endpoints that haven't already voted. We use a set since multiple
// reps may exist on an endpoint.
std::unordered_set<std::shared_ptr<nano::transport::channel>> channels;
for (auto & rep : reps)
{
if (election_l->last_votes.find (rep.account) == election_l->last_votes.end ())
{
channels.insert (rep.channel);
if (node.config.logging.vote_logging ())
{
node.logger.try_log ("Representative did not respond to confirm_req, retrying: ", rep.account.to_account ());
}
}
}
rep_channels->insert (rep_channels->end (), channels.begin (), channels.end ());
bool low_reps_weight (rep_channels->empty () || node.rep_crawler.total_weight () < node.config.online_weight_minimum.number ());
if (low_reps_weight && roots_size <= 5 && !node.network_params.network.is_test_network ())
{
// Spam mode
auto deque_l (node.network.udp_channels.random_set (100));
auto vec (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto i : deque_l)
{
vec->push_back (i);
}
confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, vec));
}
else
{
auto single_confirm_req_channels (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto & rep : *rep_channels)
{
if (rep->get_network_version () >= nano::tcp_realtime_protocol_version_min)
{
// Send batch request to peers supporting confirm_req by hash + root
auto rep_request (requests_bundle.find (rep));
auto block (election_l->status.winner);
auto root_hash (std::make_pair (block->hash (), block->root ()));
if (rep_request == requests_bundle.end ())
{
if (requests_bundle.size () < max_broadcast_queue)
{
std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash };
requests_bundle.insert (std::make_pair (rep, insert_vector));
}
}
else if (rep_request->second.size () < max_broadcast_queue * nano::network::confirm_req_hashes_max)
{
rep_request->second.push_back (root_hash);
}
}
else
{
single_confirm_req_channels->push_back (rep);
}
}
// broadcast_confirm_req_base modifies reps, so we clone it once to avoid aliasing
if (confirm_req_bundle.size () < max_broadcast_queue && !single_confirm_req_channels->empty ())
{
confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, single_confirm_req_channels));
}
}
}
++election_l->confirmation_request_count;
}
lock_a.unlock ();
// Rebroadcast unconfirmed blocks
if (!rebroadcast_bundle.empty ())
{
node.network.flood_block_batch (std::move (rebroadcast_bundle));
}
// Batch confirmation request
if (!requests_bundle.empty ())
{
node.network.broadcast_confirm_req_batch (requests_bundle, 50);
}
//confirm_req broadcast
if (!confirm_req_bundle.empty ())
{
node.network.broadcast_confirm_req_batch (confirm_req_bundle);
}
lock_a.lock ();
// Erase inactive elections
for (auto i (inactive.begin ()), n (inactive.end ()); i != n; ++i)
{
auto root_it (roots.find (*i));
if (root_it != roots.end ())
{
root_it->election->clear_blocks ();
root_it->election->clear_dependent ();
roots.erase (root_it);
}
}
long_unconfirmed_size = unconfirmed_count;
if (unconfirmed_count > 0)
{
node.logger.try_log (boost::str (boost::format ("%1% blocks have been unconfirmed averaging %2% confirmation requests") % unconfirmed_count % (unconfirmed_request_count / unconfirmed_count)));
}
}
void nano::active_transactions::request_loop ()
{
std::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();
// The wallets and active_transactions objects are mutually dependent, so we need a fully
// constructed node before proceeding.
this->node.node_initialized_latch.wait ();
lock.lock ();
while (!stopped)
{
request_confirm (lock);
update_active_difficulty (lock);
// This prevents unnecessary waiting if stopped is set in-between the above check and now
if (stopped)
{
break;
}
const auto extra_delay (std::min (roots.size (), max_broadcast_queue) * node.network.broadcast_interval_ms * 2);
condition.wait_for (lock, std::chrono::milliseconds (node.network_params.network.request_interval_ms + extra_delay));
}
}
void nano::active_transactions::prioritize_account_for_confirmation (nano::active_transactions::prioritize_num_uncemented & cementable_frontiers_a, size_t & cementable_frontiers_size_a, nano::account const & account_a, nano::account_info const & info_a, uint64_t confirmation_height)
{
if (info_a.block_count > confirmation_height && !node.pending_confirmation_height.is_processing_block (info_a.head))
{
auto num_uncemented = info_a.block_count - confirmation_height;
std::lock_guard<std::mutex> guard (mutex);
auto it = cementable_frontiers_a.find (account_a);
if (it != cementable_frontiers_a.end ())
{
if (it->blocks_uncemented != num_uncemented)
{
// Account already exists and there is now a different uncemented block count so update it in the container
cementable_frontiers_a.modify (it, [num_uncemented](nano::cementable_account & info) {
info.blocks_uncemented = num_uncemented;
});
}
}
else
{
assert (cementable_frontiers_size_a <= max_priority_cementable_frontiers);
if (cementable_frontiers_size_a == max_priority_cementable_frontiers)
{
// The maximum number of frontiers stored has been reached. Check if the current frontier
// has more uncemented blocks than the lowest uncemented frontier in the collection; if so, replace it.
auto least_uncemented_frontier_it = cementable_frontiers_a.get<1> ().end ();
--least_uncemented_frontier_it;
if (num_uncemented > least_uncemented_frontier_it->blocks_uncemented)
{
cementable_frontiers_a.get<1> ().erase (least_uncemented_frontier_it);
cementable_frontiers_a.emplace (account_a, num_uncemented);
}
}
else
{
cementable_frontiers_a.emplace (account_a, num_uncemented);
}
}
cementable_frontiers_size_a = cementable_frontiers_a.size ();
}
}
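// Illustration of the replacement policy above (not part of the original code):
// with max_priority_cementable_frontiers == 3 and stored uncemented counts
// {10, 7, 4}, a new account with 6 uncemented blocks evicts the entry with 4,
// while a new account with only 3 uncemented blocks is not added at all.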
void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::transaction const & transaction_a, std::chrono::milliseconds ledger_accounts_time_a, std::chrono::milliseconds wallet_account_time_a)
{
// Don't try to prioritize when there are a large number of pending confirmation heights as blocks can be cemented in the meantime, making the prioritization less reliable
if (node.pending_confirmation_height.size () < confirmed_frontiers_max_pending_cut_off)
{
size_t priority_cementable_frontiers_size;
size_t priority_wallet_cementable_frontiers_size;
{
std::lock_guard<std::mutex> guard (mutex);
priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
priority_wallet_cementable_frontiers_size = priority_wallet_cementable_frontiers.size ();
}
nano::timer<std::chrono::milliseconds> wallet_account_timer;
wallet_account_timer.start ();
if (!skip_wallets)
{
// Prioritize wallet accounts first
{
std::lock_guard<std::mutex> lock (node.wallets.mutex);
auto wallet_transaction (node.wallets.tx_begin_read ());
auto const & items = node.wallets.items;
for (auto item_it = items.cbegin (); item_it != items.cend (); ++item_it)
{
// Skip this wallet if it has been traversed already while there are others still awaiting
if (wallet_accounts_already_iterated.find (item_it->first) != wallet_accounts_already_iterated.end ())
{
continue;
}
nano::account_info info;
auto & wallet (item_it->second);
std::lock_guard<std::recursive_mutex> wallet_lock (wallet->store.mutex);
auto & next_wallet_frontier_account = next_wallet_frontier_accounts.emplace (item_it->first, wallet_store::special_count).first->second;
auto i (wallet->store.begin (wallet_transaction, next_wallet_frontier_account));
auto n (wallet->store.end ());
uint64_t confirmation_height = 0;
for (; i != n; ++i)
{
auto & account (i->first);
if (!node.store.account_get (transaction_a, account, info) && !node.store.confirmation_height_get (transaction_a, account, confirmation_height))
{
							// If it exists in the normal priority collection, delete it from there.
auto it = priority_cementable_frontiers.find (account);
if (it != priority_cementable_frontiers.end ())
{
std::lock_guard<std::mutex> guard (mutex);
priority_cementable_frontiers.erase (it);
priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
}
prioritize_account_for_confirmation (priority_wallet_cementable_frontiers, priority_wallet_cementable_frontiers_size, account, info, confirmation_height);
if (wallet_account_timer.since_start () >= wallet_account_time_a)
{
break;
}
}
next_wallet_frontier_account = account.number () + 1;
}
// Go back to the beginning when we have reached the end of the wallet accounts for this wallet
if (i == n)
{
wallet_accounts_already_iterated.emplace (item_it->first);
next_wallet_frontier_accounts.at (item_it->first) = wallet_store::special_count;
// Skip wallet accounts when they have all been traversed
if (std::next (item_it) == items.cend ())
{
wallet_accounts_already_iterated.clear ();
skip_wallets = true;
}
}
}
}
}
nano::timer<std::chrono::milliseconds> timer;
timer.start ();
auto i (node.store.latest_begin (transaction_a, next_frontier_account));
auto n (node.store.latest_end ());
uint64_t confirmation_height = 0;
for (; i != n && !stopped; ++i)
{
auto const & account (i->first);
auto const & info (i->second);
if (priority_wallet_cementable_frontiers.find (account) == priority_wallet_cementable_frontiers.end ())
{
if (!node.store.confirmation_height_get (transaction_a, account, confirmation_height))
{
prioritize_account_for_confirmation (priority_cementable_frontiers, priority_cementable_frontiers_size, account, info, confirmation_height);
}
}
next_frontier_account = account.number () + 1;
if (timer.since_start () >= ledger_accounts_time_a)
{
break;
}
}
// Go back to the beginning when we have reached the end of the accounts and start with wallet accounts next time
if (i == n)
{
next_frontier_account = 0;
skip_wallets = false;
}
}
}
void nano::active_transactions::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
stopped = true;
lock.unlock ();
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
lock.lock ();
roots.clear ();
}
bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
std::lock_guard<std::mutex> lock (mutex);
return add (block_a, confirmation_action_a);
}
bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
auto error (true);
if (!stopped)
{
auto root (block_a->qualified_root ());
auto existing (roots.find (root));
if (existing == roots.end () && confirmed_set.get<1> ().find (root) == confirmed_set.get<1> ().end ())
{
auto hash (block_a->hash ());
auto election (nano::make_shared<nano::election> (node, block_a, confirmation_action_a));
uint64_t difficulty (0);
error = nano::work_validate (*block_a, &difficulty);
release_assert (!error);
roots.insert (nano::conflict_info{ root, difficulty, difficulty, election });
blocks.insert (std::make_pair (hash, election));
adjust_difficulty (hash);
}
if (roots.size () >= node.config.active_elections_size)
{
flush_lowest ();
}
}
return error;
}
// Validate a vote and apply it to the current election if one exists
bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool single_lock)
{
std::shared_ptr<nano::election> election;
bool replay (false);
bool processed (false);
{
std::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
}
for (auto vote_block : vote_a->blocks)
{
nano::election_vote_result result;
if (vote_block.which ())
{
auto block_hash (boost::get<nano::block_hash> (vote_block));
auto existing (blocks.find (block_hash));
if (existing != blocks.end ())
{
result = existing->second->vote (vote_a->account, vote_a->sequence, block_hash);
}
}
else
{
auto block (boost::get<std::shared_ptr<nano::block>> (vote_block));
auto existing (roots.find (block->qualified_root ()));
if (existing != roots.end ())
{
result = existing->election->vote (vote_a->account, vote_a->sequence, block->hash ());
}
}
replay = replay || result.replay;
processed = processed || result.processed;
}
}
if (processed)
{
node.network.flood_vote (vote_a);
}
return replay;
}
bool nano::active_transactions::active (nano::qualified_root const & root_a)
{
std::lock_guard<std::mutex> lock (mutex);
return roots.find (root_a) != roots.end ();
}
bool nano::active_transactions::active (nano::block const & block_a)
{
return active (block_a.qualified_root ());
}
void nano::active_transactions::update_difficulty (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (block_a.qualified_root ()));
if (existing != roots.end ())
{
uint64_t difficulty;
auto error (nano::work_validate (block_a, &difficulty));
(void)error;
assert (!error);
if (difficulty > existing->difficulty)
{
roots.modify (existing, [difficulty](nano::conflict_info & info_a) {
info_a.difficulty = difficulty;
});
adjust_difficulty (block_a.hash ());
}
}
}
void nano::active_transactions::adjust_difficulty (nano::block_hash const & hash_a)
{
assert (!mutex.try_lock ());
std::deque<std::pair<nano::block_hash, int64_t>> remaining_blocks;
remaining_blocks.emplace_back (hash_a, 0);
std::unordered_set<nano::block_hash> processed_blocks;
std::vector<std::pair<nano::qualified_root, int64_t>> elections_list;
double sum (0.);
while (!remaining_blocks.empty ())
{
auto const & item (remaining_blocks.front ());
auto hash (item.first);
auto level (item.second);
if (processed_blocks.find (hash) == processed_blocks.end ())
{
auto existing (blocks.find (hash));
if (existing != blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash)
{
auto previous (existing->second->status.winner->previous ());
if (!previous.is_zero ())
{
remaining_blocks.emplace_back (previous, level + 1);
}
auto source (existing->second->status.winner->source ());
if (!source.is_zero () && source != previous)
{
remaining_blocks.emplace_back (source, level + 1);
}
auto link (existing->second->status.winner->link ());
if (!link.is_zero () && !node.ledger.is_epoch_link (link) && link != previous)
{
remaining_blocks.emplace_back (link, level + 1);
}
for (auto & dependent_block : existing->second->dependent_blocks)
{
remaining_blocks.emplace_back (dependent_block, level - 1);
}
processed_blocks.insert (hash);
nano::qualified_root root (previous, existing->second->status.winner->root ());
auto existing_root (roots.find (root));
if (existing_root != roots.end ())
{
sum += nano::difficulty::to_multiplier (existing_root->difficulty, node.network_params.network.publish_threshold);
elections_list.emplace_back (root, level);
}
}
}
remaining_blocks.pop_front ();
}
if (!elections_list.empty ())
{
double multiplier = sum / elections_list.size ();
uint64_t average = nano::difficulty::from_multiplier (multiplier, node.network_params.network.publish_threshold);
auto highest_level = elections_list.back ().second;
uint64_t divider = 1;
// Possible overflow check, will not occur for negative levels
if ((multiplier + highest_level) > 10000000000)
{
divider = static_cast<uint64_t> (((multiplier + highest_level) / 10000000000) + 1);
}
// Set adjusted difficulty
for (auto & item : elections_list)
{
auto existing_root (roots.find (item.first));
uint64_t difficulty_a = average + item.second / divider;
roots.modify (existing_root, [difficulty_a](nano::conflict_info & info_a) {
info_a.adjusted_difficulty = difficulty_a;
});
}
}
}
void nano::active_transactions::update_active_difficulty (std::unique_lock<std::mutex> & lock_a)
{
assert (lock_a.mutex () == &mutex && lock_a.owns_lock ());
double multiplier (1.);
if (!roots.empty ())
{
std::vector<uint64_t> active_root_difficulties;
active_root_difficulties.reserve (roots.size ());
auto min_election_time (std::chrono::milliseconds (node.network_params.network.request_interval_ms));
auto cutoff (std::chrono::steady_clock::now () - min_election_time);
for (auto & root : roots)
{
if (!root.election->confirmed && !root.election->stopped && root.election->election_start < cutoff)
{
active_root_difficulties.push_back (root.adjusted_difficulty);
}
}
if (!active_root_difficulties.empty ())
{
multiplier = nano::difficulty::to_multiplier (active_root_difficulties[active_root_difficulties.size () / 2], node.network_params.network.publish_threshold);
}
}
assert (multiplier >= 1);
multipliers_cb.push_front (multiplier);
auto sum (std::accumulate (multipliers_cb.begin (), multipliers_cb.end (), double(0)));
auto difficulty = nano::difficulty::from_multiplier (sum / multipliers_cb.size (), node.network_params.network.publish_threshold);
assert (difficulty >= node.network_params.network.publish_threshold);
trended_active_difficulty = difficulty;
node.observers.difficulty.notify (trended_active_difficulty);
}
uint64_t nano::active_transactions::active_difficulty ()
{
std::lock_guard<std::mutex> lock (mutex);
return trended_active_difficulty;
}
// List of active blocks in elections
std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks (bool single_lock)
{
std::deque<std::shared_ptr<nano::block>> result;
std::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
}
for (auto i (roots.begin ()), n (roots.end ()); i != n; ++i)
{
result.push_back (i->election->status.winner);
}
return result;
}
std::deque<nano::election_status> nano::active_transactions::list_confirmed ()
{
std::lock_guard<std::mutex> lock (mutex);
return confirmed;
}
void nano::active_transactions::add_confirmed (nano::election_status const & status_a, nano::qualified_root const & root_a)
{
confirmed.push_back (status_a);
auto inserted (confirmed_set.insert (nano::confirmed_set_info{ std::chrono::steady_clock::now (), root_a }));
if (confirmed.size () > node.config.confirmation_history_size)
{
confirmed.pop_front ();
if (inserted.second)
{
confirmed_set.erase (confirmed_set.begin ());
}
}
}
void nano::active_transactions::erase (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto root_it (roots.find (block_a.qualified_root ()));
if (root_it != roots.end ())
{
root_it->election->stop ();
root_it->election->clear_blocks ();
root_it->election->clear_dependent ();
roots.erase (root_it);
node.logger.try_log (boost::str (boost::format ("Election erased for block block %1% root %2%") % block_a.hash ().to_string () % block_a.root ().to_string ()));
}
}
void nano::active_transactions::flush_lowest ()
{
size_t count (0);
assert (!roots.empty ());
auto & sorted_roots = roots.get<1> ();
for (auto it = sorted_roots.rbegin (); it != sorted_roots.rend ();)
{
if (count != 2)
{
auto election = it->election;
if (election->confirmation_request_count > high_confirmation_request_count && !election->confirmed && !election->stopped && !node.wallets.watcher.is_watched (it->root))
{
it = decltype (it){ sorted_roots.erase (std::next (it).base ()) };
election->stop ();
election->clear_blocks ();
election->clear_dependent ();
count++;
}
else
{
++it;
}
}
else
{
break;
}
}
}
bool nano::active_transactions::empty ()
{
std::lock_guard<std::mutex> lock (mutex);
return roots.empty ();
}
size_t nano::active_transactions::size ()
{
std::lock_guard<std::mutex> lock (mutex);
return roots.size ();
}
bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (block_a->qualified_root ()));
auto result (true);
if (existing != roots.end ())
{
auto election (existing->election);
result = election->publish (block_a);
if (!result && !election->confirmed)
{
blocks.insert (std::make_pair (block_a->hash (), election));
}
}
return result;
}
void nano::active_transactions::confirm_block (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a, nano::block_sideband const & sideband_a)
{
auto hash (block_a->hash ());
std::unique_lock<std::mutex> lock (mutex);
auto existing (blocks.find (hash));
if (existing != blocks.end ())
{
if (!existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash)
{
existing->second->confirm_once (nano::election_status_type::active_confirmation_height);
}
}
else
{
lock.unlock ();
nano::account account (0);
nano::uint128_t amount (0);
bool is_state_send (false);
nano::account pending_account (0);
node.process_confirmed_data (transaction_a, block_a, hash, sideband_a, account, amount, is_state_send, pending_account);
node.observers.blocks.notify (nano::election_status{ block_a, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), nano::election_status_type::inactive_confirmation_height }, account, amount, is_state_send);
}
}
size_t nano::active_transactions::priority_cementable_frontiers_size ()
{
std::lock_guard<std::mutex> guard (mutex);
return priority_cementable_frontiers.size ();
}
size_t nano::active_transactions::priority_wallet_cementable_frontiers_size ()
{
std::lock_guard<std::mutex> guard (mutex);
return priority_wallet_cementable_frontiers.size ();
}
boost::circular_buffer<double> nano::active_transactions::difficulty_trend ()
{
std::lock_guard<std::mutex> guard (mutex);
return multipliers_cb;
}
nano::cementable_account::cementable_account (nano::account const & account_a, size_t blocks_uncemented_a) :
account (account_a), blocks_uncemented (blocks_uncemented_a)
{
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (active_transactions & active_transactions, const std::string & name)
{
size_t roots_count = 0;
size_t blocks_count = 0;
size_t confirmed_count = 0;
{
std::lock_guard<std::mutex> guard (active_transactions.mutex);
roots_count = active_transactions.roots.size ();
blocks_count = active_transactions.blocks.size ();
confirmed_count = active_transactions.confirmed.size ();
}
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "roots", roots_count, sizeof (decltype (active_transactions.roots)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (active_transactions.blocks)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "confirmed", confirmed_count, sizeof (decltype (active_transactions.confirmed)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "priority_wallet_cementable_frontiers_count", active_transactions.priority_wallet_cementable_frontiers_size (), sizeof (nano::cementable_account) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "priority_cementable_frontiers_count", active_transactions.priority_cementable_frontiers_size (), sizeof (nano::cementable_account) }));
return composite;
}
}
| 1 | 15,854 | minor typo: `boostrap` | nanocurrency-nano-node | cpp |
@@ -20,9 +20,12 @@
package com.netflix.iceberg;
import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ResidualEvaluator;
+import java.util.Iterator;
+
class BaseFileScanTask implements FileScanTask {
private final DataFile file;
private final String schemaString; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ResidualEvaluator;
class BaseFileScanTask implements FileScanTask {
private final DataFile file;
private final String schemaString;
private final String specString;
private final ResidualEvaluator residuals;
private transient PartitionSpec spec = null;
BaseFileScanTask(DataFile file, String schemaString, String specString, ResidualEvaluator residuals) {
this.file = file;
this.schemaString = schemaString;
this.specString = specString;
this.residuals = residuals;
}
@Override
public DataFile file() {
return file;
}
@Override
public PartitionSpec spec() {
if (spec == null) {
this.spec = PartitionSpecParser.fromJson(SchemaParser.fromJson(schemaString), specString);
}
return spec;
}
@Override
public long start() {
return 0;
}
@Override
public long length() {
return file.fileSizeInBytes();
}
@Override
public Expression residual() {
return residuals.residualFor(file.partition());
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("file", file.path())
.add("partition_data", file.partition())
.add("residual", residual())
.toString();
}
}
| 1 | 12,834 | Nit: please don't separate imports into groups. | apache-iceberg | java |
@@ -31,7 +31,10 @@ public class ApplicationSidebarToggleGroupSkin extends
final ToggleButton allCategoryButton = createSidebarToggleButton(tr("All"));
allCategoryButton.setId("allButton");
- allCategoryButton.setOnAction(event -> getControl().getOnAllCategorySelection().run());
+ allCategoryButton.setOnAction(event -> {
+ getControl().setSelectedElement(null);
+ getControl().getOnAllCategorySelection().run();
+ });
return Optional.of(allCategoryButton);
} | 1 | package org.phoenicis.javafx.components.skin;
import javafx.scene.control.ToggleButton;
import org.phoenicis.javafx.components.control.ApplicationSidebarToggleGroup;
import org.phoenicis.javafx.views.mainwindow.apps.ApplicationsSidebar;
import org.phoenicis.repository.dto.CategoryDTO;
import java.util.Optional;
import static org.phoenicis.configuration.localisation.Localisation.tr;
/**
* A {@link SidebarToggleGroupBaseSkin} implementation class used inside the {@link ApplicationsSidebar}
*/
public class ApplicationSidebarToggleGroupSkin extends
SidebarToggleGroupBaseSkin<CategoryDTO, ApplicationSidebarToggleGroup, ApplicationSidebarToggleGroupSkin> {
/**
* Constructor
*
* @param control The control belonging to the skin
*/
public ApplicationSidebarToggleGroupSkin(ApplicationSidebarToggleGroup control) {
super(control);
}
/**
* {@inheritDoc}
*/
@Override
protected Optional<ToggleButton> createAllButton() {
final ToggleButton allCategoryButton = createSidebarToggleButton(tr("All"));
allCategoryButton.setId("allButton");
allCategoryButton.setOnAction(event -> getControl().getOnAllCategorySelection().run());
return Optional.of(allCategoryButton);
}
/**
* {@inheritDoc}
*/
@Override
protected ToggleButton convertToToggleButton(CategoryDTO category) {
final ToggleButton categoryButton = createSidebarToggleButton(category.getName());
categoryButton.setId(String.format("%sButton", category.getId().toLowerCase()));
categoryButton.setOnAction(event -> getControl().getOnCategorySelection().accept(category));
return categoryButton;
}
}
| 1 | 12,439 | Wondering whether it would be nice to have something like `setNothingSelected`. | PhoenicisOrg-phoenicis | java |
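A rough sketch of what that suggestion could look like, assuming the method lives on the `ApplicationSidebarToggleGroup` control next to `setSelectedElement`; the name `setNothingSelected` is the reviewer's hypothetical, not existing Phoenicis API:

```java
// Hypothetical convenience method on ApplicationSidebarToggleGroup; it only
// wraps setSelectedElement(null) so callers can state the intent explicitly.
public void setNothingSelected() {
    setSelectedElement(null);
}
```

The `allCategoryButton` handler in the diff would then read `getControl().setNothingSelected()` instead of `getControl().setSelectedElement(null)`.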
@@ -141,7 +141,7 @@ func (f *FileCertificateSource) updateCertificateFromDisk() error {
f.Log.V(logf.DebugLevel).Info("key and certificate on disk have not changed")
return nil
}
- f.Log.Info("detected private key or certificate data on disk has changed. reloading certificate")
+ f.Log.V(logf.DebugLevel).Info("detected private key or certificate data on disk has changed. reloading certificate")
cert, err := tls.X509KeyPair(certData, keyData)
if err != nil { | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tls
import (
"bytes"
"crypto/tls"
"fmt"
"io/ioutil"
"sync"
"time"
"github.com/go-logr/logr"
crlog "sigs.k8s.io/controller-runtime/pkg/log"
logf "github.com/jetstack/cert-manager/pkg/logs"
)
// FileCertificateSource provides certificate data for a golang HTTP server by
// reloading data on disk periodically.
type FileCertificateSource struct {
// CertPath is the path to the TLS certificate.
// This file will be read periodically and will be used as the private key
// for TLS connections.
CertPath string
// KeyPath is the path to the private key.
// This file will be read periodically and will be used as the private key
// for TLS connections.
KeyPath string
// UpdateInterval is how often the CertPath and KeyPath will be checked for
// changes.
// If not specified, a default of 10s will be used.
UpdateInterval time.Duration
// MaxFailures is the maximum number of times a failure to read data from
// disk should be allowed before treating it as fatal.
// If not specified, a default of 12 will be used.
MaxFailures int
// Log is an optional logger to write informational and error messages to.
// If not specified, no messages will be logged.
Log logr.Logger
cachedCertificate *tls.Certificate
cachedCertBytes []byte
cachedKeyBytes []byte
lock sync.Mutex
}
const defaultUpdateInterval = time.Second * 10
const defaultMaxFailures = 12
var _ CertificateSource = &FileCertificateSource{}
func (f *FileCertificateSource) Run(stopCh <-chan struct{}) error {
if f.Log == nil {
f.Log = crlog.NullLogger{}
}
updateInterval := f.UpdateInterval
if updateInterval == 0 {
updateInterval = defaultUpdateInterval
}
maxFailures := f.MaxFailures
if maxFailures == 0 {
maxFailures = defaultMaxFailures
}
// read the certificate data for the first time immediately, but allow
// retrying if the first attempt fails
if err := f.updateCertificateFromDisk(); err != nil {
f.Log.Error(err, "failed to read certificate from disk")
}
failures := 0
ticker := time.NewTicker(updateInterval)
defer ticker.Stop()
for {
select {
case <-stopCh:
return nil
case <-ticker.C:
if err := f.updateCertificateFromDisk(); err != nil {
failures++
f.Log.Error(err, "failed to update certificate from disk", "failures", failures)
if failures >= maxFailures {
return fmt.Errorf("failed to update certificate from disk %d times: %v", failures, err)
}
continue
}
f.Log.V(logf.DebugLevel).Info("refreshed certificate from data on disk")
}
}
}
func (f *FileCertificateSource) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.cachedCertificate == nil {
return nil, ErrNotAvailable
}
return f.cachedCertificate, nil
}
func (f *FileCertificateSource) Healthy() bool {
return f.cachedCertificate != nil
}
// updateCertificateFromDisk will read private key and certificate data from
// disk and update the cached tls.Certificate if the data on disk has changed.
func (f *FileCertificateSource) updateCertificateFromDisk() error {
keyData, err := ioutil.ReadFile(f.KeyPath)
if err != nil {
return fmt.Errorf("failed to read keyPath: %w", err)
}
certData, err := ioutil.ReadFile(f.CertPath)
if err != nil {
return fmt.Errorf("failed to read certPath: %w", err)
}
f.lock.Lock()
defer f.lock.Unlock()
if bytes.Compare(keyData, f.cachedKeyBytes) == 0 && bytes.Compare(certData, f.cachedCertBytes) == 0 {
f.Log.V(logf.DebugLevel).Info("key and certificate on disk have not changed")
return nil
}
f.Log.Info("detected private key or certificate data on disk has changed. reloading certificate")
cert, err := tls.X509KeyPair(certData, keyData)
if err != nil {
return err
}
f.cachedCertBytes = certData
f.cachedKeyBytes = keyData
f.cachedCertificate = &cert
return nil
}
| 1 | 22,899 | I think this is a useful InfoLevel message. In production, if webhook client connections suddenly start failing due to a change of the serving cert, you'd want to see this information in the logs. | jetstack-cert-manager | go |
@@ -28,7 +28,7 @@ void AddTagProcessor::process(const cpp2::AddTagReq& req) {
auto version = time::TimeUtils::nowInMSeconds();
TagID tagId = autoIncrementId();
data.emplace_back(MetaUtils::indexKey(EntryType::TAG, req.get_tag_name()),
- std::string(reinterpret_cast<const char*>(&tagId), sizeof(tagId)));
+ folly::to<std::string>(tagId));
LOG(INFO) << "Add Tag " << req.get_tag_name() << ", tagId " << tagId;
data.emplace_back(MetaUtils::schemaTagKey(req.get_space_id(), tagId, version),
MetaUtils::schemaTagVal(req.get_tag_name(), req.get_schema())); | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "meta/processors/AddTagProcessor.h"
#include "time/TimeUtils.h"
namespace nebula {
namespace meta {
void AddTagProcessor::process(const cpp2::AddTagReq& req) {
if (spaceExist(req.get_space_id()) == Status::SpaceNotFound()) {
resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND);
onFinished();
return;
}
folly::SharedMutex::WriteHolder wHolder(LockUtils::tagLock());
auto ret = getTag(req.get_tag_name());
std::vector<kvstore::KV> data;
if (ret.ok()) {
resp_.set_id(to(ret.value(), EntryType::TAG));
resp_.set_code(cpp2::ErrorCode::E_TAG_EXISTED);
onFinished();
return;
}
auto version = time::TimeUtils::nowInMSeconds();
TagID tagId = autoIncrementId();
data.emplace_back(MetaUtils::indexKey(EntryType::TAG, req.get_tag_name()),
std::string(reinterpret_cast<const char*>(&tagId), sizeof(tagId)));
LOG(INFO) << "Add Tag " << req.get_tag_name() << ", tagId " << tagId;
data.emplace_back(MetaUtils::schemaTagKey(req.get_space_id(), tagId, version),
MetaUtils::schemaTagVal(req.get_tag_name(), req.get_schema()));
resp_.set_code(cpp2::ErrorCode::SUCCEEDED);
resp_.set_id(to(tagId, EntryType::TAG));
doPut(std::move(data));
}
StatusOr<TagID> AddTagProcessor::getTag(const std::string& tagName) {
auto indexKey = MetaUtils::indexKey(EntryType::TAG, tagName);
std::string val;
auto ret = kvstore_->get(kDefaultSpaceId_, kDefaultPartId_, indexKey, &val);
if (ret == kvstore::ResultCode::SUCCEEDED) {
try {
return folly::to<TagID>(val);
} catch (std::exception& e) {
LOG(ERROR) << "Convert failed for " << val << ", msg " << e.what();
}
}
return Status::Error("No Tag!");
}
} // namespace meta
} // namespace nebula
| 1 | 15,444 | Generally, when casting from an int to a char*, you can cast directly, because no errors should happen. | vesoft-inc-nebula | cpp |
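To make the contrast concrete, here is a small standalone sketch (not project code) of the two encodings being compared: the raw reinterpret_cast byte copy from the old line versus a textual conversion in the spirit of `folly::to<std::string>`:

```cpp
#include <cstdint>
#include <string>

int main() {
    int32_t tagId = 42;
    // Old encoding: copy the integer's 4 in-memory bytes verbatim; compact,
    // but endianness-dependent and unreadable in the store.
    std::string raw(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
    // New encoding: human-readable decimal text, equivalent in spirit to
    // folly::to<std::string>(tagId); this is what folly::to<TagID> parses back.
    std::string text = std::to_string(tagId);
    return (raw.size() == sizeof(tagId) && text == "42") ? 0 : 1;
}
```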
@@ -26,5 +26,12 @@ public class ServerInternals {
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
-
+
+
+  // Memory check retry interval when OOM, in ms
+  public static final long MEMORY_CHECK_INTERVAL = 1000*60*10;
+
+  // Max number of memory check retries
+ public static final int MEMORY_CHECK_RETRY_LIMIT = 100;
+
} | 1 | /*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.constants;
public class ServerInternals {
// Constants pertaining to the internal running of the Azkaban server
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
}
| 1 | 12,394 | Is 10 min too long? What do you think about 1 minute? Unless there are many jobs in this state, I don't expect the CPU overhead to be too high. | azkaban-azkaban | java |
@@ -28,10 +28,10 @@ type BackendServiceConfig struct {
ImageConfig ImageWithPortAndHealthcheck `yaml:"image,flow"`
ImageOverride `yaml:",inline"`
TaskConfig `yaml:",inline"`
- *Logging `yaml:"logging,flow"`
+ Logging `yaml:"logging,flow"`
Sidecars map[string]*SidecarConfig `yaml:"sidecars"`
- Network *NetworkConfig `yaml:"network"`
- Publish *PublishConfig `yaml:"publish"`
+ Network NetworkConfig `yaml:"network"`
+ Publish PublishConfig `yaml:"publish"`
TaskDefOverrides []OverrideRule `yaml:"taskdef_overrides"`
}
| 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
)
const (
backendSvcManifestPath = "workloads/services/backend/manifest.yml"
)
// BackendService holds the configuration to create a backend service manifest.
type BackendService struct {
Workload `yaml:",inline"`
BackendServiceConfig `yaml:",inline"`
// Use *BackendServiceConfig because of https://github.com/imdario/mergo/issues/146
Environments map[string]*BackendServiceConfig `yaml:",flow"`
parser template.Parser
}
// BackendServiceConfig holds the configuration that can be overridden per environments.
type BackendServiceConfig struct {
ImageConfig ImageWithPortAndHealthcheck `yaml:"image,flow"`
ImageOverride `yaml:",inline"`
TaskConfig `yaml:",inline"`
*Logging `yaml:"logging,flow"`
Sidecars map[string]*SidecarConfig `yaml:"sidecars"`
Network *NetworkConfig `yaml:"network"`
Publish *PublishConfig `yaml:"publish"`
TaskDefOverrides []OverrideRule `yaml:"taskdef_overrides"`
}
// BackendServiceProps represents the configuration needed to create a backend service.
type BackendServiceProps struct {
WorkloadProps
Port uint16
HealthCheck *ContainerHealthCheck // Optional healthcheck configuration.
}
// NewBackendService applies the props to a default backend service configuration with
// minimal task sizes, single replica, no healthcheck, and then returns it.
func NewBackendService(props BackendServiceProps) *BackendService {
svc := newDefaultBackendService()
// Apply overrides.
svc.Name = stringP(props.Name)
svc.BackendServiceConfig.ImageConfig.Image.Location = stringP(props.Image)
svc.BackendServiceConfig.ImageConfig.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
svc.BackendServiceConfig.ImageConfig.Port = uint16P(props.Port)
svc.BackendServiceConfig.ImageConfig.HealthCheck = props.HealthCheck
svc.parser = template.New()
return svc
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (s *BackendService) MarshalBinary() ([]byte, error) {
content, err := s.parser.Parse(backendSvcManifestPath, *s, template.WithFuncs(map[string]interface{}{
"fmtSlice": template.FmtSliceFunc,
"quoteSlice": template.QuoteSliceFunc,
}))
if err != nil {
return nil, err
}
return content.Bytes(), nil
}
// Port returns the exposed port in the manifest.
// If the backend service is not meant to be reachable, then ok is set to false.
func (s *BackendService) Port() (port uint16, ok bool) {
value := s.BackendServiceConfig.ImageConfig.Port
if value == nil {
return 0, false
}
return aws.Uint16Value(value), true
}
// Publish returns the list of topics where notifications can be published.
func (s *BackendService) Publish() []Topic {
if s.BackendServiceConfig.Publish == nil {
return nil
}
return s.BackendServiceConfig.Publish.Topics
}
// BuildRequired returns if the service requires building from the local Dockerfile.
func (s *BackendService) BuildRequired() (bool, error) {
return requiresBuild(s.ImageConfig.Image)
}
// BuildArgs returns a docker.BuildArguments object for the service given a workspace root directory.
func (s *BackendService) BuildArgs(wsRoot string) *DockerBuildArgs {
return s.ImageConfig.BuildConfig(wsRoot)
}
// ApplyEnv returns the service manifest with environment overrides.
// If the environment passed in does not have any overrides then it returns itself.
func (s BackendService) ApplyEnv(envName string) (WorkloadManifest, error) {
overrideConfig, ok := s.Environments[envName]
if !ok {
return &s, nil
}
if overrideConfig == nil {
return &s, nil
}
// Apply overrides to the original service s.
for _, t := range defaultTransformers {
err := mergo.Merge(&s, BackendService{
BackendServiceConfig: *overrideConfig,
}, mergo.WithOverride, mergo.WithTransformers(t))
if err != nil {
return nil, err
}
}
s.Environments = nil
return &s, nil
}
// newDefaultBackendService returns a backend service with minimal task sizes and a single replica.
func newDefaultBackendService() *BackendService {
return &BackendService{
Workload: Workload{
Type: aws.String(BackendServiceType),
},
BackendServiceConfig: BackendServiceConfig{
ImageConfig: ImageWithPortAndHealthcheck{},
TaskConfig: TaskConfig{
CPU: aws.Int(256),
Memory: aws.Int(512),
Count: Count{
Value: aws.Int(1),
},
ExecuteCommand: ExecuteCommand{
Enable: aws.Bool(false),
},
},
Network: &NetworkConfig{
VPC: &vpcConfig{
Placement: stringP(PublicSubnetPlacement),
},
},
},
}
}
| 1 | 19,157 | Can you remind me why we keep the pointer if it's a `map[string]<PStruct>`? Are there other scenarios where the pointer is kept? | aws-copilot-cli | go |
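One plausible answer, read off the code above rather than confirmed: a nil `*BackendServiceConfig` map entry lets the manifest distinguish "environment listed with no overrides" from "environment absent", and `ApplyEnv` checks both cases explicitly. A minimal sketch of that distinction, as a fragment assuming the manifest package's types:

```go
// Illustrative fragment; mirrors the two early returns in ApplyEnv above.
envs := map[string]*BackendServiceConfig{
	"test": nil, // environment is listed, but carries no overrides
}
overrideConfig, ok := envs["prod"] // absent key: ok == false
if !ok || overrideConfig == nil {
	// nothing to merge; return the manifest unchanged
}
```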
@@ -61,8 +61,9 @@ public class RewriteDataFilesAction extends BaseRewriteDataFilesAction<RewriteDa
return this;
}
- public void maxParallelism(int parallelism) {
+ public RewriteDataFilesAction maxParallelism(int parallelism) {
Preconditions.checkArgument(parallelism > 0, "Invalid max parallelism %d", parallelism);
this.maxParallelism = parallelism;
+ return this;
}
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink.actions;
import java.util.List;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.BaseRewriteDataFilesAction;
import org.apache.iceberg.flink.source.RowDataRewriter;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
public class RewriteDataFilesAction extends BaseRewriteDataFilesAction<RewriteDataFilesAction> {
private StreamExecutionEnvironment env;
private int maxParallelism;
public RewriteDataFilesAction(StreamExecutionEnvironment env, Table table) {
super(table);
this.env = env;
this.maxParallelism = env.getParallelism();
}
@Override
protected FileIO fileIO() {
return table().io();
}
@Override
protected List<DataFile> rewriteDataForTasks(List<CombinedScanTask> combinedScanTasks) {
int size = combinedScanTasks.size();
int parallelism = Math.min(size, maxParallelism);
DataStream<CombinedScanTask> dataStream = env.fromCollection(combinedScanTasks);
RowDataRewriter rowDataRewriter = new RowDataRewriter(table(), caseSensitive(), fileIO(), encryptionManager());
List<DataFile> addedDataFiles = rowDataRewriter.rewriteDataForTasks(dataStream, parallelism);
return addedDataFiles;
}
@Override
protected RewriteDataFilesAction self() {
return this;
}
public void maxParallelism(int parallelism) {
Preconditions.checkArgument(parallelism > 0, "Invalid max parallelism %d", parallelism);
this.maxParallelism = parallelism;
}
}
| 1 | 27,563 | Would it make sense to also add `setMaxParallelism` in addition to this to match the Flink API? | apache-iceberg | java |
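If the alias were added, one way it could look, assuming it simply delegates to the fluent `maxParallelism(int)` from the diff (a hypothetical addition, not existing code):

```java
// Hypothetical Flink-style setter; delegates to the existing fluent method
// so the precondition check and state stay in one place.
public RewriteDataFilesAction setMaxParallelism(int parallelism) {
  return maxParallelism(parallelism);
}
```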
@@ -1,4 +1,13 @@
import ModalComponent from 'ghost-admin/components/modal-base';
+import {inject as service} from '@ember/service';
export default ModalComponent.extend({
+ billing: service(),
+
+ actions: {
+ closeModal() {
+ this._super(arguments);
+ this.billing.closeBillingWindow();
+ }
+ }
}); | 1 | import ModalComponent from 'ghost-admin/components/modal-base';
export default ModalComponent.extend({
});
| 1 | 9,387 | Not needed for actions. `_super(...arguments)` is only needed when you're extending from a base class and want to run the logic in the base class before your own logic | TryGhost-Admin | js |
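For contrast, a generic Ember sketch (illustrative, not this component's code) of the canonical case where `_super(...arguments)` is needed: overriding a base-class method whose logic should run first.

```js
import EmberObject from '@ember/object';

// Overriding init: call this._super(...arguments) (note the spread, not
// _super(arguments)) so the base class's setup runs before our own.
const Child = EmberObject.extend({
    init() {
        this._super(...arguments);
        this.set('ready', true);
    }
});

export default Child;
```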
@@ -104,7 +104,14 @@ Suspense.prototype._childDidSuspend = function(promise, suspendingComponent) {
}
};
- if (!c._pendingSuspensionCount++) {
+ /**
+ * We do not set `suspended: true` during hydration because we want the actual markup
+ * to remain on screen and hydrate it when the suspense actually gets resolved.
+	 * In non-hydration cases the usual fallback -> component flow would occur.
+ */
+ const vnode = c._vnode;
+ const wasHydrating = vnode && vnode._hydrating === true;
+ if (!wasHydrating && !c._pendingSuspensionCount++) {
c.setState({ _suspended: (c._detachOnNextRender = c._vnode._children[0]) });
}
promise.then(onResolved, onResolved); | 1 | import { Component, createElement, options, Fragment } from 'preact';
import { assign } from './util';
const oldCatchError = options._catchError;
options._catchError = function(error, newVNode, oldVNode) {
if (error.then) {
/** @type {import('./internal').Component} */
let component;
let vnode = newVNode;
for (; (vnode = vnode._parent); ) {
if ((component = vnode._component) && component._childDidSuspend) {
if (newVNode._dom == null) {
newVNode._dom = oldVNode._dom;
newVNode._children = oldVNode._children;
}
// Don't call oldCatchError if we found a Suspense
return component._childDidSuspend(error, newVNode._component);
}
}
}
oldCatchError(error, newVNode, oldVNode);
};
function detachedClone(vnode) {
if (vnode) {
vnode = assign({}, vnode);
vnode._component = null;
vnode._children = vnode._children && vnode._children.map(detachedClone);
}
return vnode;
}
function removeOriginal(vnode) {
if (vnode) {
vnode._original = null;
vnode._children = vnode._children && vnode._children.map(removeOriginal);
}
return vnode;
}
// having custom inheritance instead of a class here saves a lot of bytes
export function Suspense() {
// we do not call super here to golf some bytes...
this._pendingSuspensionCount = 0;
this._suspenders = null;
this._detachOnNextRender = null;
}
// Things we do here to save some bytes but are not proper JS inheritance:
// - call `new Component()` as the prototype
// - do not set `Suspense.prototype.constructor` to `Suspense`
Suspense.prototype = new Component();
/**
* @param {Promise} promise The thrown promise
* @param {Component<any, any>} suspendingComponent The suspending component
*/
Suspense.prototype._childDidSuspend = function(promise, suspendingComponent) {
/** @type {import('./internal').SuspenseComponent} */
const c = this;
if (c._suspenders == null) {
c._suspenders = [];
}
c._suspenders.push(suspendingComponent);
const resolve = suspended(c._vnode);
let resolved = false;
const onResolved = () => {
if (resolved) return;
resolved = true;
suspendingComponent.componentWillUnmount =
suspendingComponent._suspendedComponentWillUnmount;
if (resolve) {
resolve(onSuspensionComplete);
} else {
onSuspensionComplete();
}
};
suspendingComponent._suspendedComponentWillUnmount =
suspendingComponent.componentWillUnmount;
suspendingComponent.componentWillUnmount = () => {
onResolved();
if (suspendingComponent._suspendedComponentWillUnmount) {
suspendingComponent._suspendedComponentWillUnmount();
}
};
const onSuspensionComplete = () => {
if (!--c._pendingSuspensionCount) {
c._vnode._children[0] = removeOriginal(c.state._suspended);
c.setState({ _suspended: (c._detachOnNextRender = null) });
let suspended;
while ((suspended = c._suspenders.pop())) {
suspended.forceUpdate();
}
}
};
if (!c._pendingSuspensionCount++) {
c.setState({ _suspended: (c._detachOnNextRender = c._vnode._children[0]) });
}
promise.then(onResolved, onResolved);
};
Suspense.prototype.componentWillUnmount = function() {
this._suspenders = [];
};
Suspense.prototype.render = function(props, state) {
if (this._detachOnNextRender) {
// When the Suspense's _vnode was created by a call to createVNode
// (i.e. due to a setState further up in the tree)
// it's _children prop is null, in this case we "forget" about the parked vnodes to detach
if (this._vnode._children)
this._vnode._children[0] = detachedClone(this._detachOnNextRender);
this._detachOnNextRender = null;
}
return [
createElement(Fragment, null, state._suspended ? null : props.children),
state._suspended && props.fallback
];
};
/**
* Checks and calls the parent component's _suspended method, passing in the
* suspended vnode. This is a way for a parent (e.g. SuspenseList) to get notified
* that one of its children/descendants suspended.
*
* The parent MAY return a callback. The callback will get called when the
* suspension resolves, notifying the parent of the fact.
* Moreover, the callback gets function `unsuspend` as a parameter. The resolved
* child descendant will not actually get unsuspended until `unsuspend` gets called.
* This is a way for the parent to delay unsuspending.
*
* If the parent does not return a callback then the resolved vnode
* gets unsuspended immediately when it resolves.
*
* @param {import('../src/internal').VNode} vnode
* @returns {((unsuspend: () => void) => void)?}
*/
export function suspended(vnode) {
let component = vnode._parent._component;
return component && component._suspended && component._suspended(vnode);
}
export function lazy(loader) {
let prom;
let component;
let error;
function Lazy(props) {
if (!prom) {
prom = loader();
prom.then(
exports => {
component = exports.default || exports;
},
e => {
error = e;
}
);
}
if (error) {
throw error;
}
if (!component) {
throw prom;
}
return createElement(component, props);
}
Lazy.displayName = 'Lazy';
Lazy._forwarded = true;
return Lazy;
}
| 1 | 16,148 | I still need to run this code to verify my understanding, but my first read-through makes me think this may need to be `suspendingComponent._vnode`, since we set `vnode._hydrating` on the vnode that threw the error (diff/index.js:275), not on `Suspense._vnode` (though we could set it there in Suspense's `_catchError` implementation at the top of this file). | preactjs-preact | js |
@@ -372,8 +372,14 @@ def get_path_if_valid(pathstr, cwd=None, relative=False, check_exists=False):
path = None
if check_exists:
- if path is not None and os.path.exists(path):
- log.url.debug("URL is a local file")
+ if path is not None:
+ # If the path contains characters that the locale cannot handle,
+ # then we consider it as non-existent.
+ try:
+ if os.path.exists(path):
+ log.url.debug("URL is a local file")
+ except UnicodeEncodeError:
+ path = None
else:
path = None
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utils regarding URL handling."""
import re
import base64
import os.path
import ipaddress
import posixpath
import urllib.parse
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QHostInfo, QHostAddress, QNetworkProxy
from qutebrowser.config import config
from qutebrowser.utils import log, qtutils, message, utils
from qutebrowser.commands import cmdexc
from qutebrowser.browser.network import pac
# FIXME: we probably could raise some exceptions on invalid URLs
# https://github.com/qutebrowser/qutebrowser/issues/108
class InvalidUrlError(ValueError):
"""Error raised if a function got an invalid URL.
Inherits ValueError because that was the exception originally used for
that, so there still might be some code around which checks for that.
"""
def __init__(self, url):
if url.isValid():
raise ValueError("Got valid URL {}!".format(url.toDisplayString()))
self.url = url
self.msg = get_errstring(url)
super().__init__(self.msg)
def _parse_search_term(s):
"""Get a search engine name and search term from a string.
Args:
s: The string to get a search engine for.
Return:
A (engine, term) tuple, where engine is None for the default engine.
"""
s = s.strip()
split = s.split(maxsplit=1)
if len(split) == 2:
engine = split[0]
try:
config.val.url.searchengines[engine]
except KeyError:
engine = None
term = s
else:
term = split[1]
elif not split:
raise ValueError("Empty search term!")
else:
engine = None
term = s
log.url.debug("engine {}, term {!r}".format(engine, term))
return (engine, term)
def _get_search_url(txt):
"""Get a search engine URL for a text.
Args:
txt: Text to search for.
Return:
The search URL as a QUrl.
"""
log.url.debug("Finding search engine for {!r}".format(txt))
engine, term = _parse_search_term(txt)
assert term
if engine is None:
engine = 'DEFAULT'
template = config.val.url.searchengines[engine]
url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
qtutils.ensure_valid(url)
return url
def _is_url_naive(urlstr):
"""Naive check if given URL is really a URL.
Args:
urlstr: The URL to check for, as string.
Return:
True if the URL really is a URL, False otherwise.
"""
url = qurl_from_user_input(urlstr)
assert url.isValid()
if not utils.raises(ValueError, ipaddress.ip_address, urlstr):
# Valid IPv4/IPv6 address
return True
# Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
# which we don't want to. Note we already filtered *real* valid IPs
# above.
if not QHostAddress(urlstr).isNull():
return False
host = url.host()
return '.' in host and not host.endswith('.')
def _is_url_dns(urlstr):
"""Check if a URL is really a URL via DNS.
Args:
url: The URL to check for as a string.
Return:
True if the URL really is a URL, False otherwise.
"""
url = qurl_from_user_input(urlstr)
assert url.isValid()
if (utils.raises(ValueError, ipaddress.ip_address, urlstr) and
not QHostAddress(urlstr).isNull()):
log.url.debug("Bogus IP URL -> False")
# Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
# which we don't want to.
return False
host = url.host()
if not host:
log.url.debug("URL has no host -> False")
return False
log.url.debug("Doing DNS request for {}".format(host))
info = QHostInfo.fromName(host)
return not info.error()
def fuzzy_url(urlstr, cwd=None, relative=False, do_search=True,
force_search=False):
"""Get a QUrl based on a user input which is URL or search term.
Args:
urlstr: URL to load as a string.
cwd: The current working directory, or None.
relative: Whether to resolve relative files.
do_search: Whether to perform a search on non-URLs.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A target QUrl to a search page or the original URL.
"""
urlstr = urlstr.strip()
path = get_path_if_valid(urlstr, cwd=cwd, relative=relative,
check_exists=True)
if not force_search and path is not None:
url = QUrl.fromLocalFile(path)
elif force_search or (do_search and not is_url(urlstr)):
# probably a search term
log.url.debug("URL is a fuzzy search term")
try:
url = _get_search_url(urlstr)
except ValueError: # invalid search engine
url = qurl_from_user_input(urlstr)
else: # probably an address
log.url.debug("URL is a fuzzy address")
url = qurl_from_user_input(urlstr)
log.url.debug("Converting fuzzy term {!r} to URL -> {}".format(
urlstr, url.toDisplayString()))
if do_search and config.val.url.auto_search != 'never' and urlstr:
qtutils.ensure_valid(url)
else:
if not url.isValid():
raise InvalidUrlError(url)
return url
def _has_explicit_scheme(url):
"""Check if a url has an explicit scheme given.
Args:
url: The URL as QUrl.
"""
# Note that generic URI syntax actually would allow a second colon
# after the scheme delimiter. Since we don't know of any URIs
# using this and want to support e.g. searching for scoped C++
# symbols, we treat this as not a URI anyways.
return (url.isValid() and url.scheme() and
(url.host() or url.path()) and
' ' not in url.path() and
not url.path().startswith(':'))
def is_special_url(url):
"""Return True if url is an about:... or other special URL.
Args:
url: The URL as QUrl.
"""
if not url.isValid():
return False
special_schemes = ('about', 'qute', 'file')
return url.scheme() in special_schemes
def is_url(urlstr):
"""Check if url seems to be a valid URL.
Args:
urlstr: The URL as string.
Return:
True if it is a valid URL, False otherwise.
"""
autosearch = config.val.url.auto_search
log.url.debug("Checking if {!r} is a URL (autosearch={}).".format(
urlstr, autosearch))
urlstr = urlstr.strip()
qurl = QUrl(urlstr)
qurl_userinput = qurl_from_user_input(urlstr)
if autosearch == 'never':
# no autosearch, so everything is a URL unless it has an explicit
# search engine.
try:
engine, _term = _parse_search_term(urlstr)
except ValueError:
return False
else:
return engine is None
if not qurl_userinput.isValid():
# This will also catch URLs containing spaces.
return False
if _has_explicit_scheme(qurl):
# URLs with explicit schemes are always URLs
log.url.debug("Contains explicit scheme")
url = True
elif qurl_userinput.host() in ['localhost', '127.0.0.1', '::1']:
log.url.debug("Is localhost.")
url = True
elif is_special_url(qurl):
# Special URLs are always URLs, even with autosearch=never
log.url.debug("Is a special URL.")
url = True
elif autosearch == 'dns':
log.url.debug("Checking via DNS check")
# We want to use qurl_from_user_input here, as the user might enter
# "foo.de" and that should be treated as URL here.
url = _is_url_dns(urlstr)
elif autosearch == 'naive':
log.url.debug("Checking via naive check")
url = _is_url_naive(urlstr)
else: # pragma: no cover
raise ValueError("Invalid autosearch value")
log.url.debug("url = {}".format(url))
return url
def qurl_from_user_input(urlstr):
"""Get a QUrl based on a user input. Additionally handles IPv6 addresses.
QUrl.fromUserInput handles something like '::1' as a file URL instead of an
IPv6, so we first try to handle it as a valid IPv6, and if that fails we
use QUrl.fromUserInput.
WORKAROUND - https://bugreports.qt.io/browse/QTBUG-41089
FIXME - Maybe https://codereview.qt-project.org/#/c/93851/ has a better way
to solve this?
https://github.com/qutebrowser/qutebrowser/issues/109
Args:
urlstr: The URL as string.
Return:
The converted QUrl.
"""
# First we try very liberally to separate something like an IPv6 from the
# rest (e.g. path info or parameters)
match = re.match(r'\[?([0-9a-fA-F:.]+)\]?(.*)', urlstr.strip())
if match:
ipstr, rest = match.groups()
else:
ipstr = urlstr.strip()
rest = ''
# Then we try to parse it as an IPv6, and if we fail use
# QUrl.fromUserInput.
try:
ipaddress.IPv6Address(ipstr)
except ipaddress.AddressValueError:
return QUrl.fromUserInput(urlstr)
else:
return QUrl('http://[{}]{}'.format(ipstr, rest))
def invalid_url_error(url, action):
"""Display an error message for a URL.
    Args:
        url: The invalid URL, as a QUrl.
        action: The action which was interrupted by the error.
"""
if url.isValid():
raise ValueError("Calling invalid_url_error with valid URL {}".format(
url.toDisplayString()))
errstring = get_errstring(
url, "Trying to {} with invalid URL".format(action))
message.error(errstring)
def raise_cmdexc_if_invalid(url):
"""Check if the given QUrl is invalid, and if so, raise a CommandError."""
if not url.isValid():
raise cmdexc.CommandError(get_errstring(url))
def get_path_if_valid(pathstr, cwd=None, relative=False, check_exists=False):
"""Check if path is a valid path.
Args:
pathstr: The path as string.
cwd: The current working directory, or None.
relative: Whether to resolve relative files.
check_exists: Whether to check if the file
                      actually exists on the filesystem.
Return:
The path if it is a valid path, None otherwise.
"""
pathstr = pathstr.strip()
log.url.debug("Checking if {!r} is a path".format(pathstr))
expanded = os.path.expanduser(pathstr)
if os.path.isabs(expanded):
path = expanded
elif relative and cwd:
path = os.path.join(cwd, expanded)
elif relative:
try:
path = os.path.abspath(expanded)
except OSError:
path = None
else:
path = None
if check_exists:
if path is not None and os.path.exists(path):
log.url.debug("URL is a local file")
else:
path = None
return path
def filename_from_url(url):
"""Get a suitable filename from a URL.
Args:
url: The URL to parse, as a QUrl.
Return:
The suggested filename as a string, or None.
"""
if not url.isValid():
return None
pathname = posixpath.basename(url.path())
if pathname:
return pathname
elif url.host():
return url.host() + '.html'
else:
return None
def host_tuple(url):
"""Get a (scheme, host, port) tuple from a QUrl.
This is suitable to identify a connection, e.g. for SSL errors.
"""
if not url.isValid():
raise InvalidUrlError(url)
scheme, host, port = url.scheme(), url.host(), url.port()
assert scheme
if not host:
raise ValueError("Got URL {} without host.".format(
url.toDisplayString()))
if port == -1:
port_mapping = {
'http': 80,
'https': 443,
'ftp': 21,
}
try:
port = port_mapping[scheme]
except KeyError:
raise ValueError("Got URL {} with unknown port.".format(
url.toDisplayString()))
return scheme, host, port
def get_errstring(url, base="Invalid URL"):
"""Get an error string for a URL.
Args:
url: The URL as a QUrl.
base: The base error string.
Return:
A new string with url.errorString() is appended if available.
"""
url_error = url.errorString()
if url_error:
return base + " - {}".format(url_error)
else:
return base
def same_domain(url1, url2):
"""Check if url1 and url2 belong to the same website.
This will use a "public suffix list" to determine what a "top level domain"
is. All further domains are ignored.
    For example, example.com and www.example.com are considered the same, but
    example.co.uk and test.co.uk are not.
Return:
True if the domains are the same, False otherwise.
"""
if not url1.isValid():
raise InvalidUrlError(url1)
if not url2.isValid():
raise InvalidUrlError(url2)
suffix1 = url1.topLevelDomain()
suffix2 = url2.topLevelDomain()
if suffix1 == '':
return url1.host() == url2.host()
if suffix1 != suffix2:
return False
domain1 = url1.host()[:-len(suffix1)].split('.')[-1]
domain2 = url2.host()[:-len(suffix2)].split('.')[-1]
return domain1 == domain2
def encoded_url(url):
"""Return the fully encoded url as string.
Args:
url: The url to encode as QUrl.
"""
return bytes(url.toEncoded()).decode('ascii')
class IncDecError(Exception):
"""Exception raised by incdec_number on problems.
Attributes:
msg: The error message.
url: The QUrl which caused the error.
"""
def __init__(self, msg, url):
super().__init__(msg)
self.url = url
self.msg = msg
def __str__(self):
return '{}: {}'.format(self.msg, self.url.toString())
def _get_incdec_value(match, incdec, url, count):
"""Get an incremented/decremented URL based on a URL match."""
pre, zeroes, number, post = match.groups()
# This should always succeed because we match \d+
val = int(number)
if incdec == 'decrement':
if val <= 0:
raise IncDecError("Can't decrement {}!".format(val), url)
val -= count
elif incdec == 'increment':
val += count
else:
raise ValueError("Invalid value {} for indec!".format(incdec))
if zeroes:
if len(number) < len(str(val)):
zeroes = zeroes[1:]
elif len(number) > len(str(val)):
zeroes += '0'
return ''.join([pre, zeroes, str(val), post])
def incdec_number(url, incdec, count=1, segments=None):
"""Find a number in the url and increment or decrement it.
Args:
url: The current url
incdec: Either 'increment' or 'decrement'
count: The number to increment or decrement by
segments: A set of URL segments to search. Valid segments are:
'host', 'path', 'query', 'anchor'.
Default: {'path', 'query'}
Return:
The new url with the number incremented/decremented.
Raises IncDecError if the url contains no number.
"""
if not url.isValid():
raise InvalidUrlError(url)
if segments is None:
segments = {'path', 'query'}
valid_segments = {'host', 'path', 'query', 'anchor'}
if segments - valid_segments:
extra_elements = segments - valid_segments
raise IncDecError("Invalid segments: {}".format(
', '.join(extra_elements)), url)
# Make a copy of the QUrl so we don't modify the original
url = QUrl(url)
# Order as they appear in a URL
segment_modifiers = [
('host', url.host, url.setHost),
('path', url.path, url.setPath),
('query', url.query, url.setQuery),
('anchor', url.fragment, url.setFragment),
]
# We're searching the last number so we walk the url segments backwards
for segment, getter, setter in reversed(segment_modifiers):
if segment not in segments:
continue
# Get the last number in a string
match = re.match(r'(.*\D|^)(0*)(\d+)(.*)', getter())
if not match:
continue
setter(_get_incdec_value(match, incdec, url, count))
return url
raise IncDecError("No number found in URL!", url)
def file_url(path):
"""Return a file:// url (as string) to the given local path.
Arguments:
path: The absolute path to the local file
"""
return QUrl.fromLocalFile(path).toString(QUrl.FullyEncoded)
def data_url(mimetype, data):
"""Get a data: QUrl for the given data."""
b64 = base64.b64encode(data).decode('ascii')
url = QUrl('data:{};base64,{}'.format(mimetype, b64))
qtutils.ensure_valid(url)
return url
def safe_display_string(qurl):
"""Get a IDN-homograph phishing safe form of the given QUrl.
If we're dealing with a Punycode-encoded URL, this prepends the hostname in
its encoded form, to make sure those URLs are distinguishable.
See https://github.com/qutebrowser/qutebrowser/issues/2547
and https://bugreports.qt.io/browse/QTBUG-60365
"""
if not qurl.isValid():
raise InvalidUrlError(qurl)
host = qurl.host(QUrl.FullyEncoded)
if '..' in host: # pragma: no cover
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-60364
return '(unparseable URL!) {}'.format(qurl.toDisplayString())
for part in host.split('.'):
if part.startswith('xn--') and host != qurl.host(QUrl.FullyDecoded):
return '({}) {}'.format(host, qurl.toDisplayString())
return qurl.toDisplayString()
class InvalidProxyTypeError(Exception):
"""Error raised when proxy_from_url gets an unknown proxy type."""
def __init__(self, typ):
super().__init__("Invalid proxy type {}!".format(typ))
def proxy_from_url(url):
"""Create a QNetworkProxy from QUrl and a proxy type.
Args:
url: URL of a proxy (possibly with credentials).
Return:
New QNetworkProxy.
"""
if not url.isValid():
raise InvalidUrlError(url)
scheme = url.scheme()
if scheme in ['pac+http', 'pac+https', 'pac+file']:
fetcher = pac.PACFetcher(url)
fetcher.fetch()
return fetcher
types = {
'http': QNetworkProxy.HttpProxy,
'socks': QNetworkProxy.Socks5Proxy,
'socks5': QNetworkProxy.Socks5Proxy,
'direct': QNetworkProxy.NoProxy,
}
if scheme not in types:
raise InvalidProxyTypeError(scheme)
proxy = QNetworkProxy(types[scheme], url.host())
if url.port() != -1:
proxy.setPort(url.port())
if url.userName():
proxy.setUser(url.userName())
if url.password():
proxy.setPassword(url.password())
return proxy
| 1 | 19,326 | I think it's a good idea to log this as debug, the same way the other branch gets logged. | qutebrowser-qutebrowser | py |
@@ -73,7 +73,7 @@ class TestJSONLD(AnnotationTestCase):
"comment": 1}
a = API.add_annotation(model='annotation_note', **data)
- # JSONAlchemy issue with overwritting fields
+ # JSONAlchemy issue with overwriting fields
self.assert_(len(a.validate()) == 0)
ld = a.get_jsonld("oaf", | 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
from datetime import datetime
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, nottest, \
InvenioTestCase
CFG = lazy_import('invenio.base.globals.cfg')
USER = lazy_import('invenio.modules.accounts.models.User')
API = lazy_import('invenio.modules.annotations.api')
NOTEUTILS = lazy_import('invenio.modules.annotations.noteutils')
COMMENT = lazy_import('invenio.modules.comments.models.CmtRECORDCOMMENT')
class AnnotationTestCase(InvenioTestCase):
def setUp(self):
self.app.config['ANNOTATIONS_ENGINE'] = \
"invenio.modules.jsonalchemy.jsonext.engines.memory:MemoryStorage"
class TestAnnotation(AnnotationTestCase):
def test_initialization(self):
u = USER(id=1)
a = API.Annotation.create({"who": u, "what": "lorem", "where": "/"})
self.assert_(len(a.validate()) == 0)
self.assert_(type(a["when"]) == datetime)
self.assert_(a["who"].get_id() == 1)
# invalid annotation
a = API.Annotation.create({"who": u, "what": "lorem", "where": "/",
"perm": {"public": True, "groups": []},
"uuid": "1m"})
self.assert_(len(a.validate()) == 1)
def test_jsonld(self):
u = USER(id=1, nickname="johndoe")
a = API.Annotation.create({"who": u, "what": "lorem", "where": "/",
"perm": {"public": True, "groups": []}})
ld = a.get_jsonld("oaf")
self.assert_(ld["hasTarget"]["@id"] == CFG["CFG_SITE_URL"] + "/")
self.assert_(ld["hasBody"]["chars"] == "lorem")
class TestJSONLD(AnnotationTestCase):
@nottest
def test(self):
u = USER(id=1)
data = {"who": u, "what": "lorem",
"where": {"record": 1, "marker": "P.1_T.2a.2_L.100"},
"comment": 1}
a = API.add_annotation(model='annotation_note', **data)
# JSONAlchemy issue with overwritting fields
self.assert_(len(a.validate()) == 0)
ld = a.get_jsonld("oaf",
new_context={"ST": "http://www.w3.org/ns/oa#"
"FragmentSelector"},
format="compacted")
self.assert_(ld["http://www.w3.org/ns/oa#hasTarget"]
["http://www.w3.org/ns/oa#hasSelector"]
["@type"] == "ST")
self.assert_(ld["http://www.w3.org/ns/oa#hasTarget"]
["http://www.w3.org/ns/oa#hasSelector"]
["http://www.w3.org/1999/02/22-rdf-syntax-ns#value"] ==
"P.1_T.2a.2_L.100")
TEST_SUITE = make_test_suite(TestAnnotation, TestJSONLD)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| 1 | 11,057 | 1:D100: Docstring missing 35:D101: Docstring missing 37:D102: Docstring missing 42:D101: Docstring missing 44:D102: Docstring missing 57:D102: Docstring missing 66:D101: Docstring missing 69:D102: Docstring missing | inveniosoftware-invenio | py |
@@ -20,6 +20,10 @@ import (
"context"
)
+const (
+ systemGuestRoleName = "system.guest"
+)
+
var (
systemTokenInst TokenGenerator = &noauth{}
| 1 | /*
Package auth can be used for authentication and authorization
Copyright 2018 Portworx
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
)
var (
systemTokenInst TokenGenerator = &noauth{}
// Inst returns the instance of system token manager.
// This function can be overridden for testing purposes
InitSystemTokenManager = func(tg TokenGenerator) {
systemTokenInst = tg
}
// SystemTokenManagerInst returns the systemTokenManager instance
SystemTokenManagerInst = func() TokenGenerator {
return systemTokenInst
}
)
// Authenticator interface validates and extracts the claims from a raw token
type Authenticator interface {
// AuthenticateToken validates the token and returns the claims
AuthenticateToken(context.Context, string) (*Claims, error)
// Username returns the unique id according to the configuration. Default
// it will return the value for "sub" in the token claims, but it can be
// configured to return the email or name as the unique id.
Username(*Claims) string
}
// Enabled returns whether or not auth is enabled.
func Enabled() bool {
return len(systemTokenInst.Issuer()) != 0
}
| 1 | 8,609 | keeping this as an unexported constant, as importing the role pkg creates a cyclic dependency. | libopenstorage-openstorage | go |
@@ -349,9 +349,12 @@ func (c *collection) newPut(a *driver.Action, opts *driver.RunActionsOptions) (*
// It doesn't make sense to generate a random sort key.
return nil, fmt.Errorf("missing sort key %q", c.sortKey)
}
- rev := driver.UniqueString()
- if av.M[c.opts.RevisionField], err = encodeValue(rev); err != nil {
- return nil, err
+ var rev string
+ if a.Doc.RevisionOn(c.RevisionField()) {
+ rev = driver.UniqueString()
+ if av.M[c.opts.RevisionField], err = encodeValue(rev); err != nil {
+ return nil, err
+ }
}
dput := &dyn.Put{
TableName: &c.table, | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package awsdynamodb provides a docstore implementation backed by AWS
// DynamoDB.
// Use OpenCollection to construct a *docstore.Collection.
//
// URLs
//
// For docstore.OpenCollection, awsdynamodb registers for the scheme
// "dynamodb". The default URL opener will use an AWS session with the default
// credentials and configuration; see
// https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ for more details.
// To customize the URL opener, or for more details on the URL format, see
// URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// As
//
// awsdynamodb exposes the following types for As:
// - Collection.As: *dynamodb.DynamoDB
// - ActionList.BeforeDo: *dynamodb.BatchGetItemInput or *dynamodb.PutItemInput or *dynamodb.DeleteItemInput
// or *dynamodb.UpdateItemInput
// - Query.BeforeQuery: *dynamodb.QueryInput or *dynamodb.ScanInput
// - DocumentIterator: *dynamodb.QueryOutput or *dynamodb.ScanOutput
// - ErrorAs: awserr.Error
package awsdynamodb
import (
"context"
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
dyn "github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
"gocloud.dev/docstore"
"gocloud.dev/docstore/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/gcerr"
)
type collection struct {
db *dyn.DynamoDB
table string // DynamoDB table name
partitionKey string
sortKey string
description *dyn.TableDescription
opts *Options
}
// FallbackFunc is a function for executing queries that cannot be run by the built-in
// awsdynamodb logic. See Options.RunQueryFunc for details.
type FallbackFunc func(context.Context, *driver.Query, RunQueryFunc) (driver.DocumentIterator, error)
type Options struct {
// If false, queries that can only be executed by scanning the entire table
// return an error instead (with the exception of a query with no filters).
AllowScans bool
// The name of the field holding the document revision.
// Defaults to docstore.DefaultRevisionField.
RevisionField string
// If set, call this function on queries that we cannot execute at all (for
// example, a query with an OrderBy clause that lacks an equality filter on a
// partition key). The function should execute the query however it wishes, and
// return an iterator over the results. It can use the RunQueryFunc passed as its
// third argument to have the DynamoDB driver run a query, for instance a
// modified version of the original query.
//
// If RunQueryFallback is nil, queries that cannot be executed will fail with a
// error that has code Unimplemented.
RunQueryFallback FallbackFunc
// The maximum number of concurrent goroutines started for a single call to
// ActionList.Do. If less than 1, there is no limit.
MaxOutstandingActionRPCs int
}
// RunQueryFunc is the type of the function passed to RunQueryFallback.
type RunQueryFunc func(context.Context, *driver.Query) (driver.DocumentIterator, error)
// OpenCollection creates a *docstore.Collection representing a DynamoDB collection.
func OpenCollection(db *dyn.DynamoDB, tableName, partitionKey, sortKey string, opts *Options) (*docstore.Collection, error) {
c, err := newCollection(db, tableName, partitionKey, sortKey, opts)
if err != nil {
return nil, err
}
return docstore.NewCollection(c), nil
}
func newCollection(db *dyn.DynamoDB, tableName, partitionKey, sortKey string, opts *Options) (*collection, error) {
out, err := db.DescribeTable(&dyn.DescribeTableInput{TableName: &tableName})
if err != nil {
return nil, err
}
if opts == nil {
opts = &Options{}
}
if opts.RevisionField == "" {
opts.RevisionField = docstore.DefaultRevisionField
}
return &collection{
db: db,
table: tableName,
partitionKey: partitionKey,
sortKey: sortKey,
description: out.Table,
opts: opts,
}, nil
}
// Key returns a two-element array with the partition key and sort key, if any.
func (c *collection) Key(doc driver.Document) (interface{}, error) {
var keys [2]interface{}
var err error
keys[0], err = doc.GetField(c.partitionKey)
if err != nil {
return nil, nil // missing key is not an error
}
if c.sortKey != "" {
keys[1], _ = doc.GetField(c.sortKey) // ignore error since keys[1] is nil in that case
}
return keys, nil
}
func (c *collection) RevisionField() string { return c.opts.RevisionField }
func (c *collection) RunActions(ctx context.Context, actions []*driver.Action, opts *driver.RunActionsOptions) driver.ActionListError {
errs := make([]error, len(actions))
beforeGets, gets, writes, afterGets := driver.GroupActions(actions)
c.runGets(ctx, beforeGets, errs, opts)
ch := make(chan struct{})
go func() { defer close(ch); c.runWrites(ctx, writes, errs, opts) }()
c.runGets(ctx, gets, errs, opts)
<-ch
c.runGets(ctx, afterGets, errs, opts)
return driver.NewActionListError(errs)
}
func (c *collection) runGets(ctx context.Context, actions []*driver.Action, errs []error, opts *driver.RunActionsOptions) {
const batchSize = 100
t := driver.NewThrottle(c.opts.MaxOutstandingActionRPCs)
for _, group := range driver.GroupByFieldPath(actions) {
n := len(group) / batchSize
for i := 0; i < n; i++ {
i := i
t.Acquire()
go func() {
defer t.Release()
c.batchGet(ctx, group, errs, opts, batchSize*i, batchSize*(i+1)-1)
}()
}
if n*batchSize < len(group) {
t.Acquire()
go func() {
defer t.Release()
c.batchGet(ctx, group, errs, opts, batchSize*n, len(group)-1)
}()
}
}
t.Wait()
}
func (c *collection) batchGet(ctx context.Context, gets []*driver.Action, errs []error, opts *driver.RunActionsOptions, start, end int) {
// errors need to be mapped to the actions' indices.
setErr := func(err error) {
for i := start; i <= end; i++ {
errs[gets[i].Index] = err
}
}
keys := make([]map[string]*dyn.AttributeValue, 0, end-start+1)
for i := start; i <= end; i++ {
av, err := encodeDocKeyFields(gets[i].Doc, c.partitionKey, c.sortKey)
if err != nil {
errs[gets[i].Index] = err
}
keys = append(keys, av.M)
}
ka := &dyn.KeysAndAttributes{
Keys: keys,
ConsistentRead: aws.Bool(true),
}
if len(gets[start].FieldPaths) != 0 {
// We need to add the key fields if the user doesn't include them. The
// BatchGet API doesn't return them otherwise.
var hasP, hasS bool
nbs := []expression.NameBuilder{expression.Name(c.opts.RevisionField)}
for _, fp := range gets[start].FieldPaths {
p := strings.Join(fp, ".")
nbs = append(nbs, expression.Name(p))
if p == c.partitionKey {
hasP = true
} else if p == c.sortKey {
hasS = true
}
}
if !hasP {
nbs = append(nbs, expression.Name(c.partitionKey))
}
if c.sortKey != "" && !hasS {
nbs = append(nbs, expression.Name(c.sortKey))
}
expr, err := expression.NewBuilder().
WithProjection(expression.AddNames(expression.ProjectionBuilder{}, nbs...)).
Build()
if err != nil {
setErr(err)
return
}
ka.ProjectionExpression = expr.Projection()
ka.ExpressionAttributeNames = expr.Names()
}
in := &dyn.BatchGetItemInput{RequestItems: map[string]*dyn.KeysAndAttributes{c.table: ka}}
if opts.BeforeDo != nil {
if err := opts.BeforeDo(driver.AsFunc(in)); err != nil {
setErr(err)
return
}
}
out, err := c.db.BatchGetItemWithContext(ctx, in)
if err != nil {
setErr(err)
return
}
found := make([]bool, end-start+1)
am := mapActionIndices(gets, start, end)
for _, item := range out.Responses[c.table] {
if item != nil {
key := map[string]interface{}{c.partitionKey: nil}
if c.sortKey != "" {
key[c.sortKey] = nil
}
keysOnly, err := driver.NewDocument(key)
if err != nil {
panic(err)
}
err = decodeDoc(&dyn.AttributeValue{M: item}, keysOnly)
if err != nil {
continue
}
decKey, err := c.Key(keysOnly)
if err != nil {
continue
}
i := am[decKey]
errs[gets[i].Index] = decodeDoc(&dyn.AttributeValue{M: item}, gets[i].Doc)
found[i-start] = true
}
}
for delta, f := range found {
if !f {
errs[gets[start+delta].Index] = gcerr.Newf(gcerr.NotFound, nil, "item %v not found", gets[start+delta].Doc)
}
}
}
func mapActionIndices(actions []*driver.Action, start, end int) map[interface{}]int {
m := make(map[interface{}]int)
for i := start; i <= end; i++ {
m[actions[i].Key] = i
}
return m
}
// runWrites executes all the writes as separate RPCs, concurrently.
func (c *collection) runWrites(ctx context.Context, writes []*driver.Action, errs []error, opts *driver.RunActionsOptions) {
var ops []*writeOp
for _, w := range writes {
op, err := c.newWriteOp(w, opts)
if err != nil {
errs[w.Index] = err
} else {
ops = append(ops, op)
}
}
t := driver.NewThrottle(c.opts.MaxOutstandingActionRPCs)
for _, op := range ops {
op := op
t.Acquire()
go func() {
defer t.Release()
err := op.run(ctx)
a := op.action
if err != nil {
errs[a.Index] = err
} else {
c.onSuccess(op)
}
}()
}
t.Wait()
}
// A writeOp describes a single write to DynamoDB. The write can be executed
// on its own, or included as part of a transaction.
type writeOp struct {
action *driver.Action
writeItem *dyn.TransactWriteItem // for inclusion in a transaction
newPartitionKey string // for a Create on a document without a partition key
newRevision string
run func(context.Context) error // run as a single RPC
}
func (c *collection) newWriteOp(a *driver.Action, opts *driver.RunActionsOptions) (*writeOp, error) {
switch a.Kind {
case driver.Create, driver.Replace, driver.Put:
return c.newPut(a, opts)
case driver.Update:
return c.newUpdate(a, opts)
case driver.Delete:
return c.newDelete(a, opts)
default:
panic("bad write kind")
}
}
func (c *collection) newPut(a *driver.Action, opts *driver.RunActionsOptions) (*writeOp, error) {
av, err := encodeDoc(a.Doc)
if err != nil {
return nil, err
}
mf := c.missingKeyField(av.M)
if a.Kind != driver.Create && mf != "" {
return nil, fmt.Errorf("missing key field %q", mf)
}
var newPartitionKey string
if mf == c.partitionKey {
newPartitionKey = driver.UniqueString()
av.M[c.partitionKey] = new(dyn.AttributeValue).SetS(newPartitionKey)
}
if c.sortKey != "" && mf == c.sortKey {
// It doesn't make sense to generate a random sort key.
return nil, fmt.Errorf("missing sort key %q", c.sortKey)
}
rev := driver.UniqueString()
if av.M[c.opts.RevisionField], err = encodeValue(rev); err != nil {
return nil, err
}
dput := &dyn.Put{
TableName: &c.table,
Item: av.M,
}
cb, err := c.precondition(a)
if err != nil {
return nil, err
}
if cb != nil {
ce, err := expression.NewBuilder().WithCondition(*cb).Build()
if err != nil {
return nil, err
}
dput.ExpressionAttributeNames = ce.Names()
dput.ExpressionAttributeValues = ce.Values()
dput.ConditionExpression = ce.Condition()
}
return &writeOp{
action: a,
writeItem: &dyn.TransactWriteItem{Put: dput},
newPartitionKey: newPartitionKey,
newRevision: rev,
run: func(ctx context.Context) error {
return c.runPut(ctx, dput, a, opts)
},
}, nil
}
func (c *collection) runPut(ctx context.Context, dput *dyn.Put, a *driver.Action, opts *driver.RunActionsOptions) error {
in := &dyn.PutItemInput{
TableName: dput.TableName,
Item: dput.Item,
ConditionExpression: dput.ConditionExpression,
ExpressionAttributeNames: dput.ExpressionAttributeNames,
ExpressionAttributeValues: dput.ExpressionAttributeValues,
}
if opts.BeforeDo != nil {
if err := opts.BeforeDo(driver.AsFunc(in)); err != nil {
return err
}
}
_, err := c.db.PutItemWithContext(ctx, in)
if ae, ok := err.(awserr.Error); ok && ae.Code() == dyn.ErrCodeConditionalCheckFailedException {
if a.Kind == driver.Create {
err = gcerr.Newf(gcerr.AlreadyExists, err, "document already exists")
}
if rev, _ := a.Doc.GetField(c.opts.RevisionField); rev == nil && a.Kind == driver.Replace {
err = gcerr.Newf(gcerr.NotFound, nil, "document not found")
}
}
return err
}
func (c *collection) newDelete(a *driver.Action, opts *driver.RunActionsOptions) (*writeOp, error) {
av, err := encodeDocKeyFields(a.Doc, c.partitionKey, c.sortKey)
if err != nil {
return nil, err
}
del := &dyn.Delete{
TableName: &c.table,
Key: av.M,
}
cb, err := c.precondition(a)
if err != nil {
return nil, err
}
if cb != nil {
ce, err := expression.NewBuilder().WithCondition(*cb).Build()
if err != nil {
return nil, err
}
del.ExpressionAttributeNames = ce.Names()
del.ExpressionAttributeValues = ce.Values()
del.ConditionExpression = ce.Condition()
}
return &writeOp{
action: a,
writeItem: &dyn.TransactWriteItem{Delete: del},
run: func(ctx context.Context) error {
in := &dyn.DeleteItemInput{
TableName: del.TableName,
Key: del.Key,
ConditionExpression: del.ConditionExpression,
ExpressionAttributeNames: del.ExpressionAttributeNames,
ExpressionAttributeValues: del.ExpressionAttributeValues,
}
if opts.BeforeDo != nil {
if err := opts.BeforeDo(driver.AsFunc(in)); err != nil {
return err
}
}
_, err := c.db.DeleteItemWithContext(ctx, in)
return err
},
}, nil
}
func (c *collection) newUpdate(a *driver.Action, opts *driver.RunActionsOptions) (*writeOp, error) {
av, err := encodeDocKeyFields(a.Doc, c.partitionKey, c.sortKey)
if err != nil {
return nil, err
}
var ub expression.UpdateBuilder
for _, m := range a.Mods {
// TODO(shantuo): check for invalid field paths
fp := expression.Name(strings.Join(m.FieldPath, "."))
if inc, ok := m.Value.(driver.IncOp); ok {
ub = ub.Add(fp, expression.Value(inc.Amount))
} else if m.Value == nil {
ub = ub.Remove(fp)
} else {
ub = ub.Set(fp, expression.Value(m.Value))
}
}
rev := driver.UniqueString()
ub = ub.Set(expression.Name(c.opts.RevisionField), expression.Value(rev))
cb, err := c.precondition(a)
if err != nil {
return nil, err
}
ce, err := expression.NewBuilder().WithCondition(*cb).WithUpdate(ub).Build()
if err != nil {
return nil, err
}
up := &dyn.Update{
TableName: &c.table,
Key: av.M,
ConditionExpression: ce.Condition(),
UpdateExpression: ce.Update(),
ExpressionAttributeNames: ce.Names(),
ExpressionAttributeValues: ce.Values(),
}
return &writeOp{
action: a,
writeItem: &dyn.TransactWriteItem{Update: up},
newRevision: rev,
run: func(ctx context.Context) error {
in := &dyn.UpdateItemInput{
TableName: up.TableName,
Key: up.Key,
ConditionExpression: up.ConditionExpression,
UpdateExpression: up.UpdateExpression,
ExpressionAttributeNames: up.ExpressionAttributeNames,
ExpressionAttributeValues: up.ExpressionAttributeValues,
}
if opts.BeforeDo != nil {
if err := opts.BeforeDo(driver.AsFunc(in)); err != nil {
return err
}
}
_, err := c.db.UpdateItemWithContext(ctx, in)
return err
},
}, nil
}
// Handle the effects of successful execution.
func (c *collection) onSuccess(op *writeOp) {
// Set the new partition key (if any) and the new revision into the user's document.
if op.newPartitionKey != "" {
_ = op.action.Doc.SetField(c.partitionKey, op.newPartitionKey) // cannot fail
}
if op.newRevision != "" {
_ = op.action.Doc.SetField(c.opts.RevisionField, op.newRevision) // OK if there is no revision field
}
}
func (c *collection) missingKeyField(m map[string]*dyn.AttributeValue) string {
if v, ok := m[c.partitionKey]; !ok || v.NULL != nil {
return c.partitionKey
}
if v, ok := m[c.sortKey]; (!ok || v.NULL != nil) && c.sortKey != "" {
return c.sortKey
}
return ""
}
// Construct the precondition for the action.
func (c *collection) precondition(a *driver.Action) (*expression.ConditionBuilder, error) {
switch a.Kind {
case driver.Create:
// Precondition: the document doesn't already exist. (Precisely: the partitionKey
// field is not on the document.)
c := expression.AttributeNotExists(expression.Name(c.partitionKey))
return &c, nil
case driver.Replace, driver.Update:
// Precondition: the revision matches, or if there is no revision, then
// the document exists.
cb, err := revisionPrecondition(a.Doc, c.opts.RevisionField)
if err != nil {
return nil, err
}
if cb == nil {
c := expression.AttributeExists(expression.Name(c.partitionKey))
cb = &c
}
return cb, nil
case driver.Put, driver.Delete:
// Precondition: the revision matches, if any.
return revisionPrecondition(a.Doc, c.opts.RevisionField)
case driver.Get:
// No preconditions on a Get.
return nil, nil
default:
panic("bad action kind")
}
}
// revisionPrecondition returns a DynamoDB expression that asserts that the
// stored document's revision matches the revision of doc.
func revisionPrecondition(doc driver.Document, revField string) (*expression.ConditionBuilder, error) {
v, err := doc.GetField(revField)
if err != nil { // field not present
return nil, nil
}
if v == nil { // field is present, but nil
return nil, nil
}
rev, ok := v.(string)
if !ok {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil,
"%s field contains wrong type: got %T, want string",
revField, v)
}
if rev == "" {
return nil, nil
}
// Value encodes rev to an attribute value.
cb := expression.Name(revField).Equal(expression.Value(rev))
return &cb, nil
}
// TODO(jba): use this if/when we support atomic writes.
func (c *collection) transactWrite(ctx context.Context, actions []*driver.Action, errs []error, opts *driver.RunActionsOptions, start, end int) {
setErr := func(err error) {
for i := start; i <= end; i++ {
errs[actions[i].Index] = err
}
}
var ops []*writeOp
tws := make([]*dyn.TransactWriteItem, 0, end-start+1)
for i := start; i <= end; i++ {
a := actions[i]
op, err := c.newWriteOp(a, opts)
if err != nil {
setErr(err)
return
}
ops = append(ops, op)
tws = append(tws, op.writeItem)
}
in := &dyn.TransactWriteItemsInput{
ClientRequestToken: aws.String(driver.UniqueString()),
TransactItems: tws,
}
if opts.BeforeDo != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**dyn.TransactWriteItemsInput)
if !ok {
return false
}
*p = in
return true
}
if err := opts.BeforeDo(asFunc); err != nil {
setErr(err)
return
}
}
if _, err := c.db.TransactWriteItemsWithContext(ctx, in); err != nil {
setErr(err)
return
}
for _, op := range ops {
c.onSuccess(op)
}
}
// RevisionToBytes implements driver.RevisionToBytes.
func (c *collection) RevisionToBytes(rev interface{}) ([]byte, error) {
s, ok := rev.(string)
if !ok {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "revision %v of type %[1]T is not a string", rev)
}
return []byte(s), nil
}
// BytesToRevision implements driver.BytesToRevision.
func (c *collection) BytesToRevision(b []byte) (interface{}, error) {
return string(b), nil
}
func (c *collection) As(i interface{}) bool {
p, ok := i.(**dyn.DynamoDB)
if !ok {
return false
}
*p = c.db
return true
}
// ErrorAs implements driver.Collection.ErrorAs.
func (c *collection) ErrorAs(err error, i interface{}) bool {
e, ok := err.(awserr.Error)
if !ok {
return false
}
p, ok := i.(*awserr.Error)
if !ok {
return false
}
*p = e
return true
}
func (c *collection) ErrorCode(err error) gcerr.ErrorCode {
ae, ok := err.(awserr.Error)
if !ok {
return gcerr.Unknown
}
ec, ok := errorCodeMap[ae.Code()]
if !ok {
return gcerr.Unknown
}
return ec
}
var errorCodeMap = map[string]gcerrors.ErrorCode{
dyn.ErrCodeConditionalCheckFailedException: gcerr.FailedPrecondition,
dyn.ErrCodeProvisionedThroughputExceededException: gcerr.ResourceExhausted,
dyn.ErrCodeResourceNotFoundException: gcerr.NotFound,
dyn.ErrCodeItemCollectionSizeLimitExceededException: gcerr.ResourceExhausted,
dyn.ErrCodeTransactionConflictException: gcerr.Internal,
dyn.ErrCodeRequestLimitExceeded: gcerr.ResourceExhausted,
dyn.ErrCodeInternalServerError: gcerr.Internal,
dyn.ErrCodeTransactionCanceledException: gcerr.FailedPrecondition,
dyn.ErrCodeTransactionInProgressException: gcerr.InvalidArgument,
dyn.ErrCodeIdempotentParameterMismatchException: gcerr.InvalidArgument,
"ValidationException": gcerr.InvalidArgument,
}
// Close implements driver.Collection.Close.
func (c *collection) Close() error { return nil }
| 1 | 19,113 | Why `c.RevisionField()` here but `c.opts.RevisionField` just below? | google-go-cloud | go |
@@ -139,7 +139,6 @@ type Context struct {
OriginalTargetReached bool
OvmExecutionManager dump.OvmDumpAccount
OvmStateManager dump.OvmDumpAccount
- OvmMockAccount dump.OvmDumpAccount
OvmSafetyChecker dump.OvmDumpAccount
}
| 1 | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm
import (
"bytes"
"crypto/rand"
"encoding/hex"
"math/big"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rollup/dump"
)
// Will be removed when we update EM to return data in `run`.
var deadPrefix, fortyTwoPrefix, zeroPrefix []byte
func init() {
deadPrefix = hexutil.MustDecode("0xdeaddeaddeaddeaddeaddeaddeaddeaddead")
zeroPrefix = hexutil.MustDecode("0x000000000000000000000000000000000000")
fortyTwoPrefix = hexutil.MustDecode("0x420000000000000000000000000000000000")
}
// emptyCodeHash is used by create to ensure deployment is disallowed to already
// deployed contract addresses (relevant after the account abstraction).
var emptyCodeHash = crypto.Keccak256Hash(nil)
type (
// CanTransferFunc is the signature of a transfer guard function
CanTransferFunc func(StateDB, common.Address, *big.Int) bool
// TransferFunc is the signature of a transfer function
TransferFunc func(StateDB, common.Address, common.Address, *big.Int)
// GetHashFunc returns the n'th block hash in the blockchain
// and is used by the BLOCKHASH EVM op code.
GetHashFunc func(uint64) common.Hash
)
// run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter.
func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, error) {
if UsingOVM {
// OVM_ENABLED
// Only log for non `eth_call`s
if evm.Context.EthCallSender == nil {
log.Debug("Calling contract", "ID", evm.Id, "Address", contract.Address().Hex(), "Data", hexutil.Encode(input))
}
// Uncomment to make Safety checker always returns true.
// if contract.Address() == evm.Context.SafetyChecker.Address {
// return AbiBytesTrue, nil
// }
// If we're calling the state manager, we want to use our native implementation instead.
if contract.Address() == evm.Context.OvmStateManager.Address {
// The caller must be the execution manager
if contract.Caller() != evm.Context.OvmExecutionManager.Address {
log.Error("StateManager called by non ExecutionManager", "ID", evm.Id, "caller", contract.Caller().Hex())
return nil, ErrOvmSandboxEscape
}
return callStateManager(input, evm, contract)
}
}
if contract.CodeAddr != nil {
precompiles := PrecompiledContractsHomestead
if evm.chainRules.IsByzantium {
precompiles = PrecompiledContractsByzantium
}
if evm.chainRules.IsIstanbul {
precompiles = PrecompiledContractsIstanbul
}
if p := precompiles[*contract.CodeAddr]; p != nil {
return RunPrecompiledContract(p, input, contract)
}
}
for _, interpreter := range evm.interpreters {
if interpreter.CanRun(contract.Code) {
if evm.interpreter != interpreter {
// Ensure that the interpreter pointer is set back
// to its current value upon return.
defer func(i Interpreter) {
evm.interpreter = i
}(evm.interpreter)
evm.interpreter = interpreter
}
return interpreter.Run(contract, input, readOnly)
}
}
return nil, ErrNoCompatibleInterpreter
}
// Context provides the EVM with auxiliary information. Once provided
// it shouldn't be modified.
type Context struct {
// CanTransfer returns whether the account contains
// sufficient ether to transfer the value
CanTransfer CanTransferFunc
// Transfer transfers ether from one account to the other
Transfer TransferFunc
// GetHash returns the hash corresponding to n
GetHash GetHashFunc
// Message information
Origin common.Address // Provides information for ORIGIN
GasPrice *big.Int // Provides information for GASPRICE
// Block information
Coinbase common.Address // Provides information for COINBASE
GasLimit uint64 // Provides information for GASLIMIT
BlockNumber *big.Int // Provides information for NUMBER
Time *big.Int // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY
// OVM_ADDITION
EthCallSender *common.Address
OriginalTargetAddress *common.Address
OriginalTargetResult []byte
OriginalTargetReached bool
OvmExecutionManager dump.OvmDumpAccount
OvmStateManager dump.OvmDumpAccount
OvmMockAccount dump.OvmDumpAccount
OvmSafetyChecker dump.OvmDumpAccount
}
// EVM is the Ethereum Virtual Machine base object and provides
// the necessary tools to run a contract on the given state with
// the provided context. It should be noted that any error
// generated through any of the calls should be considered a
// revert-state-and-consume-all-gas operation, no checks on
// specific errors should ever be performed. The interpreter makes
// sure that any errors generated are to be considered faulty code.
//
// The EVM should never be reused and is not thread safe.
type EVM struct {
// Context provides auxiliary blockchain related information
Context
// StateDB gives access to the underlying state
StateDB StateDB
// Depth is the current call stack
depth int
// chainConfig contains information about the current chain
chainConfig *params.ChainConfig
// chain rules contains the chain rules for the current epoch
chainRules params.Rules
// virtual machine configuration options used to initialise the
// evm.
vmConfig Config
// global (to this context) ethereum virtual machine
// used throughout the execution of the tx.
interpreters []Interpreter
interpreter Interpreter
// abort is used to abort the EVM calling operations
// NOTE: must be set atomically
abort int32
// callGasTemp holds the gas available for the current call. This is needed because the
// available gas is calculated in gasCall* according to the 63/64 rule and later
// applied in opCall*.
callGasTemp uint64
Id string
}
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
// only ever be used *once*.
func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
// Add the ExecutionManager and StateManager to the Context here to
// prevent the need to update function signatures across the codebase.
if chainConfig != nil && chainConfig.StateDump != nil {
ctx.OvmExecutionManager = chainConfig.StateDump.Accounts["OVM_ExecutionManager"]
ctx.OvmStateManager = chainConfig.StateDump.Accounts["OVM_StateManager"]
ctx.OvmMockAccount = chainConfig.StateDump.Accounts["mockOVM_ECDSAContractAccount"]
ctx.OvmSafetyChecker = chainConfig.StateDump.Accounts["OVM_SafetyChecker"]
}
id := make([]byte, 4)
rand.Read(id)
evm := &EVM{
Context: ctx,
StateDB: statedb,
vmConfig: vmConfig,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(ctx.BlockNumber),
interpreters: make([]Interpreter, 0, 1),
Id: hex.EncodeToString(id),
}
if chainConfig.IsEWASM(ctx.BlockNumber) {
// to be implemented by EVM-C and Wagon PRs.
// if vmConfig.EWASMInterpreter != "" {
// extIntOpts := strings.Split(vmConfig.EWASMInterpreter, ":")
// path := extIntOpts[0]
// options := []string{}
// if len(extIntOpts) > 1 {
// options = extIntOpts[1..]
// }
// evm.interpreters = append(evm.interpreters, NewEVMVCInterpreter(evm, vmConfig, options))
// } else {
// evm.interpreters = append(evm.interpreters, NewEWASMInterpreter(evm, vmConfig))
// }
panic("No supported ewasm interpreter yet.")
}
// vmConfig.EVMInterpreter will be used by EVM-C, it won't be checked here
// as we always want to have the built-in EVM as the failover option.
evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, vmConfig))
evm.interpreter = evm.interpreters[0]
return evm
}
// Cancel cancels any running EVM operation. This may be called concurrently and
// it's safe to be called multiple times.
func (evm *EVM) Cancel() {
atomic.StoreInt32(&evm.abort, 1)
}
// Cancelled returns true if Cancel has been called
func (evm *EVM) Cancelled() bool {
return atomic.LoadInt32(&evm.abort) == 1
}
// Interpreter returns the current interpreter
func (evm *EVM) Interpreter() Interpreter {
return evm.interpreter
}
// Call executes the contract associated with the addr with the given input as
// parameters. It also handles any necessary value transfer required and takes
// the necessary steps to create accounts and reverses the state in case of an
// execution error or failed value transfer.
func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
var isTarget = false
if UsingOVM {
// OVM_ENABLED
if evm.depth == 0 {
// We're inside a new transaction, so make sure to wipe these variables beforehand.
evm.Context.OriginalTargetAddress = nil
evm.Context.OriginalTargetResult = []byte("00")
evm.Context.OriginalTargetReached = false
}
if caller.Address() == evm.Context.OvmExecutionManager.Address &&
!bytes.HasPrefix(addr.Bytes(), deadPrefix) &&
!bytes.HasPrefix(addr.Bytes(), zeroPrefix) &&
!bytes.HasPrefix(addr.Bytes(), fortyTwoPrefix) &&
evm.Context.OriginalTargetAddress == nil {
// Whew. Okay, so: we consider ourselves to be at a "target" as long as we were called
// by the execution manager, and we're not a precompile or "dead" address.
evm.Context.OriginalTargetAddress = &addr
evm.Context.OriginalTargetReached = true
isTarget = true
}
// Handle eth_call
if evm.Context.EthCallSender != nil && (caller.Address() == common.Address{}) {
evm.Context.OriginalTargetReached = true
isTarget = true
}
}
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
// Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
if !UsingOVM {
// OVM_DISABLED
// Fail if we're trying to transfer more than the available balance
if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
}
var (
to = AccountRef(addr)
snapshot = evm.StateDB.Snapshot()
)
if !evm.StateDB.Exist(addr) {
precompiles := PrecompiledContractsHomestead
if evm.chainRules.IsByzantium {
precompiles = PrecompiledContractsByzantium
}
if evm.chainRules.IsIstanbul {
precompiles = PrecompiledContractsIstanbul
}
if precompiles[addr] == nil && evm.chainRules.IsEIP158 && value.Sign() == 0 {
// Calling a non existing account, don't do anything, but ping the tracer
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value)
evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil)
}
return nil, gas, nil
}
evm.StateDB.CreateAccount(addr)
}
if !UsingOVM {
// OVM_DISABLED
evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value)
}
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, to, value, gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
// Even if the account has no code, we need to continue because it might be a precompile
start := time.Now()
// Capture the tracer start/end events in debug mode
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value)
defer func() { // Lazy evaluation of the parameters
evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
}()
}
ret, err = run(evm, contract, input, false)
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in homestead this also counts for code storage gas errors.
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
if err != errExecutionReverted {
contract.UseGas(contract.Gas)
}
}
if UsingOVM {
// OVM_ENABLED
if isTarget {
// If this was our target contract, store the result so that it can be later re-inserted
// into the user-facing return data (as seen below).
evm.Context.OriginalTargetResult = ret
}
if evm.depth == 0 {
// We're back at the root-level message call, so we'll need to modify the return data
// sent to us by the OVM_ExecutionManager to instead be the intended return data.
if !evm.Context.OriginalTargetReached {
// If we didn't get to the target contract, then our execution somehow failed
// (perhaps due to insufficient gas). Just return an error that represents this.
ret = common.FromHex("0x")
err = ErrOvmExecutionFailed
} else if len(evm.Context.OriginalTargetResult) >= 96 {
// We expect that EOA contracts return at least 96 bytes of data, where the first
// 32 bytes are the boolean success value and the next 64 bytes are unnecessary
// ABI encoding data. The actual return data starts at the 96th byte and can be
// empty.
success := evm.Context.OriginalTargetResult[:32]
ret = evm.Context.OriginalTargetResult[96:]
if !bytes.Equal(success, AbiBytesTrue) && !bytes.Equal(success, AbiBytesFalse) {
// If the first 32 bytes not either are the ABI encoding of "true" or "false",
// then the user hasn't correctly ABI encoded the result. We return the null
// hex string as a default here (an annoying default that would convince most
// people to just use the standard form).
ret = common.FromHex("0x")
} else if bytes.Equal(success, AbiBytesFalse) {
// If the first 32 bytes are the ABI encoding of "false", then we need to add an
// artificial error that represents the revert.
err = errExecutionReverted
// We also currently need to add an extra four empty bytes to the return data
// to appease ethers.js. Our return correctly inserts the four specific bytes
// that represent a "string error" to clients, but somehow the returndata size
// is a multiple of 32 (when we expect size % 32 == 4). ethers.js checks that
// [size % 32 == 4] before trying to decode a string error result. Adding these
// four empty bytes tricks ethers into correctly decoding the error string.
// ovmTODO: Figure out how to actually deal with this.
// ovmTODO: This may actually be completely broken if the first four bytes of
// the return data are **not** the specific "string error" bytes.
ret = append(ret, make([]byte, 4)...)
}
} else {
// User hasn't conformed the standard format, just return "null" for the success
// (with no return data) to convince them to use the standard.
ret = common.FromHex("0x")
}
if evm.Context.EthCallSender == nil {
log.Debug("Reached the end of an OVM execution", "ID", evm.Id, "Return Data", hexutil.Encode(ret), "Error", err)
}
}
}
return ret, contract.Gas, err
}
// CallCode executes the contract associated with the addr with the given input
// as parameters. It also handles any necessary value transfer required and takes
// the necessary steps to create accounts and reverses the state in case of an
// execution error or failed value transfer.
//
// CallCode differs from Call in the sense that it executes the given address'
// code with the caller as context.
func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
// Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
if !UsingOVM {
// OVM_DISABLED
// Fail if we're trying to transfer more than the available balance
if !evm.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
}
var (
snapshot = evm.StateDB.Snapshot()
to = AccountRef(caller.Address())
)
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, to, value, gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
ret, err = run(evm, contract, input, false)
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
if err != errExecutionReverted {
contract.UseGas(contract.Gas)
}
}
return ret, contract.Gas, err
}
// DelegateCall executes the contract associated with the addr with the given input
// as parameters. It reverses the state in case of an execution error.
//
// DelegateCall differs from CallCode in the sense that it executes the given address'
// code with the caller as context and the caller is set to the caller of the caller.
func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
// Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
var (
snapshot = evm.StateDB.Snapshot()
to = AccountRef(caller.Address())
)
// Initialise a new contract and make initialise the delegate values
contract := NewContract(caller, to, nil, gas).AsDelegate()
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
ret, err = run(evm, contract, input, false)
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
if err != errExecutionReverted {
contract.UseGas(contract.Gas)
}
}
return ret, contract.Gas, err
}
// StaticCall executes the contract associated with the addr with the given input
// as parameters while disallowing any modifications to the state during the call.
// Opcodes that attempt to perform such modifications will result in exceptions
// instead of performing the modifications.
func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
// Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
var (
to = AccountRef(addr)
snapshot = evm.StateDB.Snapshot()
)
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, to, new(big.Int), gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
// We do an AddBalance of zero here, just in order to trigger a touch.
// This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
// but is the correct thing to do and matters on other networks, in tests, and potential
// future scenarios
evm.StateDB.AddBalance(addr, bigZero)
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
ret, err = run(evm, contract, input, true)
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
if err != errExecutionReverted {
contract.UseGas(contract.Gas)
}
}
return ret, contract.Gas, err
}
type codeAndHash struct {
code []byte
hash common.Hash
}
func (c *codeAndHash) Hash() common.Hash {
if c.hash == (common.Hash{}) {
c.hash = crypto.Keccak256Hash(c.code)
}
return c.hash
}
// create creates a new contract using code as deployment code.
func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
return nil, common.Address{}, gas, ErrDepth
}
if !UsingOVM {
// OVM_DISABLED
if !evm.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, common.Address{}, gas, ErrInsufficientBalance
}
}
// Ensure there's no existing contract already at the designated address
contractHash := evm.StateDB.GetCodeHash(address)
if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) {
return nil, common.Address{}, 0, ErrContractAddressCollision
}
// Create a new account on the state
snapshot := evm.StateDB.Snapshot()
evm.StateDB.CreateAccount(address)
if evm.chainRules.IsEIP158 {
evm.StateDB.SetNonce(address, 1)
}
if !UsingOVM {
// OVM_DISABLED
evm.Transfer(evm.StateDB, caller.Address(), address, value)
}
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, AccountRef(address), value, gas)
contract.SetCodeOptionalHash(&address, codeAndHash)
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, address, gas, nil
}
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, codeAndHash.code, gas, value)
}
start := time.Now()
ret, err := run(evm, contract, nil, false)
// check whether the max code size has been exceeded
maxCodeSizeExceeded := evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize
// if the contract creation ran successfully and no errors were returned
// calculate the gas required to store the code. If the code could not
// be stored due to not enough gas set an error and let it be handled
// by the error checking condition below.
if err == nil && !maxCodeSizeExceeded {
createDataGas := uint64(len(ret)) * params.CreateDataGas
if contract.UseGas(createDataGas) {
evm.StateDB.SetCode(address, ret)
} else {
err = ErrCodeStoreOutOfGas
}
}
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in homestead this also counts for code storage gas errors.
if maxCodeSizeExceeded || (err != nil && (evm.chainRules.IsHomestead || err != ErrCodeStoreOutOfGas)) {
evm.StateDB.RevertToSnapshot(snapshot)
if err != errExecutionReverted {
contract.UseGas(contract.Gas)
}
}
// Assign err if contract code size exceeds the max while the err is still empty.
if maxCodeSizeExceeded && err == nil {
err = errMaxCodeSizeExceeded
}
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
}
return ret, address, contract.Gas, err
}
// Create creates a new contract using code as deployment code.
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
if !UsingOVM {
// OVM_DISABLED
contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address()))
} else {
// OVM_ENABLED
if caller.Address() != evm.Context.OvmExecutionManager.Address {
log.Error("Creation called by non-Execution Manager contract! This should never happen.", "Offending address", caller.Address().Hex())
return nil, caller.Address(), 0, ErrOvmCreationFailed
}
contractAddr = evm.OvmADDRESS()
if evm.Context.EthCallSender == nil {
log.Debug("[EM] Creating contract.", "ID", evm.Id, "New contract address", contractAddr.Hex(), "Caller Addr", caller.Address().Hex(), "Caller nonce", evm.StateDB.GetNonce(caller.Address()))
}
}
return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr)
}
// Create2 creates a new contract using code as deployment code.
//
// The different between Create2 with Create is Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
codeAndHash := &codeAndHash{code: code}
if !UsingOVM {
// OVM_DISABLED
contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), codeAndHash.Hash().Bytes())
} else {
// OVM_ENABLED
if caller.Address() != evm.Context.OvmExecutionManager.Address {
log.Error("Creation called by non-Execution Manager contract! This should never happen.", "Offending address", caller.Address().Hex())
return nil, caller.Address(), 0, ErrOvmCreationFailed
}
contractAddr = evm.OvmADDRESS()
if evm.Context.EthCallSender == nil {
log.Debug("[EM] Creating contract [create2].", "ID", evm.Id, "New contract address", contractAddr.Hex(), "Caller Addr", caller.Address().Hex(), "Caller nonce", evm.StateDB.GetNonce(caller.Address()))
}
}
return evm.create(caller, codeAndHash, gas, endowment, contractAddr)
}
// ChainConfig returns the environment's chain configuration
func (evm *EVM) ChainConfig() *params.ChainConfig { return evm.chainConfig }
// OvmADDRESS will be set by the execution manager to the target address whenever it's
// about to create a new contract. This value is currently stored at the [15] storage slot.
// Can pull this specific storage slot to get the address that the execution manager is
// trying to create to, and create to it.
func (evm *EVM) OvmADDRESS() common.Address {
slot := common.Hash{31: 0x0f}
return common.BytesToAddress(evm.StateDB.GetState(evm.Context.OvmExecutionManager.Address, slot).Bytes())
}
| 1 | 14,947 | nit: this file diff seems unrelated and probably should have been a separate PR | ethereum-optimism-optimism | go |
@@ -38,7 +38,7 @@ extern "C" {
void latte(int *, int *, double *, int *, int *,
double *, double *, double *, double *,
double *, double *, double *, int*,
- double *, double *, double *, double * );
+ double *, double *, double *, double *, bool *);
}
#define INVOKED_PERATOM 8 | 1 | /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, [email protected]
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Christian Negre (LANL)
------------------------------------------------------------------------- */
#include <stdio.h>
#include <string.h>
#include "fix_latte.h"
#include "atom.h"
#include "comm.h"
#include "update.h"
#include "neighbor.h"
#include "domain.h"
#include "force.h"
#include "neigh_request.h"
#include "neigh_list.h"
#include "modify.h"
#include "compute.h"
#include "memory.h"
#include "error.h"
using namespace LAMMPS_NS;
using namespace FixConst;
extern "C" {
void latte(int *, int *, double *, int *, int *,
double *, double *, double *, double *,
double *, double *, double *, int*,
double *, double *, double *, double * );
}
#define INVOKED_PERATOM 8
/* ---------------------------------------------------------------------- */
FixLatte::FixLatte(LAMMPS *lmp, int narg, char **arg) :
Fix(lmp, narg, arg)
{
if (strcmp(update->unit_style,"metal") != 0)
error->all(FLERR,"Must use units metal with fix latte command");
if (comm->nprocs != 1)
error->all(FLERR,"Fix latte currently runs only in serial");
if (narg != 4) error->all(FLERR,"Illegal fix latte command");
scalar_flag = 1;
global_freq = 1;
extscalar = 1;
virial_flag = 1;
thermo_virial = 1;
// store ID of compute pe/atom used to generate Coulomb potential for LATTE
// NULL means LATTE will compute Coulombic potential
coulomb = 0;
id_pe = NULL;
if (strcmp(arg[3],"NULL") != 0) {
coulomb = 1;
error->all(FLERR,"Fix latte does not yet support a LAMMPS calculation "
"of a Coulomb potential");
int n = strlen(arg[3]) + 1;
id_pe = new char[n];
strcpy(id_pe,arg[3]);
int ipe = modify->find_compute(id_pe);
if (ipe < 0) error->all(FLERR,"Could not find fix latte compute ID");
if (modify->compute[ipe]->peatomflag == 0)
error->all(FLERR,"Fix latte compute ID does not compute pe/atom");
}
// initializations
nmax = 0;
qpotential = NULL;
flatte = NULL;
latte_energy = 0.0;
}
/* ---------------------------------------------------------------------- */
FixLatte::~FixLatte()
{
delete [] id_pe;
memory->destroy(qpotential);
memory->destroy(flatte);
}
/* ---------------------------------------------------------------------- */
int FixLatte::setmask()
{
int mask = 0;
//mask |= INITIAL_INTEGRATE;
//mask |= FINAL_INTEGRATE;
mask |= PRE_REVERSE;
mask |= POST_FORCE;
mask |= MIN_POST_FORCE;
mask |= THERMO_ENERGY;
return mask;
}
/* ---------------------------------------------------------------------- */
void FixLatte::init()
{
// error checks
if (domain->dimension == 2)
error->all(FLERR,"Fix latte requires 3d problem");
if (coulomb) {
if (atom->q_flag == 0 || force->pair == NULL || force->kspace == NULL)
error->all(FLERR,"Fix latte cannot compute Coulomb potential");
int ipe = modify->find_compute(id_pe);
if (ipe < 0) error->all(FLERR,"Could not find fix latte compute ID");
c_pe = modify->compute[ipe];
}
// must be fully periodic or fully non-periodic
if (domain->nonperiodic == 0) pbcflag = 1;
else if (!domain->xperiodic && !domain->yperiodic && !domain->zperiodic)
pbcflag = 0;
else error->all(FLERR,"Fix latte requires 3d simulation");
// create qpotential & flatte if needed
// for now, assume nlocal will never change
if (coulomb && qpotential == NULL) {
memory->create(qpotential,atom->nlocal,"latte:qpotential");
memory->create(flatte,atom->nlocal,3,"latte:flatte");
}
/*
// warn if any integrate fix comes after this one
// is it actually necessary for q(n) update to come after x,v update ??
int after = 0;
int flag = 0;
for (int i = 0; i < modify->nfix; i++) {
if (strcmp(id,modify->fix[i]->id) == 0) after = 1;
else if ((modify->fmask[i] & INITIAL_INTEGRATE) && after) flag = 1;
}
if (flag && comm->me == 0)
error->warning(FLERR,"Fix latte should come after all other "
"integration fixes");
*/
/*
// need a full neighbor list
// could we use a half list?
// perpetual list, built whenever re-neighboring occurs
int irequest = neighbor->request(this,instance_me);
neighbor->requests[irequest]->pair = 0;
neighbor->requests[irequest]->fix = 1;
neighbor->requests[irequest]->half = 0;
neighbor->requests[irequest]->full = 1;
*/
}
/* ---------------------------------------------------------------------- */
void FixLatte::init_list(int id, NeighList *ptr)
{
// list = ptr;
}
/* ---------------------------------------------------------------------- */
void FixLatte::setup(int vflag)
{
post_force(vflag);
}
/* ---------------------------------------------------------------------- */
void FixLatte::min_setup(int vflag)
{
post_force(vflag);
}
/* ---------------------------------------------------------------------- */
void FixLatte::setup_pre_reverse(int eflag, int vflag)
{
pre_reverse(eflag,vflag);
}
/* ----------------------------------------------------------------------
integrate electronic degrees of freedom
------------------------------------------------------------------------- */
void FixLatte::initial_integrate(int vflag) {}
/* ----------------------------------------------------------------------
store eflag, so can use it in post_force to tally per-atom energies
------------------------------------------------------------------------- */
void FixLatte::pre_reverse(int eflag, int vflag)
{
eflag_caller = eflag;
}
/* ---------------------------------------------------------------------- */
void FixLatte::post_force(int vflag)
{
int eflag = eflag_caller;
if (eflag || vflag) ev_setup(eflag,vflag);
else evflag = eflag_global = vflag_global = eflag_atom = vflag_atom = 0;
// compute Coulombic potential = pe[i]/q[i]
// invoke compute pe/atom
// wrap with clear/add and trigger pe/atom calculation every step
if (coulomb) {
modify->clearstep_compute();
if (!(c_pe->invoked_flag & INVOKED_PERATOM)) {
c_pe->compute_peratom();
c_pe->invoked_flag |= INVOKED_PERATOM;
}
modify->addstep_compute(update->ntimestep+1);
double *pe = c_pe->vector_atom;
double *q = atom->q;
int nlocal = atom->nlocal;
for (int i = 0; i < nlocal; i++)
if (q[i]) qpotential[i] = pe[i]/q[i];
else qpotential[i] = 0.0;
}
// hardwire these unsupported flags for now
int coulombflag = 0;
// pe_peratom = 0;
// virial_global = 1; // set via vflag_global at some point
// virial_peratom = 0;
neighflag = 0;
// set flags used by LATTE
// NOTE: LATTE does not compute per-atom energies or virials
int flags[6];
flags[0] = pbcflag; // 1 for fully periodic, 0 for fully non-periodic
flags[1] = coulombflag; // 1 for LAMMPS computes Coulombics, 0 for LATTE
flags[2] = eflag_atom; // 1 to return per-atom energies, 0 for no
flags[3] = vflag_global && thermo_virial; // 1 to return global/per-atom
flags[4] = vflag_atom && thermo_virial; // virial, 0 for no
flags[5] = neighflag; // 1 to pass neighbor list to LATTE, 0 for no
// setup LATTE arguments
int natoms = atom->nlocal;
double *coords = &atom->x[0][0];
int *type = atom->type;
int ntypes = atom->ntypes;
double *mass = &atom->mass[1];
double *boxlo = domain->boxlo;
double *boxhi = domain->boxhi;
double *forces;
if (coulomb) forces = &flatte[0][0];
else forces = &atom->f[0][0];
int maxiter = -1;
latte(flags,&natoms,coords,type,&ntypes,mass,boxlo,boxhi,&domain->xy,
&domain->xz,&domain->yz,
forces,&maxiter,&latte_energy,&atom->v[0][0],&update->dt,virial);
// sum LATTE forces to LAMMPS forces
// e.g. LAMMPS may compute Coulombics at some point
if (coulomb) {
double **f = atom->f;
int nlocal = atom->nlocal;
for (int i = 0; i < nlocal; i++) {
f[i][0] += flatte[i][0];
f[i][1] += flatte[i][1];
f[i][2] += flatte[i][2];
}
}
}
/* ---------------------------------------------------------------------- */
void FixLatte::min_post_force(int vflag)
{
post_force(vflag);
}
/* ----------------------------------------------------------------------
integrate electronic degrees of freedom
------------------------------------------------------------------------- */
void FixLatte::final_integrate() {}
/* ---------------------------------------------------------------------- */
void FixLatte::reset_dt()
{
//dtv = update->dt;
//dtf = 0.5 * update->dt * force->ftm2v;
}
/* ----------------------------------------------------------------------
DFTB energy from LATTE
------------------------------------------------------------------------- */
double FixLatte::compute_scalar()
{
return latte_energy;
}
/* ----------------------------------------------------------------------
memory usage of local arrays
------------------------------------------------------------------------- */
double FixLatte::memory_usage()
{
double bytes = 0.0;
if (coulomb) bytes += nmax * sizeof(double);
if (coulomb) bytes += nmax*3 * sizeof(double);
return bytes;
}
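/* ----------------------------------------------------------------------
   sketch of the ABI guard suggested in review: if the LATTE library ever
   exports a (currently hypothetical) latte_abiversion() binding, the fix
   could verify compatibility up front instead of risking a segmentation
   fault inside latte()
------------------------------------------------------------------------- */

// extern "C" { int latte_abiversion(); }
//
// void FixLatte::check_abi_version()
// {
//   const int expected = 1;   // ABI version this interface was written against
//   if (latte_abiversion() != expected)
//     error->all(FLERR,"Incompatible LATTE library ABI version");
// }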
| 1 | 24,436 | @sjplimp just checked this API in the latte repository master branch (which is what Install.py downloads) and this still does not provide the 18th argument. We cannot merge this pull request until this is available. I would also suggest implementing a second binding, a function called latte_abiversion() returning an int with an ABI version number (starting with 1). This way, it can be tested whether the ABI expected by the LAMMPS interface and provided by the library is compatible *before* calling the `latte()` function and risking a segmentation fault. | lammps-lammps | cpp |
@@ -214,7 +214,10 @@ namespace Nethermind.JsonRpc.Modules.Eth
}
Account account = _stateReader.GetAccount(header.StateRoot, address);
- return Task.FromResult(ResultWrapper<UInt256?>.Success(account?.Nonce ?? 0));
+ UInt256 nonce = account?.Nonce ?? 0;
+ _logger.Warn($"{address} nonce is {nonce}");
+
+ return Task.FromResult(ResultWrapper<UInt256?>.Success(nonce));
}
public ResultWrapper<UInt256?> eth_getBlockTransactionCountByHash(Keccak blockHash) | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Security;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Blockchain.Filters;
using Nethermind.Blockchain.Find;
using Nethermind.Core;
using Nethermind.Core.Attributes;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Int256;
using Nethermind.Facade;
using Nethermind.JsonRpc.Data;
using Nethermind.Logging;
using Nethermind.Serialization.Rlp;
using Nethermind.State;
using Nethermind.State.Proofs;
using Nethermind.Trie;
using Nethermind.TxPool;
using Nethermind.Wallet;
using Block = Nethermind.Core.Block;
using BlockHeader = Nethermind.Core.BlockHeader;
using Signature = Nethermind.Core.Crypto.Signature;
using Transaction = Nethermind.Core.Transaction;
namespace Nethermind.JsonRpc.Modules.Eth
{
public class EthModule : IEthModule
{
private Encoding _messageEncoding = Encoding.UTF8;
private readonly IJsonRpcConfig _rpcConfig;
private readonly IBlockchainBridge _blockchainBridge;
private readonly IBlockFinder _blockFinder;
private readonly IStateReader _stateReader;
private readonly ITxPool _txPoolBridge;
private readonly ITxSender _txSender;
private readonly IWallet _wallet;
private readonly ILogger _logger;
private TimeSpan _cancellationTokenTimeout;
private bool HasStateForBlock(BlockHeader header)
{
RootCheckVisitor rootCheckVisitor = new RootCheckVisitor();
_blockchainBridge.RunTreeVisitor(rootCheckVisitor, header.StateRoot);
return rootCheckVisitor.HasRoot;
}
public EthModule(
IJsonRpcConfig rpcConfig,
IBlockchainBridge blockchainBridge,
IBlockFinder blockFinder,
IStateReader stateReader,
            ITxPool txPool,
ITxSender txSender,
IWallet wallet,
ILogManager logManager)
{
_logger = logManager.GetClassLogger();
_rpcConfig = rpcConfig ?? throw new ArgumentNullException(nameof(rpcConfig));
_blockchainBridge = blockchainBridge ?? throw new ArgumentNullException(nameof(blockchainBridge));
_blockFinder = blockFinder ?? throw new ArgumentNullException(nameof(blockFinder));
_stateReader = stateReader ?? throw new ArgumentNullException(nameof(stateReader));
            _txPoolBridge = txPool ?? throw new ArgumentNullException(nameof(txPool));
_txSender = txSender ?? throw new ArgumentNullException(nameof(txSender));
_wallet = wallet ?? throw new ArgumentNullException(nameof(wallet));
_cancellationTokenTimeout = TimeSpan.FromMilliseconds(rpcConfig.Timeout);
}
public ResultWrapper<string> eth_protocolVersion()
{
return ResultWrapper<string>.Success("0x41");
}
public ResultWrapper<SyncingResult> eth_syncing()
{
SyncingResult result;
long bestSuggestedNumber = _blockFinder.FindBestSuggestedHeader().Number;
bool isSyncing = bestSuggestedNumber > _blockFinder.Head.Number + 1;
if (isSyncing)
{
result = new SyncingResult
{
CurrentBlock = _blockFinder.Head.Number,
HighestBlock = bestSuggestedNumber,
StartingBlock = 0L,
IsSyncing = true
};
}
else
{
result = SyncingResult.NotSyncing;
}
return ResultWrapper<SyncingResult>.Success(result);
}
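        // Example: with Head.Number == 100, a best suggested header of 102 or
        // more reports syncing (CurrentBlock 100, HighestBlock >= 102); 101 or
        // below reports not syncing, since a one-block gap is tolerated.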
public ResultWrapper<byte[]> eth_snapshot()
{
return ResultWrapper<byte[]>.Fail("eth_snapshot not supported");
}
public ResultWrapper<Address> eth_coinbase()
{
return ResultWrapper<Address>.Success(Address.Zero);
}
public ResultWrapper<bool?> eth_mining()
{
return ResultWrapper<bool?>.Success(_blockchainBridge.IsMining);
}
public ResultWrapper<UInt256?> eth_hashrate()
{
return ResultWrapper<UInt256?>.Success(0);
}
[Todo("Gas pricer to be implemented")]
public ResultWrapper<UInt256?> eth_gasPrice()
{
return ResultWrapper<UInt256?>.Success(20.GWei());
}
public ResultWrapper<IEnumerable<Address>> eth_accounts()
{
try
{
var result = _wallet.GetAccounts();
Address[] data = result.ToArray();
return ResultWrapper<IEnumerable<Address>>.Success(data.ToArray());
}
catch (Exception)
{
return ResultWrapper<IEnumerable<Address>>.Fail("Error while getting key addresses from wallet.");
}
}
public Task<ResultWrapper<long?>> eth_blockNumber()
{
long number = _blockchainBridge.BeamHead?.Number ?? 0;
return Task.FromResult(ResultWrapper<long?>.Success(number));
}
public Task<ResultWrapper<UInt256?>> eth_getBalance(Address address, BlockParameter blockParameter = null)
{
SearchResult<BlockHeader> searchResult = _blockFinder.SearchForHeader(blockParameter);
if (searchResult.IsError)
{
return Task.FromResult(ResultWrapper<UInt256?>.Fail(searchResult));
}
BlockHeader header = searchResult.Object;
if (!HasStateForBlock(header))
{
return Task.FromResult(ResultWrapper<UInt256?>.Fail($"No state available for block {header.Hash}", ErrorCodes.ResourceUnavailable));
}
Account account = _stateReader.GetAccount(header.StateRoot, address);
return Task.FromResult(ResultWrapper<UInt256?>.Success(account?.Balance ?? UInt256.Zero));
}
public ResultWrapper<byte[]> eth_getStorageAt(Address address, UInt256 positionIndex, BlockParameter blockParameter = null)
{
SearchResult<BlockHeader> searchResult = _blockFinder.SearchForHeader(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<byte[]>.Fail(searchResult);
}
BlockHeader header = searchResult.Object;
Account account = _stateReader.GetAccount(header.StateRoot, address);
if (account == null)
{
return ResultWrapper<byte[]>.Success(Array.Empty<byte>());
}
var storage = _stateReader.GetStorage(account.StorageRoot, positionIndex);
return ResultWrapper<byte[]>.Success(storage.PadLeft(32));
}
public Task<ResultWrapper<UInt256?>> eth_getTransactionCount(Address address, BlockParameter blockParameter)
{
SearchResult<BlockHeader> searchResult = _blockFinder.SearchForHeader(blockParameter);
if (searchResult.IsError)
{
return Task.FromResult(ResultWrapper<UInt256?>.Fail(searchResult));
}
BlockHeader header = searchResult.Object;
if (!HasStateForBlock(header))
{
return Task.FromResult(ResultWrapper<UInt256?>.Fail($"No state available for block {header.Hash}", ErrorCodes.ResourceUnavailable));
}
Account account = _stateReader.GetAccount(header.StateRoot, address);
return Task.FromResult(ResultWrapper<UInt256?>.Success(account?.Nonce ?? 0));
}
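        // If per-request logging of the resolved nonce is ever needed above
        // (cf. the accompanying patch), the convention used elsewhere in this
        // class would gate it at Trace level rather than Warn, e.g.:
        // if (_logger.IsTrace) _logger.Trace($"eth_getTransactionCount request {address}, result: {nonce}");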
public ResultWrapper<UInt256?> eth_getBlockTransactionCountByHash(Keccak blockHash)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(new BlockParameter(blockHash));
if (searchResult.IsError)
{
return ResultWrapper<UInt256?>.Fail(searchResult);
}
return ResultWrapper<UInt256?>.Success((UInt256) searchResult.Object.Transactions.Length);
}
public ResultWrapper<UInt256?> eth_getBlockTransactionCountByNumber(BlockParameter blockParameter)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<UInt256?>.Fail(searchResult);
}
return ResultWrapper<UInt256?>.Success((UInt256) searchResult.Object.Transactions.Length);
}
public ResultWrapper<UInt256?> eth_getUncleCountByBlockHash(Keccak blockHash)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(new BlockParameter(blockHash));
if (searchResult.IsError)
{
return ResultWrapper<UInt256?>.Fail(searchResult);
}
return ResultWrapper<UInt256?>.Success((UInt256) searchResult.Object.Ommers.Length);
}
public ResultWrapper<UInt256?> eth_getUncleCountByBlockNumber(BlockParameter blockParameter)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<UInt256?>.Fail(searchResult);
}
return ResultWrapper<UInt256?>.Success((UInt256) searchResult.Object.Ommers.Length);
}
public ResultWrapper<byte[]> eth_getCode(Address address, BlockParameter blockParameter = null)
{
SearchResult<BlockHeader> searchResult = _blockFinder.SearchForHeader(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<byte[]>.Fail(searchResult);
}
BlockHeader header = searchResult.Object;
if (!HasStateForBlock(header))
{
return ResultWrapper<byte[]>.Fail($"No state available for block {header.Hash}", ErrorCodes.ResourceUnavailable);
}
Account account = _stateReader.GetAccount(header.StateRoot, address);
if (account == null)
{
return ResultWrapper<byte[]>.Success(Array.Empty<byte>());
}
var code = _stateReader.GetCode(account.CodeHash);
return ResultWrapper<byte[]>.Success(code);
}
public ResultWrapper<byte[]> eth_sign(Address addressData, byte[] message)
{
Signature sig;
try
{
Address address = addressData;
string messageText = _messageEncoding.GetString(message);
const string signatureTemplate = "\x19Ethereum Signed Message:\n{0}{1}";
string signatureText = string.Format(signatureTemplate, messageText.Length, messageText);
sig = _wallet.Sign(Keccak.Compute(signatureText), address);
}
catch (SecurityException e)
{
return ResultWrapper<byte[]>.Fail(e.Message, ErrorCodes.AccountLocked);
}
catch (Exception)
{
return ResultWrapper<byte[]>.Fail($"Unable to sign as {addressData}");
}
if (_logger.IsTrace) _logger.Trace($"eth_sign request {addressData}, {message}, result: {sig}");
return ResultWrapper<byte[]>.Success(sig.Bytes);
}
public Task<ResultWrapper<Keccak>> eth_sendTransaction(TransactionForRpc transactionForRpc)
{
Transaction tx = transactionForRpc.ToTransactionWithDefaults();
return SendTx(tx);
}
public async Task<ResultWrapper<Keccak>> eth_sendRawTransaction(byte[] transaction)
{
try
{
Transaction tx = Rlp.Decode<Transaction>(transaction, RlpBehaviors.AllowUnsigned);
return await SendTx(tx);
}
catch (RlpException)
{
return ResultWrapper<Keccak>.Fail("Invalid RLP.", ErrorCodes.TransactionRejected);
}
}
private async Task<ResultWrapper<Keccak>> SendTx(Transaction tx)
{
try
{
Keccak txHash = await _txSender.SendTransaction(tx, TxHandlingOptions.PersistentBroadcast);
return ResultWrapper<Keccak>.Success(txHash);
}
catch (SecurityException e)
{
return ResultWrapper<Keccak>.Fail(e.Message, ErrorCodes.AccountLocked);
}
catch (Exception e)
{
return ResultWrapper<Keccak>.Fail(e.Message, ErrorCodes.TransactionRejected);
}
}
public ResultWrapper<string> eth_call(TransactionForRpc transactionCall, BlockParameter blockParameter = null)
{
SearchResult<BlockHeader> searchResult = _blockFinder.SearchForHeader(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<string>.Fail(searchResult);
}
BlockHeader header = searchResult.Object;
if (!HasStateForBlock(header))
{
return ResultWrapper<string>.Fail($"No state available for block {header.Hash}", ErrorCodes.ResourceUnavailable);
}
FixCallTx(transactionCall, header);
using CancellationTokenSource cancellationTokenSource = new CancellationTokenSource(_cancellationTokenTimeout);
CancellationToken cancellationToken = cancellationTokenSource.Token;
Transaction tx = transactionCall.ToTransaction();
BlockchainBridge.CallOutput result = _blockchainBridge.Call(header, tx, cancellationToken);
return result.Error != null ? ResultWrapper<string>.Fail("VM execution error.", ErrorCodes.ExecutionError, result.Error) : ResultWrapper<string>.Success(result.OutputData.ToHexString(true));
}
private void FixCallTx(TransactionForRpc transactionCall, BlockHeader header)
{
if (transactionCall.Gas == null || transactionCall.Gas == 0)
{
transactionCall.Gas = _rpcConfig.GasCap ?? long.MaxValue;
}
else
{
transactionCall.Gas = Math.Min(_rpcConfig.GasCap ?? long.MaxValue, transactionCall.Gas.Value);
}
transactionCall.From ??= Address.SystemUser;
}
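        // Example: with GasCap = 10_000_000, a call specifying Gas = 30_000_000
        // is clamped to 10_000_000; a call with Gas unset (or 0) receives the
        // full cap, and a null GasCap falls back to long.MaxValue.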
public ResultWrapper<UInt256?> eth_estimateGas(TransactionForRpc transactionCall, BlockParameter blockParameter)
{
SearchResult<BlockHeader> searchResult = _blockFinder.SearchForHeader(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<UInt256?>.Fail(searchResult);
}
BlockHeader header = searchResult.Object;
if (!HasStateForBlock(header))
{
return ResultWrapper<UInt256?>.Fail($"No state available for block {header.Hash}", ErrorCodes.ResourceUnavailable);
}
return EstimateGas(transactionCall, header);
}
private ResultWrapper<UInt256?> EstimateGas(TransactionForRpc transactionCall, BlockHeader head)
{
FixCallTx(transactionCall, head);
using CancellationTokenSource cancellationTokenSource = new CancellationTokenSource(_cancellationTokenTimeout);
CancellationToken cancellationToken = cancellationTokenSource.Token;
BlockchainBridge.CallOutput result = _blockchainBridge.EstimateGas(head, transactionCall.ToTransaction(), cancellationToken);
if (result.Error == null)
{
return ResultWrapper<UInt256?>.Success((UInt256) result.GasSpent);
}
return ResultWrapper<UInt256?>.Fail(result.Error);
}
public ResultWrapper<BlockForRpc> eth_getBlockByHash(Keccak blockHash, bool returnFullTransactionObjects)
{
return GetBlock(new BlockParameter(blockHash), returnFullTransactionObjects);
}
public ResultWrapper<BlockForRpc> eth_getBlockByNumber(BlockParameter blockParameter, bool returnFullTransactionObjects)
{
return GetBlock(blockParameter, returnFullTransactionObjects);
}
private ResultWrapper<BlockForRpc> GetBlock(BlockParameter blockParameter, bool returnFullTransactionObjects)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(blockParameter, true);
if (searchResult.IsError)
{
return ResultWrapper<BlockForRpc>.Fail(searchResult);
}
Block block = searchResult.Object;
if (block != null)
{
_blockchainBridge.RecoverTxSenders(block);
}
return ResultWrapper<BlockForRpc>.Success(block == null ? null : new BlockForRpc(block, returnFullTransactionObjects));
}
public ResultWrapper<TransactionForRpc> eth_getTransactionByHash(Keccak transactionHash)
{
_txPoolBridge.TryGetPendingTransaction(transactionHash, out Transaction transaction);
TxReceipt receipt = null; // note that if transaction is pending then for sure no receipt is known
if (transaction == null)
{
(receipt, transaction) = _blockchainBridge.GetTransaction(transactionHash);
if (transaction == null)
{
return ResultWrapper<TransactionForRpc>.Success(null);
}
}
RecoverTxSenderIfNeeded(transaction);
TransactionForRpc transactionModel = new TransactionForRpc(receipt?.BlockHash, receipt?.BlockNumber, receipt?.Index, transaction);
if (_logger.IsTrace) _logger.Trace($"eth_getTransactionByHash request {transactionHash}, result: {transactionModel.Hash}");
return ResultWrapper<TransactionForRpc>.Success(transactionModel);
}
public ResultWrapper<TransactionForRpc[]> eth_pendingTransactions()
{
var transactions = _txPoolBridge.GetPendingTransactions();
var transactionsModels = new TransactionForRpc[transactions.Length];
for (int i = 0; i < transactions.Length; i++)
{
var transaction = transactions[i];
RecoverTxSenderIfNeeded(transaction);
transactionsModels[i] = new TransactionForRpc(transaction);
transactionsModels[i].BlockHash = Keccak.Zero;
}
if (_logger.IsTrace) _logger.Trace($"eth_pendingTransactions request, result: {transactionsModels.Length}");
return ResultWrapper<TransactionForRpc[]>.Success(transactionsModels);
}
public ResultWrapper<TransactionForRpc> eth_getTransactionByBlockHashAndIndex(Keccak blockHash, UInt256 positionIndex)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(new BlockParameter(blockHash));
if (searchResult.IsError)
{
return ResultWrapper<TransactionForRpc>.Fail(searchResult);
}
Block block = searchResult.Object;
if (positionIndex < 0 || positionIndex > block.Transactions.Length - 1)
{
return ResultWrapper<TransactionForRpc>.Fail("Position Index is incorrect", ErrorCodes.InvalidParams);
}
Transaction transaction = block.Transactions[(int) positionIndex];
RecoverTxSenderIfNeeded(transaction);
TransactionForRpc transactionModel = new TransactionForRpc(block.Hash, block.Number, (int) positionIndex, transaction);
return ResultWrapper<TransactionForRpc>.Success(transactionModel);
}
public ResultWrapper<TransactionForRpc> eth_getTransactionByBlockNumberAndIndex(BlockParameter blockParameter, UInt256 positionIndex)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<TransactionForRpc>.Fail(searchResult);
}
Block block = searchResult.Object;
if (positionIndex < 0 || positionIndex > block.Transactions.Length - 1)
{
return ResultWrapper<TransactionForRpc>.Fail("Position Index is incorrect", ErrorCodes.InvalidParams);
}
Transaction transaction = block.Transactions[(int) positionIndex];
RecoverTxSenderIfNeeded(transaction);
TransactionForRpc transactionModel = new TransactionForRpc(block.Hash, block.Number, (int) positionIndex, transaction);
if (_logger.IsDebug) _logger.Debug($"eth_getTransactionByBlockNumberAndIndex request {blockParameter}, index: {positionIndex}, result: {transactionModel.Hash}");
return ResultWrapper<TransactionForRpc>.Success(transactionModel);
}
public Task<ResultWrapper<ReceiptForRpc>> eth_getTransactionReceipt(Keccak txHash)
{
TxReceipt receipt = _blockchainBridge.GetReceipt(txHash);
if (receipt == null)
{
return Task.FromResult(ResultWrapper<ReceiptForRpc>.Success(null));
}
ReceiptForRpc receiptModel = new ReceiptForRpc(txHash, receipt);
if (_logger.IsTrace) _logger.Trace($"eth_getTransactionReceipt request {txHash}, result: {txHash}");
return Task.FromResult(ResultWrapper<ReceiptForRpc>.Success(receiptModel));
}
public ResultWrapper<BlockForRpc> eth_getUncleByBlockHashAndIndex(Keccak blockHash, UInt256 positionIndex)
{
return GetUncle(new BlockParameter(blockHash), positionIndex);
}
public ResultWrapper<BlockForRpc> eth_getUncleByBlockNumberAndIndex(BlockParameter blockParameter, UInt256 positionIndex)
{
return GetUncle(blockParameter, positionIndex);
}
private ResultWrapper<BlockForRpc> GetUncle(BlockParameter blockParameter, UInt256 positionIndex)
{
SearchResult<Block> searchResult = _blockFinder.SearchForBlock(blockParameter);
if (searchResult.IsError)
{
return ResultWrapper<BlockForRpc>.Fail(searchResult);
}
Block block = searchResult.Object;
if (positionIndex < 0 || positionIndex > block.Ommers.Length - 1)
{
return ResultWrapper<BlockForRpc>.Fail("Position Index is incorrect", ErrorCodes.InvalidParams);
}
BlockHeader ommerHeader = block.Ommers[(int) positionIndex];
return ResultWrapper<BlockForRpc>.Success(new BlockForRpc(new Block(ommerHeader, BlockBody.Empty), false));
}
public ResultWrapper<UInt256?> eth_newFilter(Filter filter)
{
BlockParameter fromBlock = filter.FromBlock;
BlockParameter toBlock = filter.ToBlock;
int filterId = _blockchainBridge.NewFilter(fromBlock, toBlock, filter.Address, filter.Topics);
return ResultWrapper<UInt256?>.Success((UInt256) filterId);
}
public ResultWrapper<UInt256?> eth_newBlockFilter()
{
int filterId = _blockchainBridge.NewBlockFilter();
return ResultWrapper<UInt256?>.Success((UInt256) filterId);
}
public ResultWrapper<UInt256?> eth_newPendingTransactionFilter()
{
int filterId = _blockchainBridge.NewPendingTransactionFilter();
return ResultWrapper<UInt256?>.Success((UInt256) filterId);
}
public ResultWrapper<bool?> eth_uninstallFilter(UInt256 filterId)
{
_blockchainBridge.UninstallFilter((int) filterId);
return ResultWrapper<bool?>.Success(true);
}
public ResultWrapper<IEnumerable<object>> eth_getFilterChanges(UInt256 filterId)
{
int id = (int) filterId;
FilterType filterType = _blockchainBridge.GetFilterType(id);
switch (filterType)
{
case FilterType.BlockFilter:
{
return _blockchainBridge.FilterExists(id)
? ResultWrapper<IEnumerable<object>>.Success(_blockchainBridge.GetBlockFilterChanges(id))
: ResultWrapper<IEnumerable<object>>.Fail($"Filter with id: '{filterId}' does not exist.");
}
case FilterType.PendingTransactionFilter:
{
return _blockchainBridge.FilterExists(id)
? ResultWrapper<IEnumerable<object>>.Success(_blockchainBridge.GetPendingTransactionFilterChanges(id))
: ResultWrapper<IEnumerable<object>>.Fail($"Filter with id: '{filterId}' does not exist.");
}
case FilterType.LogFilter:
{
return _blockchainBridge.FilterExists(id)
? ResultWrapper<IEnumerable<object>>.Success(
_blockchainBridge.GetLogFilterChanges(id).ToArray())
: ResultWrapper<IEnumerable<object>>.Fail($"Filter with id: '{filterId}' does not exist.");
}
default:
{
throw new NotSupportedException($"Filter type {filterType} is not supported");
}
}
}
public ResultWrapper<IEnumerable<FilterLog>> eth_getFilterLogs(UInt256 filterId)
{
int id = (int) filterId;
return _blockchainBridge.FilterExists(id)
? ResultWrapper<IEnumerable<FilterLog>>.Success(_blockchainBridge.GetFilterLogs(id))
: ResultWrapper<IEnumerable<FilterLog>>.Fail($"Filter with id: '{filterId}' does not exist.");
}
public ResultWrapper<IEnumerable<FilterLog>> eth_getLogs(Filter filter)
{
IEnumerable<FilterLog> GetLogs(BlockParameter blockParameter, BlockParameter toBlockParameter, CancellationTokenSource cancellationTokenSource, CancellationToken token)
{
using (cancellationTokenSource)
{
foreach (FilterLog log in _blockchainBridge.GetLogs(blockParameter, toBlockParameter, filter.Address, filter.Topics, token))
{
yield return log;
}
}
}
BlockParameter fromBlock = filter.FromBlock;
BlockParameter toBlock = filter.ToBlock;
try
{
CancellationTokenSource cancellationTokenSource = new CancellationTokenSource(_cancellationTokenTimeout);
return ResultWrapper<IEnumerable<FilterLog>>.Success(GetLogs(fromBlock, toBlock, cancellationTokenSource, cancellationTokenSource.Token));
}
catch (ArgumentException e)
{
switch (e.Message)
{
case ILogFinder.NotFoundError: return ResultWrapper<IEnumerable<FilterLog>>.Fail(e.Message, ErrorCodes.ResourceNotFound);
default:
return ResultWrapper<IEnumerable<FilterLog>>.Fail(e.Message, ErrorCodes.InvalidParams);
}
}
}
public ResultWrapper<IEnumerable<byte[]>> eth_getWork()
{
return ResultWrapper<IEnumerable<byte[]>>.Fail("eth_getWork not supported", ErrorCodes.MethodNotFound);
}
public ResultWrapper<bool?> eth_submitWork(byte[] nonce, Keccak headerPowHash, byte[] mixDigest)
{
return ResultWrapper<bool?>.Fail("eth_submitWork not supported", ErrorCodes.MethodNotFound, null);
}
public ResultWrapper<bool?> eth_submitHashrate(string hashRate, string id)
{
return ResultWrapper<bool?>.Fail("eth_submitHashrate not supported", ErrorCodes.MethodNotFound, null);
}
// https://github.com/ethereum/EIPs/issues/1186
public ResultWrapper<AccountProof> eth_getProof(Address accountAddress, byte[][] storageKeys, BlockParameter blockParameter)
{
BlockHeader header;
try
{
header = _blockFinder.FindHeader(blockParameter);
if (header == null)
{
return ResultWrapper<AccountProof>.Fail($"{blockParameter} block not found", ErrorCodes.ResourceNotFound, null);
}
if (!HasStateForBlock(header))
{
return ResultWrapper<AccountProof>.Fail($"No state available for block {header.Hash}", ErrorCodes.ResourceUnavailable);
}
}
catch (Exception ex)
{
return ResultWrapper<AccountProof>.Fail(ex.Message, ErrorCodes.InternalError, null);
}
AccountProofCollector accountProofCollector = new AccountProofCollector(accountAddress, storageKeys);
_blockchainBridge.RunTreeVisitor(accountProofCollector, header.StateRoot);
return ResultWrapper<AccountProof>.Success(accountProofCollector.BuildResult());
}
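        // Example request shape per EIP-1186: params = [address, [storageKey, ...],
        // blockParameter]; the response carries the account proof plus one
        // storage proof per requested key.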
public ResultWrapper<long> eth_chainId()
{
try
{
long chainId = _blockchainBridge.GetChainId();
return ResultWrapper<long>.Success(chainId);
}
catch (Exception ex)
{
return ResultWrapper<long>.Fail(ex.Message, ErrorCodes.InternalError, 0L);
}
}
private void RecoverTxSenderIfNeeded(Transaction transaction)
{
if (transaction.SenderAddress == null)
{
_blockchainBridge.RecoverTxSender(transaction);
}
}
}
}
| 1 | 24,644 | Is this from debugging? | NethermindEth-nethermind | .cs |
@@ -51,12 +51,17 @@ class NodeTest extends TestCase
/**
*/
- public function testAddChildren():void
+ public function testAddChildren(): void
{
+ $categoryId2 = new CategoryId('cde02652-70ce-484e-bc9d-3bf61391522d');
+
/** @var Node|MockObject $children */
- $children = $this->createMock(Node::class);
+ $children = new Node($categoryId2);
$node = new Node($this->categoryId);
+ $this->assertFalse($node->hasChildren($categoryId2));
$node->addChildren($children);
$this->assertEquals($children, $node->getChildrens()[0]);
+ $this->assertFalse($node->hasSuccessor($this->categoryId));
+ $this->assertTrue($node->hasChildren($categoryId2));
}
} | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types = 1);
namespace Ergonode\CategoryTree\Tests\Domain\ValueObject;
use Ergonode\Category\Domain\Entity\CategoryId;
use Ergonode\CategoryTree\Domain\ValueObject\Node;
use PHPUnit\Framework\MockObject\MockObject;
use PHPUnit\Framework\TestCase;
/**
*/
class NodeTest extends TestCase
{
/**
* @var CategoryId|MockObject
*/
private $categoryId;
/**
*/
protected function setUp()
{
$this->categoryId = $this->createMock(CategoryId::class);
}
/**
*/
public function testCreateNode(): void
{
$node = new Node($this->categoryId);
$this->assertEquals($this->categoryId, $node->getCategoryId());
}
/**
*/
public function testSettingParent(): void
{
/** @var Node|MockObject $parent */
$parent = $this->createMock(Node::class);
$node = new Node($this->categoryId);
$node->setParent($parent);
$this->assertEquals($parent, $node->getParent());
}
/**
*/
public function testAddChildren():void
{
/** @var Node|MockObject $children */
$children = $this->createMock(Node::class);
$node = new Node($this->categoryId);
$node->addChildren($children);
$this->assertEquals($children, $node->getChildrens()[0]);
}
}
| 1 | 8,473 | It's better to use MockedObject | ergonode-backend | php |
@@ -840,8 +840,12 @@ type DiskBlockCache interface {
// Put puts a block to the disk cache.
Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
- // DeleteByTLF deletes some blocks from the disk cache.
- DeleteByTLF(ctx context.Context, tlfID tlf.ID, blockIDs []kbfsblock.ID) (numRemoved int, sizeRemoved int64, err error)
+ // Delete deletes some blocks from the disk cache.
+ Delete(ctx context.Context, blockIDs []kbfsblock.ID) (numRemoved int,
+ sizeRemoved int64, err error)
+ // UpdateMetadata updates the metadata for the given block, including
+ // setting the LRU time to now.
+ UpdateMetadata(ctx context.Context, blockID kbfsblock.ID) error
// Size returns the size in bytes of the disk cache.
Size() int64
// Shutdown cleanly shuts down the disk block cache. | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
metrics "github.com/rcrowley/go-metrics"
"golang.org/x/net/context"
)
type dataVersioner interface {
// DataVersion returns the data version for this block
DataVersion() DataVer
}
type logMaker interface {
MakeLogger(module string) logger.Logger
}
type blockCacher interface {
BlockCache() BlockCache
}
type keyGetterGetter interface {
keyGetter() blockKeyGetter
}
type codecGetter interface {
Codec() kbfscodec.Codec
}
type blockServerGetter interface {
BlockServer() BlockServer
}
type cryptoPureGetter interface {
cryptoPure() cryptoPure
}
type cryptoGetter interface {
Crypto() Crypto
}
type currentSessionGetterGetter interface {
currentSessionGetter() currentSessionGetter
}
type signerGetter interface {
Signer() kbfscrypto.Signer
}
type diskBlockCacheGetter interface {
DiskBlockCache() DiskBlockCache
}
type diskBlockCacheSetter interface {
SetDiskBlockCache(DiskBlockCache)
}
type clockGetter interface {
Clock() Clock
}
type diskLimiterGetter interface {
DiskLimiter() DiskLimiter
}
// Block just needs to be (de)serialized using msgpack
type Block interface {
dataVersioner
// GetEncodedSize returns the encoded size of this block, but only
// if it has been previously set; otherwise it returns 0.
GetEncodedSize() uint32
// SetEncodedSize sets the encoded size of this block, locally
// caching it. The encoded size is not serialized.
SetEncodedSize(size uint32)
// NewEmpty returns a new block of the same type as this block
NewEmpty() Block
// Set sets this block to the same value as the passed-in block
Set(other Block)
// ToCommonBlock retrieves this block as a *CommonBlock.
ToCommonBlock() *CommonBlock
}
// NodeID is a unique but transient ID for a Node. That is, two Node
// objects in memory at the same time represent the same file or
// directory if and only if their NodeIDs are equal (by pointer).
type NodeID interface {
// ParentID returns the NodeID of the directory containing the
// pointed-to file or directory, or nil if none exists.
ParentID() NodeID
}
// Node represents a direct pointer to a file or directory in KBFS.
// It is somewhat like an inode in a regular file system. Users of
// KBFS can use Node as a handle when accessing files or directories
// they have previously looked up.
type Node interface {
// GetID returns the ID of this Node. This should be used as a
// map key instead of the Node itself.
GetID() NodeID
// GetFolderBranch returns the folder ID and branch for this Node.
GetFolderBranch() FolderBranch
// GetBasename returns the current basename of the node, or ""
// if the node has been unlinked.
GetBasename() string
}
// KBFSOps handles all file system operations. Expands all indirect
// pointers. Operations that modify the server data change all the
// block IDs along the path, and so must return a path with the new
// BlockIds so the caller can update their references.
//
// KBFSOps implementations must guarantee goroutine-safety of calls on
// a per-top-level-folder basis.
//
// There are two types of operations that could block:
// * remote-sync operations, that need to synchronously update the
// MD for the corresponding top-level folder. When these
// operations return successfully, they will have guaranteed to
// have successfully written the modification to the KBFS servers.
// * remote-access operations, which don't sync any modifications to KBFS
// servers, but may block on reading data from the servers.
//
// KBFSOps implementations are supposed to give git-like consistency
// semantics for modification operations; they will be visible to
// other clients immediately after the remote-sync operations succeed,
// if and only if there was no other intervening modification to the
// same folder. If not, the change will be sync'd to the server in a
// special per-device "unmerged" area before the operation succeeds.
// In this case, the modification will not be visible to other clients
// until the KBFS code on this device performs automatic conflict
// resolution in the background.
//
// All methods take a Context (see https://blog.golang.org/context),
// and if that context is cancelled during the operation, KBFSOps will
// abort any blocking calls and return ctx.Err(). Any notifications
// resulting from an operation will also include this ctx (or a
// Context derived from it), allowing the caller to determine whether
// the notification is a result of their own action or an external
// action.
type KBFSOps interface {
// GetFavorites returns the logged-in user's list of favorite
// top-level folders. This is a remote-access operation.
GetFavorites(ctx context.Context) ([]Favorite, error)
// RefreshCachedFavorites tells the instances to forget any cached
// favorites list and fetch a new list from the server. The
	// effects are asynchronous; if there's an error refreshing the
// favorites, the cached favorites will become empty.
RefreshCachedFavorites(ctx context.Context)
// AddFavorite adds the favorite to both the server and
// the local cache.
AddFavorite(ctx context.Context, fav Favorite) error
// DeleteFavorite deletes the favorite from both the server and
// the local cache. Idempotent, so it succeeds even if the folder
// isn't favorited.
DeleteFavorite(ctx context.Context, fav Favorite) error
// GetTLFCryptKeys gets crypt key of all generations as well as
// TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by
// generation, starting with the key for FirstValidKeyGen.
GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) (
keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error)
// GetTLFID gets the TLF ID for tlfHandle.
GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (tlf.ID, error)
// GetOrCreateRootNode returns the root node and root entry
// info associated with the given TLF handle and branch, if
// the logged-in user has read permissions to the top-level
// folder. It creates the folder if one doesn't exist yet (and
// branch == MasterBranch), and the logged-in user has write
// permissions to the top-level folder. This is a
// remote-access operation.
GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetRootNode is like GetOrCreateRootNode but if the root node
// does not exist it will return a nil Node and not create it.
GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetDirChildren returns a map of children in the directory,
// mapped to their EntryInfo, if the logged-in user has read
// permission for the top-level folder. This is a remote-access
// operation.
GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error)
// Lookup returns the Node and entry info associated with a
// given name in a directory, if the logged-in user has read
// permissions to the top-level folder. The returned Node is nil
// if the name is a symlink. This is a remote-access operation.
Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error)
// Stat returns the entry info associated with a
// given Node, if the logged-in user has read permissions to the
// top-level folder. This is a remote-access operation.
Stat(ctx context.Context, node Node) (EntryInfo, error)
// CreateDir creates a new subdirectory under the given node, if
// the logged-in user has write permission to the top-level
// folder. Returns the new Node for the created subdirectory, and
// its new entry info. This is a remote-sync operation.
CreateDir(ctx context.Context, dir Node, name string) (
Node, EntryInfo, error)
// CreateFile creates a new file under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new Node for the created file, and its new
// entry info. excl (when implemented) specifies whether this is an exclusive
// create. Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a
// Unix open() call.
//
// This is a remote-sync operation.
CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) (
Node, EntryInfo, error)
// CreateLink creates a new symlink under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new entry info for the created symlink. This
// is a remote-sync operation.
CreateLink(ctx context.Context, dir Node, fromName string, toPath string) (
EntryInfo, error)
// RemoveDir removes the subdirectory represented by the given
// node, if the logged-in user has write permission to the
// top-level folder. Will return an error if the subdirectory is
// not empty. This is a remote-sync operation.
RemoveDir(ctx context.Context, dir Node, dirName string) error
// RemoveEntry removes the directory entry represented by the
// given node, if the logged-in user has write permission to the
// top-level folder. This is a remote-sync operation.
RemoveEntry(ctx context.Context, dir Node, name string) error
// Rename performs an atomic rename operation with a given
// top-level folder if the logged-in user has write permission to
// that folder, and will return an error if nodes from different
// folders are passed in. Also returns an error if the new name
// already has an entry corresponding to an existing directory
// (only non-dir types may be renamed over). This is a
// remote-sync operation.
Rename(ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) error
// Read fills in the given buffer with data from the file at the
// given node starting at the given offset, if the logged-in user
// has read permission to the top-level folder. The read data
// reflects any outstanding writes and truncates to that file that
// have been written through this KBFSOps object, even if those
// writes have not yet been sync'd. There is no guarantee that
// Read returns all of the requested data; it will return the
// number of bytes that it wrote to the dest buffer. Reads on an
// unlinked file may or may not succeed, depending on whether or
// not the data has been cached locally. If (0, nil) is returned,
// that means EOF has been reached. This is a remote-access
// operation.
Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error)
// Write modifies the file at the given node, by writing the given
// buffer at the given offset within the file, if the logged-in
// user has write permission to the top-level folder. It
// overwrites any data already there, and extends the file size as
	// necessary to accommodate the new data. It guarantees to write
// the entire buffer in one operation. Writes on an unlinked file
// may or may not succeed as no-ops, depending on whether or not
// the necessary blocks have been locally cached. This is a
// remote-access operation.
Write(ctx context.Context, file Node, data []byte, off int64) error
// Truncate modifies the file at the given node, by either
// shrinking or extending its size to match the given size, if the
// logged-in user has write permission to the top-level folder.
// If extending the file, it pads the new data with 0s. Truncates
// on an unlinked file may or may not succeed as no-ops, depending
// on whether or not the necessary blocks have been locally
// cached. This is a remote-access operation.
Truncate(ctx context.Context, file Node, size uint64) error
// SetEx turns on or off the executable bit on the file
// represented by a given node, if the logged-in user has write
// permissions to the top-level folder. This is a remote-sync
// operation.
SetEx(ctx context.Context, file Node, ex bool) error
// SetMtime sets the modification time on the file represented by
// a given node, if the logged-in user has write permissions to
// the top-level folder. If mtime is nil, it is a noop. This is
// a remote-sync operation.
SetMtime(ctx context.Context, file Node, mtime *time.Time) error
// Sync flushes all outstanding writes and truncates for the given
// file to the KBFS servers, if the logged-in user has write
// permissions to the top-level folder. If done through a file
// system interface, this may include modifications done via
// multiple file handles. This is a remote-sync operation.
Sync(ctx context.Context, file Node) error
// FolderStatus returns the status of a particular folder/branch, along
// with a channel that will be closed when the status has been
// updated (to eliminate the need for polling this method).
FolderStatus(ctx context.Context, folderBranch FolderBranch) (
FolderBranchStatus, <-chan StatusUpdate, error)
// Status returns the status of KBFS, along with a channel that will be
// closed when the status has been updated (to eliminate the need for
// polling this method). Note that this channel only applies to
// connection status changes.
//
// KBFSStatus can be non-empty even if there is an error.
Status(ctx context.Context) (
KBFSStatus, <-chan StatusUpdate, error)
// UnstageForTesting clears out this device's staged state, if
// any, and fast-forwards to the current head of this
// folder-branch.
UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error
// RequestRekey requests to rekey this folder. Note that this asynchronously
// requests a rekey, so canceling ctx doesn't cancel the rekey.
RequestRekey(ctx context.Context, id tlf.ID)
// SyncFromServerForTesting blocks until the local client has
// contacted the server and guaranteed that all known updates
// for the given top-level folder have been applied locally
// (and notifications sent out to any observers). It returns
// an error if this folder-branch is currently unmerged or
// dirty locally.
SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch) error
// GetUpdateHistory returns a complete history of all the merged
// updates of the given folder, in a data structure that's
// suitable for encoding directly into JSON. This is an expensive
	// operation, and should only be used for occasional debugging.
// Note that the history does not include any unmerged changes or
// outstanding writes from the local device.
GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (
history TLFUpdateHistory, err error)
// GetEditHistory returns a clustered list of the most recent file
// edits by each of the valid writers of the given folder. users
// looking to get updates to this list can register as an observer
// for the folder.
GetEditHistory(ctx context.Context, folderBranch FolderBranch) (
edits TlfWriterEdits, err error)
// GetNodeMetadata gets metadata associated with a Node.
GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error)
// Shutdown is called to clean up any resources associated with
// this KBFSOps instance.
Shutdown(ctx context.Context) error
// PushConnectionStatusChange updates the status of a service for
// human readable connection status tracking.
PushConnectionStatusChange(service string, newStatus error)
// PushStatusChange causes Status listeners to be notified via closing
// the status channel.
PushStatusChange()
// ClearPrivateFolderMD clears any cached private folder metadata,
// e.g. on a logout.
ClearPrivateFolderMD(ctx context.Context)
// ForceFastForward forwards the nodes of all folders that have
// been previously cleared with `ClearPrivateFolderMD` to their
// newest version. It works asynchronously, so no error is
// returned.
ForceFastForward(ctx context.Context)
}
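// A minimal usage sketch of the contract above (ops, ctx and h assumed to be
// a KBFSOps instance, a context and a resolved *TlfHandle): CreateFile and
// Sync are remote-sync operations, while Write is a remote-access operation
// whose data only reaches the servers once Sync succeeds.
//
//	root, _, err := ops.GetOrCreateRootNode(ctx, h, MasterBranch)
//	if err != nil { /* handle */ }
//	file, _, err := ops.CreateFile(ctx, root, "notes.txt", false, NoExcl)
//	if err != nil { /* handle */ }
//	if err := ops.Write(ctx, file, []byte("hello"), 0); err != nil { /* handle */ }
//	if err := ops.Sync(ctx, file); err != nil { /* handle */ }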
// KeybaseService is an interface for communicating with the keybase
// service.
type KeybaseService interface {
// Resolve, given an assertion, resolves it to a username/UID
// pair. The username <-> UID mapping is trusted and
// immutable, so it can be cached. If the assertion is just
// the username or a UID assertion, then the resolution can
// also be trusted. If the returned pair is equal to that of
// the current session, then it can also be
// trusted. Otherwise, Identify() needs to be called on the
// assertion before the assertion -> (username, UID) mapping
// can be trusted.
Resolve(ctx context.Context, assertion string) (
libkb.NormalizedUsername, keybase1.UID, error)
// Identify, given an assertion, returns a UserInfo struct
// with the user that matches that assertion, or an error
// otherwise. The reason string is displayed on any tracker
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
// LoadUserPlusKeys returns a UserInfo struct for a
// user with the specified UID.
// If you have the UID for a user and don't require Identify to
// validate an assertion or the identity of a user, use this to
// get UserInfo structs as it is much cheaper than Identify.
//
	// pollForKID, if non-empty, causes `PollForKID` field to be populated, which
// causes the service to poll for the given KID. This is useful during
// provisioning where the provisioner needs to get the MD revision that the
// provisionee has set the rekey bit on.
LoadUserPlusKeys(ctx context.Context,
uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error)
// LoadUnverifiedKeys returns a list of unverified public keys. They are the union
// of all known public keys associated with the account and the currently verified
// keys currently part of the user's sigchain.
LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) (
[]keybase1.PublicKey, error)
// CurrentSession returns a SessionInfo struct with all the
// information for the current session, or an error otherwise.
CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error)
// FavoriteAdd adds the given folder to the list of favorites.
FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
	// FavoriteDelete removes the given folder from the list of
// favorites.
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
// FavoriteList returns the current list of favorites.
FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error)
// Notify sends a filesystem notification.
Notify(ctx context.Context, notification *keybase1.FSNotification) error
// NotifySyncStatus sends a sync status notification.
NotifySyncStatus(ctx context.Context,
status *keybase1.FSPathSyncStatus) error
// FlushUserFromLocalCache instructs this layer to clear any
// KBFS-side, locally-cached information about the given user.
// This does NOT involve communication with the daemon, this is
// just to force future calls loading this user to fall through to
// the daemon itself, rather than being served from the cache.
FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID)
// FlushUserUnverifiedKeysFromLocalCache instructs this layer to clear any
// KBFS-side, locally-cached unverified keys for the given user.
FlushUserUnverifiedKeysFromLocalCache(ctx context.Context, uid keybase1.UID)
// TODO: Add CryptoClient methods, too.
// EstablishMountDir asks the service for the current mount path
// and sets it if not established.
EstablishMountDir(ctx context.Context) (string, error)
// Shutdown frees any resources associated with this
// instance. No other methods may be called after this is
// called.
Shutdown()
}
// KeybaseServiceCn defines methods needed to construct KeybaseService
// and Crypto implementations.
type KeybaseServiceCn interface {
NewKeybaseService(config Config, params InitParams, ctx Context, log logger.Logger) (KeybaseService, error)
NewCrypto(config Config, params InitParams, ctx Context, log logger.Logger) (Crypto, error)
}
type resolver interface {
// Resolve, given an assertion, resolves it to a username/UID
// pair. The username <-> UID mapping is trusted and
// immutable, so it can be cached. If the assertion is just
// the username or a UID assertion, then the resolution can
// also be trusted. If the returned pair is equal to that of
// the current session, then it can also be
// trusted. Otherwise, Identify() needs to be called on the
// assertion before the assertion -> (username, UID) mapping
// can be trusted.
Resolve(ctx context.Context, assertion string) (
libkb.NormalizedUsername, keybase1.UID, error)
}
type identifier interface {
// Identify resolves an assertion (which could also be a
// username) to a UserInfo struct, spawning tracker popups if
// necessary. The reason string is displayed on any tracker
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
}
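// resolveTrusted is an illustrative sketch, not part of the KBFS API,
// of the trust rules documented on Resolve: the mapping returned by
// Resolve can be trusted only for plain username or UID assertions
// (or when it matches the current session); any other assertion must
// go through Identify before the mapping is trusted. The
// assertionIsPlain parameter is a hypothetical stand-in for that
// check.
func resolveTrusted(ctx context.Context, r resolver, id identifier,
	assertion, reason string, assertionIsPlain bool) (
	libkb.NormalizedUsername, keybase1.UID, error) {
	name, uid, err := r.Resolve(ctx, assertion)
	if err != nil {
		return "", "", err
	}
	if assertionIsPlain {
		// Username/UID assertions resolve to a trusted mapping.
		return name, uid, nil
	}
	// Otherwise, run a full Identify (possibly spawning tracker
	// popups) before trusting the assertion -> (username, UID)
	// mapping.
	if _, err := id.Identify(ctx, assertion, reason); err != nil {
		return "", "", err
	}
	return name, uid, nil
}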
type normalizedUsernameGetter interface {
// GetNormalizedUsername returns the normalized username
// corresponding to the given UID.
GetNormalizedUsername(ctx context.Context, uid keybase1.UID) (libkb.NormalizedUsername, error)
}
type currentSessionGetter interface {
// GetCurrentSession gets the current session info.
GetCurrentSession(ctx context.Context) (SessionInfo, error)
}
// KBPKI interacts with the Keybase daemon to fetch user info.
type KBPKI interface {
currentSessionGetter
resolver
identifier
normalizedUsernameGetter
// HasVerifyingKey returns nil if the given user has the given
// VerifyingKey, and an error otherwise.
HasVerifyingKey(ctx context.Context, uid keybase1.UID,
verifyingKey kbfscrypto.VerifyingKey,
atServerTime time.Time) error
// HasUnverifiedVerifyingKey returns nil if the given user has the given
// unverified VerifyingKey, and an error otherwise. Note that any match
// is with a key not verified to be currently connected to the user via
// their sigchain. This is currently only used to verify finalized or
// reset TLFs. Further note that the unverified keys are a superset
// of the verified keys.
HasUnverifiedVerifyingKey(ctx context.Context, uid keybase1.UID,
verifyingKey kbfscrypto.VerifyingKey) error
// GetCryptPublicKeys gets all of a user's crypt public keys (including
// paper keys).
GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) (
[]kbfscrypto.CryptPublicKey, error)
// TODO: Split the methods below off into a separate
// FavoriteOps interface.
// FavoriteAdd adds folder to the list of the logged in user's
// favorite folders. It is idempotent.
FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
// FavoriteDelete deletes folder from the list of the logged in user's
// favorite folders. It is idempotent.
FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
// FavoriteList returns the list of all favorite folders for
// the logged in user.
FavoriteList(ctx context.Context) ([]keybase1.Folder, error)
// Notify sends a filesystem notification.
Notify(ctx context.Context, notification *keybase1.FSNotification) error
}
// KeyMetadata is an interface for something that holds key
// information. This is usually implemented by RootMetadata.
type KeyMetadata interface {
// TlfID returns the ID of the TLF for which this object holds
// key info.
TlfID() tlf.ID
// LatestKeyGeneration returns the most recent key generation
// with key data in this object, or PublicKeyGen if this TLF
// is public.
LatestKeyGeneration() KeyGen
// GetTlfHandle returns the handle for the TLF. It must not
// return nil.
//
// TODO: Remove the need for this function in this interface,
// so that BareRootMetadata can implement this interface
// fully.
GetTlfHandle() *TlfHandle
// HasKeyForUser returns whether or not the given user has
// keys for at least one device. Returns an error if the TLF
// is public.
HasKeyForUser(user keybase1.UID) (bool, error)
// GetTLFCryptKeyParams returns all the necessary info to
// construct the TLF crypt key for the given key generation,
// user, and device (identified by its crypt public key), or
// false if not found. This returns an error if the TLF is
// public.
GetTLFCryptKeyParams(
keyGen KeyGen, user keybase1.UID,
key kbfscrypto.CryptPublicKey) (
kbfscrypto.TLFEphemeralPublicKey,
EncryptedTLFCryptKeyClientHalf,
TLFCryptKeyServerHalfID, bool, error)
// StoresHistoricTLFCryptKeys returns whether or not history keys are
// symmetrically encrypted; if not, they're encrypted per-device.
StoresHistoricTLFCryptKeys() bool
// GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given
// generation using the current generation's TLFCryptKey.
GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen,
currentKey kbfscrypto.TLFCryptKey) (
kbfscrypto.TLFCryptKey, error)
}
type encryptionKeyGetter interface {
// GetTLFCryptKeyForEncryption gets the crypt key to use for
// encryption (i.e., with the latest key generation) for the
// TLF with the given metadata.
GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) (
kbfscrypto.TLFCryptKey, error)
}
type mdDecryptionKeyGetter interface {
// GetTLFCryptKeyForMDDecryption gets the crypt key to use for the
// TLF with the given metadata to decrypt the private portion of
// the metadata. It finds the appropriate key from mdWithKeys
// (which in most cases is the same as mdToDecrypt) if it's not
// already cached.
GetTLFCryptKeyForMDDecryption(ctx context.Context,
kmdToDecrypt, kmdWithKeys KeyMetadata) (
kbfscrypto.TLFCryptKey, error)
}
type blockDecryptionKeyGetter interface {
// GetTLFCryptKeyForBlockDecryption gets the crypt key to use
// for the TLF with the given metadata to decrypt the block
// pointed to by the given pointer.
GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error)
}
type blockKeyGetter interface {
encryptionKeyGetter
blockDecryptionKeyGetter
}
// KeyManager fetches and constructs the keys needed for KBFS file
// operations.
type KeyManager interface {
blockKeyGetter
mdDecryptionKeyGetter
// GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations
// for current devices. keys contains crypt keys from all generations, in
// order, starting from FirstValidKeyGen.
GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) (
keys []kbfscrypto.TLFCryptKey, err error)
// Rekey checks the given MD object, if it is a private TLF,
// against the current set of device keys for all valid
// readers and writers. If there are any new devices, it
// updates all existing key generations to include the new
// devices. If there are devices that have been removed, it
// creates a new epoch of keys for the TLF. If there was an
// error, or the RootMetadata wasn't changed, it returns false.
// Otherwise, it returns true. If a new key generation is
// added the second return value points to this new key. This
// is to allow for caching of the TLF crypt key only after a
// successful merged write of the metadata. Otherwise we could
// prematurely pollute the key cache.
//
// If the given MD object is a public TLF, it simply updates
// the TLF's handle with any newly-resolved writers.
//
// If promptPaper is set, prompts for any unlocked paper keys.
// promptPaper shouldn't be set if md is for a public TLF.
Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) (
bool, *kbfscrypto.TLFCryptKey, error)
}
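// rekeyAndCache is an illustrative sketch, not part of the KBFS API,
// of the caching contract documented on Rekey: a newly-added TLF
// crypt key must be cached only after a successful merged write of
// the metadata, so that a failed put doesn't pollute the key cache.
// The putMD callback is hypothetical, and md is assumed to implement
// the KeyMetadata accessors shown above.
func rekeyAndCache(ctx context.Context, km KeyManager, kc KeyCache,
	md *RootMetadata, putMD func(*RootMetadata) error) error {
	changed, newKey, err := km.Rekey(ctx, md, false)
	if err != nil || !changed {
		return err
	}
	if err := putMD(md); err != nil {
		return err
	}
	if newKey != nil {
		// The merged write succeeded, so caching is now safe.
		return kc.PutTLFCryptKey(
			md.TlfID(), md.LatestKeyGeneration(), *newKey)
	}
	return nil
}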
// Reporter exports events (asynchronously) to any number of sinks
type Reporter interface {
// ReportErr records that a given error happened.
ReportErr(ctx context.Context, tlfName CanonicalTlfName, public bool,
mode ErrorModeType, err error)
// AllKnownErrors returns all errors known to this Reporter.
AllKnownErrors() []ReportedError
// Notify sends the given notification to any sink.
Notify(ctx context.Context, notification *keybase1.FSNotification)
// NotifySyncStatus sends the given path sync status to any sink.
NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus)
// Shutdown frees any resources allocated by a Reporter.
Shutdown()
}
// MDCache gets and puts plaintext top-level metadata into the cache.
type MDCache interface {
// Get gets the metadata object associated with the given TLF ID,
// revision number, and branch ID (NullBranchID for merged MD).
Get(tlf tlf.ID, rev MetadataRevision, bid BranchID) (ImmutableRootMetadata, error)
// Put stores the metadata object.
Put(md ImmutableRootMetadata) error
// Delete removes the given metadata object from the cache if it exists.
Delete(tlf tlf.ID, rev MetadataRevision, bid BranchID)
// Replace replaces the entry matching the md under the old branch
// ID with the new one. If the old entry doesn't exist, this is
// equivalent to a Put.
Replace(newRmd ImmutableRootMetadata, oldBID BranchID) error
}
// KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys.
type KeyCache interface {
// GetTLFCryptKey gets the crypt key for the given TLF.
GetTLFCryptKey(tlf.ID, KeyGen) (kbfscrypto.TLFCryptKey, error)
// PutTLFCryptKey stores the crypt key for the given TLF.
PutTLFCryptKey(tlf.ID, KeyGen, kbfscrypto.TLFCryptKey) error
}
// BlockCacheLifetime denotes the lifetime of an entry in BlockCache.
type BlockCacheLifetime int
const (
// NoCacheEntry means that the entry will not be cached.
NoCacheEntry BlockCacheLifetime = iota
// TransientEntry means that the cache entry may be evicted at
// any time.
TransientEntry
// PermanentEntry means that the cache entry must remain until
// explicitly removed from the cache.
PermanentEntry
)
// BlockCacheSimple gets and puts plaintext dir blocks and file blocks into
// a cache. These blocks are immutable and identified by their
// content hash.
type BlockCacheSimple interface {
// Get gets the block associated with the given block ID.
Get(ptr BlockPointer) (Block, error)
// Put stores the final (content-addressable) block associated
// with the given block ID. If lifetime is TransientEntry,
// then it is assumed that the block exists on the server and
// the entry may be evicted from the cache at any time. If
// lifetime is PermanentEntry, then it is assumed that the
// block doesn't exist on the server and must remain in the
// cache until explicitly removed. As an intermediary state,
// as when a block is being sent to the server, the block may
// be put into the cache both with TransientEntry and
// PermanentEntry -- these are two separate entries. This is
// fine, since the block should be the same.
Put(ptr BlockPointer, tlf tlf.ID, block Block,
lifetime BlockCacheLifetime) error
}
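// cacheBlockBeingSent is an illustrative sketch, not part of the KBFS
// API, of the intermediary state described on Put: while a block is
// in flight to the server it may be cached under both lifetimes at
// once -- PermanentEntry because it may not yet exist on the server,
// and TransientEntry so it can be evicted once it does.
func cacheBlockBeingSent(cache BlockCacheSimple, ptr BlockPointer,
	tlfID tlf.ID, block Block) error {
	if err := cache.Put(ptr, tlfID, block, PermanentEntry); err != nil {
		return err
	}
	return cache.Put(ptr, tlfID, block, TransientEntry)
}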
// BlockCache specifies the interface of BlockCacheSimple, and also more
// advanced and internal methods.
type BlockCache interface {
BlockCacheSimple
// CheckForKnownPtr sees whether this cache has a transient
// entry for the given file block (which must be a direct file
// block containing data). Returns the full BlockPointer
// associated with that ID, including key and data versions.
// If no ID is known, return an uninitialized BlockPointer and
// a nil error.
CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error)
// DeleteTransient removes the transient entry for the given
// pointer from the cache, as well as any cached IDs so the block
// won't be reused.
DeleteTransient(ptr BlockPointer, tlf tlf.ID) error
// DeletePermanent removes the permanent entry for the non-dirty block
// associated with the given block ID from the cache. No
// error is returned if no block exists for the given ID.
DeletePermanent(id kbfsblock.ID) error
// DeleteKnownPtr removes the cached ID for the given file
// block. It does not remove the block itself.
DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error
// GetWithPrefetch retrieves a block from the cache, along with whether or
// not it has triggered a prefetch.
GetWithPrefetch(ptr BlockPointer) (
block Block, hasPrefetched bool, lifetime BlockCacheLifetime, err error)
// PutWithPrefetch puts a block into the cache, along with whether or not
// it has triggered a prefetch.
PutWithPrefetch(ptr BlockPointer, tlf tlf.ID, block Block,
lifetime BlockCacheLifetime, hasPrefetched bool) error
// SetCleanBytesCapacity atomically sets clean bytes capacity for block
// cache.
SetCleanBytesCapacity(capacity uint64)
// GetCleanBytesCapacity atomically gets clean bytes capacity for block
// cache.
GetCleanBytesCapacity() (capacity uint64)
}
// DirtyPermChan is a channel that gets closed when the holder has
// permission to write. We are forced to define it as a type due to a
// bug in mockgen that can't handle return values with a chan
// struct{}.
type DirtyPermChan <-chan struct{}
// DirtyBlockCache gets and puts plaintext dir blocks and file blocks
// into a cache, which have been modified by the application and not
// yet committed on the KBFS servers. They are identified by a
// (potentially random) ID that may not have any relationship with
// their context, along with a Branch in case the same TLF is being
// modified via multiple branches. Dirty blocks are never evicted,
// they must be deleted explicitly.
type DirtyBlockCache interface {
// Get gets the block associated with the given block ID. Returns
// the dirty block for the given ID, if one exists.
Get(tlfID tlf.ID, ptr BlockPointer, branch BranchName) (Block, error)
// Put stores a dirty block currently identified by the
// given block pointer and branch name.
Put(tlfID tlf.ID, ptr BlockPointer, branch BranchName, block Block) error
// Delete removes the dirty block associated with the given block
// pointer and branch from the cache. No error is returned if no
// block exists for the given ID.
Delete(tlfID tlf.ID, ptr BlockPointer, branch BranchName) error
// IsDirty states whether or not the block associated with the
// given block pointer and branch name is dirty in this cache.
IsDirty(tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool
// IsAnyDirty returns whether there are any dirty blocks in the
// cache. tlfID may be ignored.
IsAnyDirty(tlfID tlf.ID) bool
// RequestPermissionToDirty is called whenever a user wants to
// write data to a file. The caller provides an estimated number
// of bytes that will become dirty -- this is difficult to know
// exactly without pre-fetching all the blocks involved, but in
// practice we can just use the number of bytes sent in via the
// Write. It returns a channel that blocks until the cache is
// ready to receive more dirty data, at which point the channel is
// closed. The user must call
// `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has
// completed its write and called `UpdateUnsyncedBytes` for all
// the exact dirty block sizes.
RequestPermissionToDirty(ctx context.Context, tlfID tlf.ID,
estimatedDirtyBytes int64) (DirtyPermChan, error)
// UpdateUnsyncedBytes is called by a user, who has already been
// granted permission to write, with the delta in block sizes that
// were dirtied as part of the write. So for example, if a
// newly-dirtied block of 20 bytes was extended by 5 bytes, they
// should send 25. If on the next write (before any syncs), bytes
// 10-15 of that same block were overwritten, they should send 0
// over the channel because there were no new bytes. If an
// already-dirtied block is truncated, or if previously requested
// bytes have now been updated more accurately in previous
// requests, newUnsyncedBytes may be negative. wasSyncing should
// be true if `BlockSyncStarted` has already been called for this
// block.
UpdateUnsyncedBytes(tlfID tlf.ID, newUnsyncedBytes int64, wasSyncing bool)
// UpdateSyncingBytes is called when a particular block has
// started syncing, or with a negative number when a block is no
// longer syncing due to an error (and BlockSyncFinished will
// never be called).
UpdateSyncingBytes(tlfID tlf.ID, size int64)
// BlockSyncFinished is called when a particular block has
// finished syncing, though the overall sync might not yet be
// complete. This lets the cache know it might be able to grant
// more permission to writers.
BlockSyncFinished(tlfID tlf.ID, size int64)
// SyncFinished is called when a complete sync has completed and
// its dirty blocks have been removed from the cache. This lets
// the cache know it might be able to grant more permission to
// writers.
SyncFinished(tlfID tlf.ID, size int64)
// ShouldForceSync returns true if the sync buffer is full enough
// to force all callers to sync their data immediately.
ShouldForceSync(tlfID tlf.ID) bool
// Shutdown frees any resources associated with this instance. It
// returns an error if there are any unsynced blocks.
Shutdown() error
}
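// writeWithDirtyPermission is an illustrative sketch, not part of the
// KBFS API, of the flow documented on RequestPermissionToDirty:
// request permission with an estimated byte count, block until the
// returned channel is closed, perform the write (the doWrite callback
// is hypothetical and reports the exact dirtied byte delta), and
// finally back out the original estimate.
func writeWithDirtyPermission(ctx context.Context, dbc DirtyBlockCache,
	tlfID tlf.ID, estimatedDirtyBytes int64,
	doWrite func() (exactDirtyBytes int64, err error)) error {
	permChan, err := dbc.RequestPermissionToDirty(
		ctx, tlfID, estimatedDirtyBytes)
	if err != nil {
		return err
	}
	select {
	case <-permChan: // closed once the cache can accept more dirty data
	case <-ctx.Done():
		return ctx.Err()
	}
	exact, err := doWrite()
	if err != nil {
		return err
	}
	dbc.UpdateUnsyncedBytes(tlfID, exact, false)
	// Back out the original estimate now that exact sizes are known.
	dbc.UpdateUnsyncedBytes(tlfID, -estimatedDirtyBytes, false)
	return nil
}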
// DiskBlockCache caches blocks to the disk.
type DiskBlockCache interface {
// Get gets a block from the disk cache.
Get(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error)
// Put puts a block to the disk cache.
Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
// DeleteByTLF deletes the given blocks for the given TLF from the disk cache.
DeleteByTLF(ctx context.Context, tlfID tlf.ID, blockIDs []kbfsblock.ID) (numRemoved int, sizeRemoved int64, err error)
// Size returns the size in bytes of the disk cache.
Size() int64
// Shutdown cleanly shuts down the disk block cache.
Shutdown(ctx context.Context)
}
// cryptoPure contains all methods of Crypto that don't depend on
// implicit state, i.e. they're pure functions of the input.
type cryptoPure interface {
// MakeRandomTlfID generates a dir ID using a CSPRNG.
MakeRandomTlfID(isPublic bool) (tlf.ID, error)
// MakeRandomBranchID generates a per-device branch ID using a
// CSPRNG. It will not return LocalSquashBranchID or
// NullBranchID.
MakeRandomBranchID() (BranchID, error)
// MakeMdID computes the MD ID of a RootMetadata object.
// TODO: This should move to BareRootMetadata. Note though, that some mock tests
// rely on it being part of the config and crypto_measured.go uses it to keep
// statistics on time spent hashing.
MakeMdID(md BareRootMetadata) (MdID, error)
// MakeMerkleHash computes the hash of a RootMetadataSigned object
// for inclusion into the KBFS Merkle tree.
MakeMerkleHash(md *RootMetadataSigned) (MerkleHash, error)
// MakeTemporaryBlockID generates a temporary block ID using a
// CSPRNG. This is used for indirect blocks before they're
// committed to the server.
MakeTemporaryBlockID() (kbfsblock.ID, error)
// MakeBlockRefNonce generates a block reference nonce using a
// CSPRNG. This is used for distinguishing different references to
// the same BlockID.
MakeBlockRefNonce() (kbfsblock.RefNonce, error)
// MakeRandomTLFEphemeralKeys generates ephemeral keys using a
// CSPRNG for a TLF. These keys can then be used to key/rekey
// the TLF.
MakeRandomTLFEphemeralKeys() (kbfscrypto.TLFEphemeralPublicKey,
kbfscrypto.TLFEphemeralPrivateKey, error)
// MakeRandomTLFKeys generates keys using a CSPRNG for a
// single key generation of a TLF.
MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey,
kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error)
// MakeRandomTLFCryptKeyServerHalf generates the server-side of a
// top-level folder crypt key.
MakeRandomTLFCryptKeyServerHalf() (
kbfscrypto.TLFCryptKeyServerHalf, error)
// MakeRandomBlockCryptKeyServerHalf generates the server-side of
// a block crypt key.
MakeRandomBlockCryptKeyServerHalf() (
kbfscrypto.BlockCryptKeyServerHalf, error)
// EncryptTLFCryptKeyClientHalf encrypts a TLFCryptKeyClientHalf
// using both a TLF's ephemeral private key and a device pubkey.
EncryptTLFCryptKeyClientHalf(
privateKey kbfscrypto.TLFEphemeralPrivateKey,
publicKey kbfscrypto.CryptPublicKey,
clientHalf kbfscrypto.TLFCryptKeyClientHalf) (
EncryptedTLFCryptKeyClientHalf, error)
// EncryptPrivateMetadata encrypts a PrivateMetadata object.
EncryptPrivateMetadata(
pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) (
EncryptedPrivateMetadata, error)
// DecryptPrivateMetadata decrypts a PrivateMetadata object.
DecryptPrivateMetadata(
encryptedPMD EncryptedPrivateMetadata,
key kbfscrypto.TLFCryptKey) (PrivateMetadata, error)
// EncryptBlock encrypts a block. plainSize is the size of the encoded
// block; EncryptBlock() must guarantee that plainSize <=
// len(encryptedBlock).
EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) (
plainSize int, encryptedBlock EncryptedBlock, err error)
// DecryptBlock decrypts a block. Similar to EncryptBlock(),
// DecryptBlock() must guarantee that (size of the decrypted
// block) <= len(encryptedBlock).
DecryptBlock(encryptedBlock EncryptedBlock,
key kbfscrypto.BlockCryptKey, block Block) error
// GetTLFCryptKeyServerHalfID creates a unique ID for this particular
// kbfscrypto.TLFCryptKeyServerHalf.
GetTLFCryptKeyServerHalfID(
user keybase1.UID, devicePubKey kbfscrypto.CryptPublicKey,
serverHalf kbfscrypto.TLFCryptKeyServerHalf) (
TLFCryptKeyServerHalfID, error)
// VerifyTLFCryptKeyServerHalfID verifies the ID is the proper HMAC result.
VerifyTLFCryptKeyServerHalfID(serverHalfID TLFCryptKeyServerHalfID,
user keybase1.UID, devicePubKey kbfscrypto.CryptPublicKey,
serverHalf kbfscrypto.TLFCryptKeyServerHalf) error
// EncryptMerkleLeaf encrypts a Merkle leaf node with the TLFPublicKey.
EncryptMerkleLeaf(leaf MerkleLeaf, pubKey kbfscrypto.TLFPublicKey,
nonce *[24]byte, ePrivKey kbfscrypto.TLFEphemeralPrivateKey) (
EncryptedMerkleLeaf, error)
// DecryptMerkleLeaf decrypts a Merkle leaf node with the TLFPrivateKey.
DecryptMerkleLeaf(encryptedLeaf EncryptedMerkleLeaf,
privKey kbfscrypto.TLFPrivateKey, nonce *[24]byte,
ePubKey kbfscrypto.TLFEphemeralPublicKey) (*MerkleLeaf, error)
// MakeTLFWriterKeyBundleID hashes a TLFWriterKeyBundleV3 to create an ID.
MakeTLFWriterKeyBundleID(wkb TLFWriterKeyBundleV3) (TLFWriterKeyBundleID, error)
// MakeTLFReaderKeyBundleID hashes a TLFReaderKeyBundleV3 to create an ID.
MakeTLFReaderKeyBundleID(rkb TLFReaderKeyBundleV3) (TLFReaderKeyBundleID, error)
// EncryptTLFCryptKeys encrypts an array of historic TLFCryptKeys.
EncryptTLFCryptKeys(oldKeys []kbfscrypto.TLFCryptKey,
key kbfscrypto.TLFCryptKey) (EncryptedTLFCryptKeys, error)
// DecryptTLFCryptKeys decrypts an array of historic TLFCryptKeys.
DecryptTLFCryptKeys(
encKeys EncryptedTLFCryptKeys, key kbfscrypto.TLFCryptKey) (
[]kbfscrypto.TLFCryptKey, error)
}
// Crypto signs, verifies, encrypts, and decrypts stuff.
type Crypto interface {
cryptoPure
// Duplicate kbfscrypto.Signer here to work around gomock's
// limitations.
Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
SignForKBFS(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
SignToString(context.Context, []byte) (string, error)
// DecryptTLFCryptKeyClientHalf decrypts a
// kbfscrypto.TLFCryptKeyClientHalf using the current device's
// private key and the TLF's ephemeral public key.
DecryptTLFCryptKeyClientHalf(ctx context.Context,
publicKey kbfscrypto.TLFEphemeralPublicKey,
encryptedClientHalf EncryptedTLFCryptKeyClientHalf) (
kbfscrypto.TLFCryptKeyClientHalf, error)
// DecryptTLFCryptKeyClientHalfAny decrypts one of the
// kbfscrypto.TLFCryptKeyClientHalf using the available
// private keys and the ephemeral public key. If promptPaper
// is true, the service will prompt the user for any unlocked
// paper keys.
DecryptTLFCryptKeyClientHalfAny(ctx context.Context,
keys []EncryptedTLFCryptKeyClientAndEphemeral,
promptPaper bool) (
kbfscrypto.TLFCryptKeyClientHalf, int, error)
// Shutdown frees any resources associated with this instance.
Shutdown()
}
// MDOps gets and puts root metadata to an MDServer. On a get, it
// verifies the metadata is signed by the metadata's signing key.
type MDOps interface {
// GetForHandle returns the current metadata object
// corresponding to the given top-level folder's handle and
// merge status, if the logged-in user has read permission on
// the folder. It creates the folder if one doesn't exist
// yet, and the logged-in user has permission to do so.
//
// If there is no returned error, then the returned ID must
// always be non-null. An empty ImmutableRootMetadata may be
// returned, but if it is non-empty, then its ID must match
// the returned ID.
GetForHandle(
ctx context.Context, handle *TlfHandle, mStatus MergeStatus) (
tlf.ID, ImmutableRootMetadata, error)
// GetForTLF returns the current metadata object
// corresponding to the given top-level folder, if the logged-in
// user has read permission on the folder.
GetForTLF(ctx context.Context, id tlf.ID) (ImmutableRootMetadata, error)
// GetUnmergedForTLF is the same as the above but for unmerged
// metadata.
GetUnmergedForTLF(ctx context.Context, id tlf.ID, bid BranchID) (
ImmutableRootMetadata, error)
// GetRange returns a range of metadata objects corresponding to
// the passed revision numbers (inclusive).
GetRange(ctx context.Context, id tlf.ID, start, stop MetadataRevision) (
[]ImmutableRootMetadata, error)
// GetUnmergedRange is the same as the above but for unmerged
// metadata history (inclusive).
GetUnmergedRange(ctx context.Context, id tlf.ID, bid BranchID,
start, stop MetadataRevision) ([]ImmutableRootMetadata, error)
// Put stores the metadata object for the given
// top-level folder.
Put(ctx context.Context, rmd *RootMetadata) (MdID, error)
// PutUnmerged is the same as the above but for unmerged
// metadata history.
PutUnmerged(ctx context.Context, rmd *RootMetadata) (MdID, error)
// PruneBranch prunes all unmerged history for the given TLF
// branch.
PruneBranch(ctx context.Context, id tlf.ID, bid BranchID) error
// ResolveBranch prunes all unmerged history for the given TLF
// branch, and also deletes any blocks in `blocksToDelete` that
// are still in the local journal. It also appends the given MD
// to the journal.
ResolveBranch(ctx context.Context, id tlf.ID, bid BranchID,
blocksToDelete []kbfsblock.ID, rmd *RootMetadata) (MdID, error)
// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
// entered into a conflicting state.
GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (
tlf.Handle, error)
}
// KeyOps fetches server-side key halves from the key server.
type KeyOps interface {
// GetTLFCryptKeyServerHalf gets a server-side key half for a
// device given the key half ID.
GetTLFCryptKeyServerHalf(ctx context.Context,
serverHalfID TLFCryptKeyServerHalfID,
cryptPublicKey kbfscrypto.CryptPublicKey) (
kbfscrypto.TLFCryptKeyServerHalf, error)
// PutTLFCryptKeyServerHalves stores server-side key halves for a
// set of users and devices.
PutTLFCryptKeyServerHalves(ctx context.Context,
keyServerHalves UserDeviceKeyServerHalves) error
// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
// device given the key half ID.
DeleteTLFCryptKeyServerHalf(ctx context.Context,
uid keybase1.UID, key kbfscrypto.CryptPublicKey,
serverHalfID TLFCryptKeyServerHalfID) error
}
// Prefetcher is an interface to a block prefetcher.
type Prefetcher interface {
// PrefetchBlock directs the prefetcher to prefetch a block.
PrefetchBlock(block Block, blockPtr BlockPointer, kmd KeyMetadata,
priority int) error
// PrefetchAfterBlockRetrieved allows the prefetcher to trigger prefetches
// after a block has been retrieved. Whichever component is responsible for
// retrieving blocks will call this method once it's done retrieving a
// block. It caches whether it has triggered a prefetch.
PrefetchAfterBlockRetrieved(b Block, blockPtr BlockPointer,
kmd KeyMetadata, priority int, lifetime BlockCacheLifetime,
hasPrefetched bool)
// Shutdown shuts down the prefetcher idempotently. Future calls to
// the various Prefetch* methods will return io.EOF. The returned channel
// allows upstream components to block until all pending prefetches are
// complete. This feature is mainly used for testing, but also to toggle
// the prefetcher on and off.
Shutdown() <-chan struct{}
}
// BlockOps gets and puts data blocks to a BlockServer. It performs
// the necessary crypto operations on each block.
type BlockOps interface {
// Get gets the block associated with the given block pointer
// (which belongs to the TLF with the given key metadata),
// decrypts it if necessary, and fills in the provided block
// object with its contents, if the logged-in user has read
// permission for that block. cacheLifetime controls the behavior of the
// write-through cache once a Get completes.
Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer,
block Block, cacheLifetime BlockCacheLifetime) error
// GetEncodedSize gets the encoded size of the block associated
// with the given block pointer (which belongs to the TLF with the
// given key metadata).
GetEncodedSize(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer) (uint32, error)
// Ready turns the given block (which belongs to the TLF with
// the given key metadata) into encoded (and encrypted) data,
// and calculates its ID and size, so that we can do a bunch
// of block puts in parallel for every write. Ready() must
// guarantee that plainSize <= readyBlockData.QuotaSize().
Ready(ctx context.Context, kmd KeyMetadata, block Block) (
id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData, err error)
// Delete instructs the server to delete the given block references.
// It returns the number of remaining not-yet-deleted references
// for each block.
Delete(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) (
liveCounts map[kbfsblock.ID]int, err error)
// Archive instructs the server to mark the given block references
// as "archived"; that is, they are not being used in the current
// view of the folder, and shouldn't be served to anyone other
// than folder writers.
Archive(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) error
// TogglePrefetcher activates or deactivates the prefetcher.
TogglePrefetcher(ctx context.Context, enable bool) error
// Prefetcher retrieves this BlockOps' Prefetcher.
Prefetcher() Prefetcher
// Shutdown shuts down all the workers performing Get operations
Shutdown()
}
// Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around
// gomock's limitations.
type authTokenRefreshHandler interface {
RefreshAuthToken(context.Context)
}
// MDServer gets and puts metadata for each top-level directory. The
// instantiation should be able to fetch session/user details via KBPKI. On a
// put, the server is responsible for 1) ensuring the user has appropriate
// permissions for whatever modifications were made; 2) ensuring that
// LastModifyingWriter and LastModifyingUser are updated appropriately; and 3)
// detecting conflicting writes based on the previous root block ID (i.e., when
// it supports strict consistency). On a get, it verifies the logged-in user
// has read permissions.
//
// TODO: Add interface for searching by time
type MDServer interface {
authTokenRefreshHandler
// GetForHandle returns the current (signed/encrypted) metadata
// object corresponding to the given top-level folder's handle, if
// the logged-in user has read permission on the folder. It
// creates the folder if one doesn't exist yet, and the logged-in
// user has permission to do so.
//
// If there is no returned error, then the returned ID must
// always be non-null. A nil *RootMetadataSigned may be
// returned, but if it is non-nil, then its ID must match the
// returned ID.
GetForHandle(ctx context.Context, handle tlf.Handle,
mStatus MergeStatus) (tlf.ID, *RootMetadataSigned, error)
// GetForTLF returns the current (signed/encrypted) metadata object
// corresponding to the given top-level folder, if the logged-in
// user has read permission on the folder.
GetForTLF(ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus) (
*RootMetadataSigned, error)
// GetRange returns a range of (signed/encrypted) metadata objects
// corresponding to the passed revision numbers (inclusive).
GetRange(ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,
start, stop MetadataRevision) ([]*RootMetadataSigned, error)
// Put stores the (signed/encrypted) metadata object for the given
// top-level folder. Note: If the unmerged bit is set in the metadata
// block's flags bitmask it will be appended to the unmerged per-device
// history.
Put(ctx context.Context, rmds *RootMetadataSigned, extra ExtraMetadata) error
// PruneBranch prunes all unmerged history for the given TLF branch.
PruneBranch(ctx context.Context, id tlf.ID, bid BranchID) error
// RegisterForUpdate tells the MD server to inform the caller when
// there is a merged update with a revision number greater than
// currHead, which did NOT originate from this same MD server
// session. This method returns a chan which can receive only a
// single error before it's closed. If the received err is nil,
// then there is updated MD ready to fetch which didn't originate
// locally; if it is non-nil, then the previous registration
// cannot send the next notification (e.g., the connection to the
// MD server may have failed). In either case, the caller must
// re-register to get a new chan that can receive future update
// notifications.
RegisterForUpdate(ctx context.Context, id tlf.ID,
currHead MetadataRevision) (<-chan error, error)
// CancelRegistration lets the local MDServer instance know that
// we are no longer interested in updates for the specified
// folder. It does not necessarily forward this cancellation to
// remote servers.
CancelRegistration(ctx context.Context, id tlf.ID)
// CheckForRekeys initiates the rekey checking process on the
// server. The server is allowed to delay this request, and so it
// returns a channel for returning the error. Actual rekey
// requests are expected to come in asynchronously.
CheckForRekeys(ctx context.Context) <-chan error
// TruncateLock attempts to take the history truncation lock for
// this folder, for a TTL defined by the server. Returns true if
// the lock was successfully taken.
TruncateLock(ctx context.Context, id tlf.ID) (bool, error)
// TruncateUnlock attempts to release the history truncation lock
// for this folder. Returns true if the lock was successfully
// released.
TruncateUnlock(ctx context.Context, id tlf.ID) (bool, error)
// DisableRekeyUpdatesForTesting disables processing rekey updates
// received from the mdserver while testing.
DisableRekeyUpdatesForTesting()
// Shutdown is called to shutdown an MDServer connection.
Shutdown()
// IsConnected returns whether the MDServer is connected.
IsConnected() bool
// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
// entered into a conflicting state. For the highest level of confidence, the caller
// should verify the mapping with a Merkle tree lookup.
GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (
tlf.Handle, error)
// OffsetFromServerTime is the current estimate for how off our
// local clock is from the mdserver clock. Add this to any
// mdserver-provided timestamps to get the "local" time of the
// corresponding event. If the returned bool is false, then we
// don't have a current estimate for the offset.
OffsetFromServerTime() (time.Duration, bool)
// GetKeyBundles looks up the key bundles for the given key
// bundle IDs. tlfID must be non-zero but either or both wkbID
// and rkbID can be zero, in which case nil will be returned
// for the respective bundle. If a bundle cannot be found, an
// error is returned and nils are returned for both bundles.
GetKeyBundles(ctx context.Context, tlfID tlf.ID,
wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) (
*TLFWriterKeyBundleV3, *TLFReaderKeyBundleV3, error)
}
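// watchForMDUpdates is an illustrative sketch, not part of the KBFS
// API, of the re-registration contract documented on
// RegisterForUpdate: each returned channel delivers at most one error
// before being closed, so the caller must re-register after every
// notification, nil or not. The onUpdate callback is hypothetical and
// should fetch the new merged MD, returning its revision as the next
// currHead.
func watchForMDUpdates(ctx context.Context, server MDServer, id tlf.ID,
	currHead MetadataRevision,
	onUpdate func() (MetadataRevision, error)) error {
	for {
		updateChan, err := server.RegisterForUpdate(ctx, id, currHead)
		if err != nil {
			return err
		}
		select {
		case err := <-updateChan:
			if err != nil {
				// This registration can't deliver further
				// notifications; a real caller might back off
				// and then re-register.
				return err
			}
			// Updated MD that didn't originate locally is ready.
			if currHead, err = onUpdate(); err != nil {
				return err
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}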
type mdServerLocal interface {
MDServer
addNewAssertionForTest(
uid keybase1.UID, newAssertion keybase1.SocialAssertion) error
getCurrentMergedHeadRevision(ctx context.Context, id tlf.ID) (
rev MetadataRevision, err error)
isShutdown() bool
copy(config mdServerLocalConfig) mdServerLocal
}
// BlockServer gets and puts opaque data blocks. The instantiation
// should be able to fetch session/user details via KBPKI. On a
// put/delete, the server is responsible for: 1) checking that the ID
// matches the hash of the buffer; and 2) enforcing writer quotas.
type BlockServer interface {
authTokenRefreshHandler
// Get gets the (encrypted) block data associated with the given
// block ID and context, uses the provided block key to decrypt
// the block, and fills in the provided block object with its
// contents, if the logged-in user has read permission for that
// block.
Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error)
// Put stores the (encrypted) block data under the given ID
// and context on the server, along with the server half of
// the block key. context should contain a kbfsblock.RefNonce
// of zero. There will be an initial reference for this block
// for the given context.
//
// Put should be idempotent, although it should also return an
// error if, for a given ID, any of the other arguments differ
// from previous Put calls with the same ID.
//
// If this returns a BServerErrorOverQuota, with Throttled=false,
// the caller can treat it as informational and otherwise ignore
// the error.
Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
// AddBlockReference adds a new reference to the given block,
// defined by the given context (which should contain a
// non-zero kbfsblock.RefNonce). (Contexts with a
// kbfsblock.RefNonce of zero should be used when putting the
// block for the first time via Put().) Returns a
// BServerErrorBlockNonExistent if id is unknown within this
// folder.
//
// AddBlockReference should be idempotent, although it should
// also return an error if, for a given ID and refnonce, any
// of the other fields of context differ from previous
// AddBlockReference calls with the same ID and refnonce.
//
// If this returns a BServerErrorOverQuota, with Throttled=false,
// the caller can treat it as informational and otherwise ignore
// the error.
AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) error
// RemoveBlockReferences removes the references to the given block
// ID defined by the given contexts. If no references to the block
// remain after this call, the server is allowed to delete the
// corresponding block permanently. If the reference defined by
// a given context has already been removed, the call is a no-op
// for that context. It returns the number of remaining
// not-yet-deleted references for each block after this removal.
RemoveBlockReferences(ctx context.Context, tlfID tlf.ID,
contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error)
// ArchiveBlockReferences marks the given block references as
// "archived"; that is, they are not being used in the current
// view of the folder, and shouldn't be served to anyone other
// than folder writers.
//
// For a given ID/refnonce pair, ArchiveBlockReferences should
// be idempotent, although it should also return an error if
// any of the other fields of the context differ from previous
// calls with the same ID/refnonce pair.
ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID,
contexts kbfsblock.ContextMap) error
// IsUnflushed returns whether a given block is being queued
// locally for later flushing to another block server. If the
// block is currently being flushed to the server, this should
// return `true`, so that the caller will try to clean it up from
// the server if it's no longer needed.
IsUnflushed(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID) (
bool, error)
// Shutdown is called to shutdown a BlockServer connection.
Shutdown(ctx context.Context)
// GetUserQuotaInfo returns the quota for the user.
GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.UserQuotaInfo, err error)
}
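// putAndReference is an illustrative sketch, not part of the KBFS
// API, of the reference-counting contract documented on Put and
// AddBlockReference: the initial Put uses a context whose RefNonce is
// zero and creates the first reference; each additional reference is
// added via AddBlockReference with a context carrying a non-zero
// RefNonce.
func putAndReference(ctx context.Context, bs BlockServer, tlfID tlf.ID,
	id kbfsblock.ID, firstContext kbfsblock.Context, buf []byte,
	serverHalf kbfscrypto.BlockCryptKeyServerHalf,
	laterContexts []kbfsblock.Context) error {
	if err := bs.Put(
		ctx, tlfID, id, firstContext, buf, serverHalf); err != nil {
		return err
	}
	for _, c := range laterContexts {
		if err := bs.AddBlockReference(ctx, tlfID, id, c); err != nil {
			return err
		}
	}
	return nil
}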
// blockServerLocal is the interface for BlockServer implementations
// that store data locally.
type blockServerLocal interface {
BlockServer
// getAllRefsForTest returns all the known block references
// for the given TLF, and should only be used during testing.
getAllRefsForTest(ctx context.Context, tlfID tlf.ID) (
map[kbfsblock.ID]blockRefMap, error)
}
// BlockSplitter decides when a file or directory block needs to be split
type BlockSplitter interface {
// CopyUntilSplit copies data into the block until we reach the
// point where we should split, but only if writing to the end of
// the last block. If this is writing into the middle of a file,
// just copy everything that will fit into the block, and assume
// that block boundaries will be fixed later. Return how much was
// copied.
CopyUntilSplit(
block *FileBlock, lastBlock bool, data []byte, off int64) int64
// CheckSplit, given a block, figures out whether it ends at the
// right place. If so, return 0. If not, return either the
// offset in the block where it should be split, or -1 if more
// bytes from the next block should be appended.
CheckSplit(block *FileBlock) int64
// MaxPtrsPerBlock describes the number of indirect pointers we
// can fit into one indirect block.
MaxPtrsPerBlock() int
// ShouldEmbedBlockChanges decides whether we should keep the
// block changes embedded in the MD or not.
ShouldEmbedBlockChanges(bc *BlockChanges) bool
}
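// fixedSizeCheckSplit is an illustrative sketch, not part of the KBFS
// API, of the CheckSplit return convention: 0 means the block already
// ends in the right place, a positive offset is where the block
// should be split, and -1 asks for more bytes from the next block to
// be appended. It assumes FileBlock exposes its data as a Contents
// byte slice; maxSize is a hypothetical target block size.
func fixedSizeCheckSplit(block *FileBlock, maxSize int64) int64 {
	size := int64(len(block.Contents))
	switch {
	case size == maxSize:
		return 0 // ends exactly at the target boundary
	case size > maxSize:
		return maxSize // split at the target boundary
	default:
		return -1 // too short; absorb bytes from the next block
	}
}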
// KeyServer fetches/writes server-side key halves from/to the key server.
type KeyServer interface {
// GetTLFCryptKeyServerHalf gets a server-side key half for a
// device given the key half ID.
GetTLFCryptKeyServerHalf(ctx context.Context,
serverHalfID TLFCryptKeyServerHalfID,
cryptPublicKey kbfscrypto.CryptPublicKey) (
kbfscrypto.TLFCryptKeyServerHalf, error)
// PutTLFCryptKeyServerHalves stores server-side key halves for a
// set of users and devices.
PutTLFCryptKeyServerHalves(ctx context.Context,
keyServerHalves UserDeviceKeyServerHalves) error
// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
// device given the key half ID.
DeleteTLFCryptKeyServerHalf(ctx context.Context,
uid keybase1.UID, key kbfscrypto.CryptPublicKey,
serverHalfID TLFCryptKeyServerHalfID) error
// Shutdown is called to free any KeyServer resources.
Shutdown()
}
// NodeChange represents a change made to a node as part of an atomic
// file system operation.
type NodeChange struct {
Node Node
// Basenames of entries added/removed.
DirUpdated []string
FileUpdated []WriteRange
}
// Observer can be notified that there is an available update for a
// given directory. The notification callbacks should not block, or
// make any calls to the Notifier interface. Nodes passed to the
// observer should not be held past the end of the notification
// callback.
type Observer interface {
// LocalChange announces that the file at this Node has been
// updated locally, but not yet saved at the server.
LocalChange(ctx context.Context, node Node, write WriteRange)
// BatchChanges announces that the nodes have all been updated
// together atomically. Each NodeChange in changes affects the
// same top-level folder and branch.
BatchChanges(ctx context.Context, changes []NodeChange)
// TlfHandleChange announces that the handle of the corresponding
// folder branch has changed, likely due to previously-unresolved
// assertions becoming resolved. This indicates that the listener
// should switch over any cached paths for this folder-branch to
// the new name. Nodes that were acquired under the old name will
// still continue to work, but new lookups on the old name may
// either encounter alias errors or entirely new TLFs (in the case
// of conflicts).
TlfHandleChange(ctx context.Context, newHandle *TlfHandle)
}
// Notifier notifies registrants of directory changes
type Notifier interface {
// RegisterForChanges declares that the given Observer wants to
// subscribe to updates for the given top-level folders.
RegisterForChanges(folderBranches []FolderBranch, obs Observer) error
// UnregisterFromChanges declares that the given Observer no
// longer wants to subscribe to updates for the given top-level
// folders.
UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error
}
// Clock is an interface for getting the current time
type Clock interface {
// Now returns the current time.
Now() time.Time
}
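// fixedClock is an illustrative sketch, not part of the KBFS API,
// showing the point of the Clock interface: tests can swap in a
// deterministic time source in place of the wall clock.
type fixedClock struct{ t time.Time }

// Now implements Clock by always returning the same instant.
func (c fixedClock) Now() time.Time { return c.t }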
// ConflictRenamer deals with names for conflicting directory entries.
type ConflictRenamer interface {
// ConflictRename returns the appropriately modified filename.
ConflictRename(ctx context.Context, op op, original string) (
string, error)
}
// Config collects all the singleton instance instantiations needed to
// run KBFS in one place. The methods below are self-explanatory and
// do not require comments.
type Config interface {
dataVersioner
logMaker
blockCacher
blockServerGetter
codecGetter
cryptoPureGetter
keyGetterGetter
cryptoGetter
signerGetter
currentSessionGetterGetter
diskBlockCacheGetter
diskBlockCacheSetter
clockGetter
diskLimiterGetter
KBFSOps() KBFSOps
SetKBFSOps(KBFSOps)
KBPKI() KBPKI
SetKBPKI(KBPKI)
KeyManager() KeyManager
SetKeyManager(KeyManager)
Reporter() Reporter
SetReporter(Reporter)
MDCache() MDCache
SetMDCache(MDCache)
KeyCache() KeyCache
SetKeyCache(KeyCache)
KeyBundleCache() KeyBundleCache
SetKeyBundleCache(KeyBundleCache)
SetBlockCache(BlockCache)
DirtyBlockCache() DirtyBlockCache
SetDirtyBlockCache(DirtyBlockCache)
SetCrypto(Crypto)
SetCodec(kbfscodec.Codec)
MDOps() MDOps
SetMDOps(MDOps)
KeyOps() KeyOps
SetKeyOps(KeyOps)
BlockOps() BlockOps
SetBlockOps(BlockOps)
MDServer() MDServer
SetMDServer(MDServer)
SetBlockServer(BlockServer)
KeyServer() KeyServer
SetKeyServer(KeyServer)
KeybaseService() KeybaseService
SetKeybaseService(KeybaseService)
BlockSplitter() BlockSplitter
SetBlockSplitter(BlockSplitter)
Notifier() Notifier
SetNotifier(Notifier)
SetClock(Clock)
ConflictRenamer() ConflictRenamer
SetConflictRenamer(ConflictRenamer)
MetadataVersion() MetadataVer
SetMetadataVersion(MetadataVer)
RekeyQueue() RekeyQueue
SetRekeyQueue(RekeyQueue)
// ReqsBufSize indicates the number of read or write operations
// that can be buffered per folder
ReqsBufSize() int
// MaxNameBytes indicates the maximum supported size of a
// directory entry name in bytes.
MaxNameBytes() uint32
// MaxDirBytes indicates the maximum supported plaintext size of a
// directory in bytes.
MaxDirBytes() uint64
// DoBackgroundFlushes says whether we should periodically try to
// flush dirty files, even without a sync from the user. Should
// be true except for during some testing.
DoBackgroundFlushes() bool
SetDoBackgroundFlushes(bool)
// RekeyWithPromptWaitTime indicates how long to wait, after
// setting the rekey bit, before prompting for a paper key.
RekeyWithPromptWaitTime() time.Duration
SetRekeyWithPromptWaitTime(time.Duration)
// Mode indicates how KBFS is configured to run.
Mode() InitMode
// DelayedCancellationGracePeriod specifies a grace period for which a
// delayed cancellation waits before actually cancelling the context.
// This is useful for giving the critical portion of a slow remote
// operation some extra time to finish, as an effort to avoid
// conflicts. For example, an O_EXCL Create call interrupted by an
// ALRM signal may actually make it to the server, while the
// application assumes it didn't since EINTR was returned. A delayed
// cancellation allows us to distinguish between a successful cancel
// (where the remote operation didn't make it to the server) and a
// failed cancel (where the remote operation made it to the server).
// However, the optimal value depends on network conditions: a long
// grace period under really good network conditions would just
// unnecessarily slow down Ctrl-C.
//
// TODO: make this adaptive and self-change over time based on network
// conditions.
DelayedCancellationGracePeriod() time.Duration
SetDelayedCancellationGracePeriod(time.Duration)
// QuotaReclamationPeriod indicates how often each TLF should
// check for quota to reclaim. If Duration.Seconds() == 0,
// quota reclamation should not run automatically.
QuotaReclamationPeriod() time.Duration
// QuotaReclamationMinUnrefAge indicates the minimum time a block
// must have been unreferenced before it can be reclaimed.
QuotaReclamationMinUnrefAge() time.Duration
// QuotaReclamationMinHeadAge indicates the minimum age of the
// most recently merged MD update before we can run reclamation,
// to avoid conflicting with a currently active writer.
QuotaReclamationMinHeadAge() time.Duration
// ResetCaches clears and re-initializes all data and key caches.
ResetCaches()
// StorageRoot returns the path to the storage root for this config.
StorageRoot() string
// MetricsRegistry may be nil, which should be interpreted as
// not using metrics at all. (i.e., as if UseNilMetrics were
// set). This differs from how go-metrics treats nil Registry
// objects, which is to use the default registry.
MetricsRegistry() metrics.Registry
SetMetricsRegistry(metrics.Registry)
// TLFValidDuration is the time TLFs are valid before identification needs to be redone.
TLFValidDuration() time.Duration
// SetTLFValidDuration sets TLFValidDuration.
SetTLFValidDuration(time.Duration)
// Shutdown is called to free config resources.
Shutdown(context.Context) error
// CheckStateOnShutdown tells the caller whether or not it is safe
// to check the state of the system on shutdown.
CheckStateOnShutdown() bool
}
// NodeCache holds Nodes, and allows libkbfs to update them when
// things change about the underlying KBFS blocks. It is probably
// most useful to instantiate this on a per-folder-branch basis, so
// that it can create a Path with the correct DirId and Branch name.
type NodeCache interface {
// GetOrCreate either makes a new Node for the given
// BlockPointer, or returns an existing one. TODO: If we ever
// support hard links, we will have to revisit the "name" and
// "parent" parameters here. name must not be empty. Returns
// an error if parent cannot be found.
GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error)
// Get returns the Node associated with the given ptr if one
// already exists. Otherwise, it returns nil.
Get(ref BlockRef) Node
// UpdatePointer updates the BlockPointer for the corresponding
// Node. NodeCache ignores this call when oldRef is not cached in
// any Node. Returns whether the pointer was updated.
UpdatePointer(oldRef BlockRef, newPtr BlockPointer) bool
// Move swaps the parent node for the corresponding Node, and
// updates the node's name. NodeCache ignores the call when ptr
// is not cached. Returns an error if newParent cannot be found.
// If newParent is nil, it treats the ptr's corresponding node as
// being unlinked from the old parent completely.
Move(ref BlockRef, newParent Node, newName string) error
// Unlink sets the corresponding node's parent to nil and caches
// the provided path in case the node is still open. NodeCache
// ignores the call when ptr is not cached. The path is required
// because the caller may have made changes to the parent nodes
// already that shouldn't be reflected in the cached path.
// Returns whether a node was actually updated.
Unlink(ref BlockRef, oldPath path) bool
// PathFromNode creates the path up to a given Node.
PathFromNode(node Node) path
// AllNodes returns the complete set of nodes currently in the cache.
AllNodes() []Node
}
// fileBlockDeepCopier fetches a file block, makes a deep copy of it
// (duplicating pointer for any indirect blocks) and generates a new
// random temporary block ID for it. It returns the new BlockPointer,
// and internally saves the block for future uses.
type fileBlockDeepCopier func(context.Context, string, BlockPointer) (
BlockPointer, error)
// crAction represents a specific action to take as part of the
// conflict resolution process.
type crAction interface {
// swapUnmergedBlock should be called before do(), and if it
// returns true, the caller must use the merged block
// corresponding to the returned BlockPointer instead of
// unmergedBlock when calling do(). If BlockPointer{} is zeroPtr
// (and true is returned), just swap in the regular mergedBlock.
swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains,
unmergedBlock *DirBlock) (bool, BlockPointer, error)
// do modifies the given merged block in place to resolve the
// conflict, and potentially uses the provided blockCopyFetchers to
// obtain copies of other blocks (along with new BlockPointers)
// when requiring a block copy.
do(ctx context.Context, unmergedCopier fileBlockDeepCopier,
mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock,
mergedBlock *DirBlock) error
// updateOps potentially modifies, in place, the slices of
// unmerged and merged operations stored in the corresponding
// crChains for the given unmerged and merged most recent
// pointers. Eventually, the "unmerged" ops will be pushed as
// part of an MD update, and so should contain any operations
// necessary to fully merge the unmerged data, including any
// conflict resolution. The "merged" ops will be played through
// locally, to notify any caches about the newly-obtained merged
// data (and any changes to local data that were required as part
// of conflict resolution, such as renames). A few things to note:
// * A particular action's updateOps method may be called more than
// once for different sets of chains, however it should only add
// new directory operations (like create/rm/rename) into directory
// chains.
// * updateOps doesn't necessarily result in correct BlockPointers within
// each of those ops; that must happen in a later phase.
// * mergedBlock can be nil if the chain is for a file.
updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer,
unmergedBlock *DirBlock, mergedBlock *DirBlock,
unmergedChains *crChains, mergedChains *crChains) error
// String returns a string representation for this crAction, used
// for debugging.
String() string
}
// RekeyQueue is a managed queue of folders needing some rekey action taken
// upon them by the current client.
type RekeyQueue interface {
// Enqueue enqueues a folder for rekey action. Enqueueing a TLF
// that is already in the rekey queue is a no-op.
Enqueue(tlf.ID)
// IsRekeyPending returns true if the given folder is in the rekey queue.
// Note that an ongoing rekey doesn't count as "pending".
IsRekeyPending(tlf.ID) bool
// Shutdown cancels all pending rekey actions and clears the queue. It
// doesn't cancel ongoing rekeys. After Shutdown() is called, the same
// RekeyQueue shouldn't be used anymore.
Shutdown()
}
// BareRootMetadata is a read-only interface to the bare serializable MD that
// is signed by the reader or writer.
type BareRootMetadata interface {
// TlfID returns the ID of the TLF this BareRootMetadata is for.
TlfID() tlf.ID
// KeyGenerationsToUpdate returns a range that has to be
// updated when rekeying. start is included, but end is not
// included. This range can be empty (i.e., start >= end), in
// which case there's nothing to update, i.e. the TLF is
// public, or there aren't any existing key generations.
KeyGenerationsToUpdate() (start KeyGen, end KeyGen)
// LatestKeyGeneration returns the most recent key generation in this
// BareRootMetadata, or PublicKeyGen if this TLF is public.
LatestKeyGeneration() KeyGen
// IsValidRekeyRequest returns true if the current block is a simple rekey wrt
// the passed block.
IsValidRekeyRequest(codec kbfscodec.Codec, prevMd BareRootMetadata,
user keybase1.UID, prevExtra, extra ExtraMetadata) (bool, error)
// MergedStatus returns the status of this update -- has it been
// merged into the main folder or not?
MergedStatus() MergeStatus
// IsRekeySet returns true if the rekey bit is set.
IsRekeySet() bool
// IsWriterMetadataCopiedSet returns true if the bit is set indicating
// the writer metadata was copied.
IsWriterMetadataCopiedSet() bool
// IsFinal returns true if this is the last metadata block for a given
// folder. This is only expected to be set for folder resets.
IsFinal() bool
// IsWriter returns whether or not the user+device is an authorized writer.
IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey, extra ExtraMetadata) bool
// IsReader returns whether or not the user+device is an authorized reader.
IsReader(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey, extra ExtraMetadata) bool
// DeepCopy returns a deep copy of the underlying data structure.
DeepCopy(codec kbfscodec.Codec) (MutableBareRootMetadata, error)
// MakeSuccessorCopy returns a newly constructed successor
// copy to this metadata revision. It differs from DeepCopy
// in that it can perform an up conversion to a new metadata
// version. tlfCryptKeyGetter should be a function that
// returns a list of TLFCryptKeys for all key generations in
// ascending order.
MakeSuccessorCopy(codec kbfscodec.Codec, crypto cryptoPure,
extra ExtraMetadata, latestMDVer MetadataVer,
tlfCryptKeyGetter func() ([]kbfscrypto.TLFCryptKey, error),
isReadableAndWriter bool) (mdCopy MutableBareRootMetadata,
extraCopy ExtraMetadata, err error)
// CheckValidSuccessor makes sure the given BareRootMetadata is a valid
// successor to the current one, and returns an error otherwise.
CheckValidSuccessor(currID MdID, nextMd BareRootMetadata) error
// CheckValidSuccessorForServer is like CheckValidSuccessor but with
// server-specific error messages.
CheckValidSuccessorForServer(currID MdID, nextMd BareRootMetadata) error
// MakeBareTlfHandle makes a tlf.Handle for this
// BareRootMetadata. Should be used only by servers and MDOps.
MakeBareTlfHandle(extra ExtraMetadata) (tlf.Handle, error)
	// TlfHandleExtensions returns a list of handle extensions associated with the TLF.
TlfHandleExtensions() (extensions []tlf.HandleExtension)
	// GetUserDevicePublicKeys returns the kbfscrypto.CryptPublicKeys
// for all known users and devices. Returns an error if the
// TLF is public.
GetUserDevicePublicKeys(extra ExtraMetadata) (
writers, readers UserDevicePublicKeys, err error)
// GetTLFCryptKeyParams returns all the necessary info to construct
// the TLF crypt key for the given key generation, user, and device
// (identified by its crypt public key), or false if not found. This
// returns an error if the TLF is public.
GetTLFCryptKeyParams(keyGen KeyGen, user keybase1.UID,
key kbfscrypto.CryptPublicKey, extra ExtraMetadata) (
kbfscrypto.TLFEphemeralPublicKey,
EncryptedTLFCryptKeyClientHalf,
TLFCryptKeyServerHalfID, bool, error)
// IsValidAndSigned verifies the BareRootMetadata, checks the
// writer signature, and returns an error if a problem was
// found. This should be the first thing checked on a BRMD
// retrieved from an untrusted source, and then the signing
// user and key should be validated, either by comparing to
// the current device key (using IsLastModifiedBy), or by
// checking with KBPKI.
IsValidAndSigned(codec kbfscodec.Codec,
crypto cryptoPure, extra ExtraMetadata) error
// IsLastModifiedBy verifies that the BareRootMetadata is
// written by the given user and device (identified by the
// device verifying key), and returns an error if not.
IsLastModifiedBy(uid keybase1.UID, key kbfscrypto.VerifyingKey) error
	// LastModifyingWriter returns the UID of the last user to modify the writer metadata.
LastModifyingWriter() keybase1.UID
	// GetLastModifyingUser returns the UID of the last user to modify any of the metadata.
GetLastModifyingUser() keybase1.UID
// RefBytes returns the number of newly referenced bytes of data blocks introduced by this revision of metadata.
RefBytes() uint64
// UnrefBytes returns the number of newly unreferenced bytes introduced by this revision of metadata.
UnrefBytes() uint64
// MDRefBytes returns the number of newly referenced bytes of MD blocks introduced by this revision of metadata.
MDRefBytes() uint64
// DiskUsage returns the estimated disk usage for the folder as of this revision of metadata.
DiskUsage() uint64
// MDDiskUsage returns the estimated MD disk usage for the folder as of this revision of metadata.
MDDiskUsage() uint64
// RevisionNumber returns the revision number associated with this metadata structure.
RevisionNumber() MetadataRevision
// BID returns the per-device branch ID associated with this metadata revision.
BID() BranchID
// GetPrevRoot returns the hash of the previous metadata revision.
GetPrevRoot() MdID
// IsUnmergedSet returns true if the unmerged bit is set.
IsUnmergedSet() bool
// GetSerializedPrivateMetadata returns the serialized private metadata as a byte slice.
GetSerializedPrivateMetadata() []byte
// GetSerializedWriterMetadata serializes the underlying writer metadata and returns the result.
GetSerializedWriterMetadata(codec kbfscodec.Codec) ([]byte, error)
// Version returns the metadata version.
Version() MetadataVer
// GetCurrentTLFPublicKey returns the TLF public key for the
// current key generation.
GetCurrentTLFPublicKey(ExtraMetadata) (kbfscrypto.TLFPublicKey, error)
// GetUnresolvedParticipants returns any unresolved readers
// and writers present in this revision of metadata. The
// returned array should be safe to modify by the caller.
GetUnresolvedParticipants() []keybase1.SocialAssertion
// GetTLFWriterKeyBundleID returns the ID of the externally-stored writer key bundle, or the zero value if
// this object stores it internally.
GetTLFWriterKeyBundleID() TLFWriterKeyBundleID
// GetTLFReaderKeyBundleID returns the ID of the externally-stored reader key bundle, or the zero value if
// this object stores it internally.
GetTLFReaderKeyBundleID() TLFReaderKeyBundleID
// StoresHistoricTLFCryptKeys returns whether or not history keys are symmetrically encrypted; if not, they're
// encrypted per-device.
StoresHistoricTLFCryptKeys() bool
// GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given
// generation using the current generation's TLFCryptKey.
GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen,
currentKey kbfscrypto.TLFCryptKey, extra ExtraMetadata) (
kbfscrypto.TLFCryptKey, error)
}
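// keyGensToUpdate is an illustrative sketch, not part of the original file:
// it materializes the half-open [start, end) range documented on
// KeyGenerationsToUpdate, which is empty (start >= end) for public TLFs or
// when there are no existing key generations.
func keyGensToUpdate(md BareRootMetadata) []KeyGen {
	start, end := md.KeyGenerationsToUpdate()
	var gens []KeyGen
	for kg := start; kg < end; kg++ {
		gens = append(gens, kg)
	}
	return gens
}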
// MutableBareRootMetadata is a mutable interface to the bare serializable MD that is signed by the reader or writer.
type MutableBareRootMetadata interface {
BareRootMetadata
// SetRefBytes sets the number of newly referenced bytes of data blocks introduced by this revision of metadata.
SetRefBytes(refBytes uint64)
// SetUnrefBytes sets the number of newly unreferenced bytes introduced by this revision of metadata.
SetUnrefBytes(unrefBytes uint64)
// SetMDRefBytes sets the number of newly referenced bytes of MD blocks introduced by this revision of metadata.
SetMDRefBytes(mdRefBytes uint64)
// SetDiskUsage sets the estimated disk usage for the folder as of this revision of metadata.
SetDiskUsage(diskUsage uint64)
// SetMDDiskUsage sets the estimated MD disk usage for the folder as of this revision of metadata.
SetMDDiskUsage(mdDiskUsage uint64)
// AddRefBytes increments the number of newly referenced bytes of data blocks introduced by this revision of metadata.
AddRefBytes(refBytes uint64)
// AddUnrefBytes increments the number of newly unreferenced bytes introduced by this revision of metadata.
AddUnrefBytes(unrefBytes uint64)
// AddMDRefBytes increments the number of newly referenced bytes of MD blocks introduced by this revision of metadata.
AddMDRefBytes(mdRefBytes uint64)
// AddDiskUsage increments the estimated disk usage for the folder as of this revision of metadata.
AddDiskUsage(diskUsage uint64)
// AddMDDiskUsage increments the estimated MD disk usage for the folder as of this revision of metadata.
AddMDDiskUsage(mdDiskUsage uint64)
// ClearRekeyBit unsets any set rekey bit.
ClearRekeyBit()
// ClearWriterMetadataCopiedBit unsets any set writer metadata copied bit.
ClearWriterMetadataCopiedBit()
// ClearFinalBit unsets any final bit.
ClearFinalBit()
// SetUnmerged sets the unmerged bit.
SetUnmerged()
// SetBranchID sets the branch ID for this metadata revision.
SetBranchID(bid BranchID)
// SetPrevRoot sets the hash of the previous metadata revision.
SetPrevRoot(mdID MdID)
// SetSerializedPrivateMetadata sets the serialized private metadata.
SetSerializedPrivateMetadata(spmd []byte)
// SignWriterMetadataInternally signs the writer metadata, for
// versions that store this signature inside the metadata.
SignWriterMetadataInternally(ctx context.Context,
codec kbfscodec.Codec, signer kbfscrypto.Signer) error
// SetLastModifyingWriter sets the UID of the last user to modify the writer metadata.
SetLastModifyingWriter(user keybase1.UID)
// SetLastModifyingUser sets the UID of the last user to modify any of the metadata.
SetLastModifyingUser(user keybase1.UID)
// SetRekeyBit sets the rekey bit.
SetRekeyBit()
// SetFinalBit sets the finalized bit.
SetFinalBit()
	// SetWriterMetadataCopiedBit sets the writer metadata copied bit.
SetWriterMetadataCopiedBit()
// SetRevision sets the revision number of the underlying metadata.
SetRevision(revision MetadataRevision)
// SetUnresolvedReaders sets the list of unresolved readers associated with this folder.
SetUnresolvedReaders(readers []keybase1.SocialAssertion)
// SetUnresolvedWriters sets the list of unresolved writers associated with this folder.
SetUnresolvedWriters(writers []keybase1.SocialAssertion)
// SetConflictInfo sets any conflict info associated with this metadata revision.
SetConflictInfo(ci *tlf.HandleExtension)
// SetFinalizedInfo sets any finalized info associated with this metadata revision.
SetFinalizedInfo(fi *tlf.HandleExtension)
// SetWriters sets the list of writers associated with this folder.
SetWriters(writers []keybase1.UID)
// SetTlfID sets the ID of the underlying folder in the metadata structure.
SetTlfID(tlf tlf.ID)
// AddKeyGeneration adds a new key generation to this revision
// of metadata. If StoresHistoricTLFCryptKeys is false, then
// currCryptKey must be zero. Otherwise, currCryptKey must be
// zero if there are no existing key generations, and non-zero
	// otherwise.
//
// AddKeyGeneration must only be called on metadata for
// private TLFs.
//
// Note that the TLFPrivateKey corresponding to privKey must
// also be stored in PrivateMetadata.
AddKeyGeneration(codec kbfscodec.Codec, crypto cryptoPure,
currExtra ExtraMetadata,
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
ePubKey kbfscrypto.TLFEphemeralPublicKey,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
pubKey kbfscrypto.TLFPublicKey,
currCryptKey, nextCryptKey kbfscrypto.TLFCryptKey) (
nextExtra ExtraMetadata,
serverHalves UserDeviceKeyServerHalves, err error)
// UpdateKeyBundles ensures that every device for every writer
// and reader in the provided lists has complete TLF crypt key
// info, and uses the new ephemeral key pair to generate the
// info if it doesn't yet exist. tlfCryptKeys must contain an
// entry for each key generation in KeyGenerationsToUpdate(),
// in ascending order.
//
	// updatedWriterKeys and updatedReaderKeys usually contain
// the full maps of writers to per-device crypt public keys,
// but for reader rekey, updatedWriterKeys will be empty and
// updatedReaderKeys will contain only a single entry.
//
// UpdateKeyBundles must only be called on metadata for
// private TLFs.
//
// An array of server halves to push to the server are
// returned, with each entry corresponding to each key
// generation in KeyGenerationsToUpdate(), in ascending order.
UpdateKeyBundles(crypto cryptoPure, extra ExtraMetadata,
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
ePubKey kbfscrypto.TLFEphemeralPublicKey,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
tlfCryptKeys []kbfscrypto.TLFCryptKey) (
[]UserDeviceKeyServerHalves, error)
// PromoteReaders converts the given set of users (which may
// be empty) from readers to writers.
PromoteReaders(readersToPromote map[keybase1.UID]bool,
extra ExtraMetadata) error
// RevokeRemovedDevices removes key info for any device not in
// the given maps, and returns a corresponding map of server
// halves to delete from the server.
//
// Note: the returned server halves may not be for all key
// generations, e.g. for MDv3 it's only for the latest key
// generation.
RevokeRemovedDevices(
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
extra ExtraMetadata) (ServerHalfRemovalInfo, error)
	// FinalizeRekey must be called after all rekeying work
// has been performed on the underlying metadata.
FinalizeRekey(c cryptoPure, extra ExtraMetadata) error
}
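// wireSuccessor is an illustrative sketch, not part of the original file:
// it chains a new revision onto its predecessor using the mutators above.
// prevID is assumed to be the MdID of prev.
func wireSuccessor(md MutableBareRootMetadata, prev BareRootMetadata, prevID MdID) {
	md.SetPrevRoot(prevID)
	md.SetRevision(prev.RevisionNumber() + 1)
}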
// KeyBundleCache is an interface to a key bundle cache for use with v3 metadata.
type KeyBundleCache interface {
// GetTLFReaderKeyBundle returns the TLFReaderKeyBundleV3 for
// the given TLFReaderKeyBundleID, or nil if there is none.
GetTLFReaderKeyBundle(tlf.ID, TLFReaderKeyBundleID) (*TLFReaderKeyBundleV3, error)
// GetTLFWriterKeyBundle returns the TLFWriterKeyBundleV3 for
// the given TLFWriterKeyBundleID, or nil if there is none.
GetTLFWriterKeyBundle(tlf.ID, TLFWriterKeyBundleID) (*TLFWriterKeyBundleV3, error)
// PutTLFReaderKeyBundle stores the given TLFReaderKeyBundleV3.
PutTLFReaderKeyBundle(tlf.ID, TLFReaderKeyBundleID, TLFReaderKeyBundleV3)
// PutTLFWriterKeyBundle stores the given TLFWriterKeyBundleV3.
PutTLFWriterKeyBundle(tlf.ID, TLFWriterKeyBundleID, TLFWriterKeyBundleV3)
}
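// getReaderKeyBundleCached is an illustrative sketch, not part of the
// original file: it shows the intended KeyBundleCache contract of checking
// the cache first and falling back to a caller-supplied fetch function on a
// miss (a nil bundle with a nil error). fetch is assumed to return a
// non-nil bundle on success.
func getReaderKeyBundleCached(cache KeyBundleCache, id tlf.ID,
	bid TLFReaderKeyBundleID,
	fetch func() (*TLFReaderKeyBundleV3, error)) (*TLFReaderKeyBundleV3, error) {
	rkb, err := cache.GetTLFReaderKeyBundle(id, bid)
	if err != nil || rkb != nil {
		return rkb, err
	}
	if rkb, err = fetch(); err != nil {
		return nil, err
	}
	cache.PutTLFReaderKeyBundle(id, bid, *rkb)
	return rkb, nil
}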
// RekeyFSM is a Finite State Machine (FSM) for housekeeping rekey states for a
// FolderBranch. Each FolderBranch has its own FSM for rekeys.
//
// See rekey_fsm.go for implementation details.
//
// TODO: report FSM status in FolderBranchStatus?
type RekeyFSM interface {
// Event sends an event to the FSM.
Event(event RekeyEvent)
// Shutdown shuts down the FSM. No new event should be sent into the FSM
// after this method is called.
Shutdown()
// listenOnEvent adds a listener (callback) to the FSM so that when
// event happens, callback is called with the received event. If repeatedly
// is set to false, callback is called only once. Otherwise it's called every
// time event happens.
//
// Currently this is only used in tests and for RekeyFile. See comment for
// RequestRekeyAndWaitForOneFinishEvent for more details.
listenOnEvent(
event rekeyEventType, callback func(RekeyEvent), repeatedly bool)
}
| 1 | 16,314 | "including"? Since it doesn't take any parameters, I'm not sure how it can update anything else... | keybase-kbfs | go |
@@ -674,7 +674,7 @@ namespace AutoRest.Core.Properties {
}
/// <summary>
- /// Looks up a localized string similar to Non-HTTPS/HTTP schemes have limited support.
+ /// Looks up a localized string similar to Azure Resource Management only supports HTTPS scheme..
/// </summary>
public static string SupportedSchemesWarningMessage {
get { | 1 | //------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:4.0.30319.42000
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
namespace AutoRest.Core.Properties {
using System;
using System.Reflection;
/// <summary>
/// A strongly-typed resource class, for looking up localized strings, etc.
/// </summary>
// This class was auto-generated by the StronglyTypedResourceBuilder
// class via a tool like ResGen or Visual Studio.
// To add or remove a member, edit your .ResX file then rerun ResGen
// with the /str option, or rebuild your VS project.
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
public class Resources {
private static global::System.Resources.ResourceManager resourceMan;
private static global::System.Globalization.CultureInfo resourceCulture;
[global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
internal Resources() {
}
/// <summary>
/// Returns the cached ResourceManager instance used by this class.
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
public static global::System.Resources.ResourceManager ResourceManager {
get {
if (object.ReferenceEquals(resourceMan, null)) {
global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("AutoRest.Core.Properties.Resources", typeof(Resources).GetTypeInfo().Assembly);
resourceMan = temp;
}
return resourceMan;
}
}
/// <summary>
/// Overrides the current thread's CurrentUICulture property for all
/// resource lookups using this strongly typed resource class.
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
public static global::System.Globalization.CultureInfo Culture {
get {
return resourceCulture;
}
set {
resourceCulture = value;
}
}
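        // Illustrative usage sketch, not part of the generated code: each
        // accessor below returns a composite format string whose placeholders
        // are filled in by the caller, e.g. (with hypothetical values)
        //   var msg = string.Format(Resources.OperationIdMissing, "get", "/widgets");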
/// <summary>
/// Looks up a localized string similar to Top level properties should be one of name, type, id, location, properties, tags, plan, sku, etag, managedBy, identity. Extra properties found: "{0}"..
/// </summary>
public static string AllowedTopLevelProperties {
get {
return ResourceManager.GetString("AllowedTopLevelProperties", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to For better generated code quality, define schemas instead of using anonymous types..
/// </summary>
public static string AnonymousTypesDiscouraged {
get {
return ResourceManager.GetString("AnonymousTypesDiscouraged", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to API Version must be in the format: yyyy-MM-dd, optionally followed by -preview, -alpha, -beta, -rc, -privatepreview..
/// </summary>
public static string APIVersionFormatIsNotValid {
get {
return ResourceManager.GetString("APIVersionFormatIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Top level property names should not be repeated inside the properties bag for ARM resource "{0}". Properties [{1}] conflict with ARM top level properties. Please rename these..
/// </summary>
public static string ArmPropertiesBagValidationMessage {
get {
return ResourceManager.GetString("ArmPropertiesBagValidationMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to AutoRest Core {0}.
/// </summary>
public static string AutoRestCore {
get {
return ResourceManager.GetString("AutoRestCore", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Each body parameter must have a schema.
/// </summary>
public static string BodyMustHaveSchema {
get {
return ResourceManager.GetString("BodyMustHaveSchema", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property named: "{0}", must follow camelCase style. Example: "{1}"..
/// </summary>
public static string BodyPropertyNameCamelCase {
get {
return ResourceManager.GetString("BodyPropertyNameCamelCase", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to A body parameter cannot have a type, format, or any other properties describing its type..
/// </summary>
public static string BodyWithType {
get {
return ResourceManager.GetString("BodyWithType", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Booleans are not descriptive and make them hard to use. Instead use string enums with allowed set of values defined: '{0}'..
/// </summary>
public static string BooleanPropertyNotRecommended {
get {
return ResourceManager.GetString("BooleanPropertyNotRecommended", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Errors found during Swagger document validation..
/// </summary>
public static string CodeGenerationError {
get {
return ResourceManager.GetString("CodeGenerationError", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Code generation failed with errors. See inner exceptions for details..
/// </summary>
public static string CodeGenerationFailed {
get {
return ResourceManager.GetString("CodeGenerationFailed", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Could not load CodeGenSettings file '{0}'. Exception: '{1}'..
/// </summary>
public static string CodeGenSettingsFileInvalid {
get {
return ResourceManager.GetString("CodeGenSettingsFileInvalid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Collection object {0} returned by list operation {1} with 'x-ms-pageable' extension, has no property named 'value'..
/// </summary>
public static string CollectionObjectPropertiesNamingMessage {
get {
return ResourceManager.GetString("CollectionObjectPropertiesNamingMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to \\\\.
/// </summary>
public static string CommentString {
get {
return ResourceManager.GetString("CommentString", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Plugins:
/// CSharp:
/// TypeName: PluginCs, AutoRest.CSharp
/// Azure.CSharp:
/// TypeName: PluginCsa, AutoRest.CSharp.Azure
/// Azure.CSharp.Fluent:
/// TypeName: PluginCsaf, AutoRest.CSharp.Azure.Fluent
/// Ruby:
/// TypeName: PluginRb, AutoRest.Ruby
/// Azure.Ruby:
/// TypeName: PluginRba, AutoRest.Ruby.Azure
/// NodeJS:
/// TypeName: PluginJs, AutoRest.NodeJS
/// Azure.NodeJS:
/// TypeName: PluginJsa, AutoRest.NodeJS.Azure
/// Python:
/// TypeName: PluginPy, AutoRest.Python
/// Azure.Python:
/// TypeNa [rest of string was truncated]";.
/// </summary>
public static string ConfigurationKnownPlugins {
get {
return ResourceManager.GetString("ConfigurationKnownPlugins", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property named: "{0}", for definition: "{1}" must follow camelCase style. Example: "{2}"..
/// </summary>
public static string DefinitionsPropertiesNameCamelCase {
get {
return ResourceManager.GetString("DefinitionsPropertiesNameCamelCase", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'Delete' operation must not have a request body..
/// </summary>
public static string DeleteMustNotHaveRequestBody {
get {
return ResourceManager.GetString("DeleteMustNotHaveRequestBody", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'DELETE' operation '{0}' must use method name 'Delete'..
/// </summary>
public static string DeleteOperationNameNotValid {
get {
return ResourceManager.GetString("DeleteOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The value provided for description is not descriptive enough. Accurate and descriptive description is essential for maintaining reference documentation..
/// </summary>
public static string DescriptionNotDescriptive {
get {
return ResourceManager.GetString("DescriptionNotDescriptive", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Directory {0} does not exist..
/// </summary>
public static string DirectoryNotExist {
get {
return ResourceManager.GetString("DirectoryNotExist", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Empty x-ms-client-name property..
/// </summary>
public static string EmptyClientName {
get {
return ResourceManager.GetString("EmptyClientName", resourceCulture);
}
}
/// <summary>
        /// Looks up a localized string similar to {0} with name '{1}' was renamed to '{2}' because it conflicts with the following entities: {3}.
/// </summary>
public static string EntityConflictTitleMessage {
get {
return ResourceManager.GetString("EntityConflictTitleMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Error generating client model: {0}.
/// </summary>
public static string ErrorGeneratingClientModel {
get {
return ResourceManager.GetString("ErrorGeneratingClientModel", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Error loading {0} assembly: {1}.
/// </summary>
public static string ErrorLoadingAssembly {
get {
return ResourceManager.GetString("ErrorLoadingAssembly", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Error saving generated code: {0}.
/// </summary>
public static string ErrorSavingGeneratedCode {
get {
return ResourceManager.GetString("ErrorSavingGeneratedCode", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Plugin {0} not found.
/// </summary>
public static string ExtensionNotFound {
get {
return ResourceManager.GetString("ExtensionNotFound", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Successfully initialized {0} Code Generator {1}.
/// </summary>
public static string GeneratorInitialized {
get {
return ResourceManager.GetString("GeneratorInitialized", resourceCulture);
}
}
/// <summary>
        /// Looks up a localized string similar to 'GET' operation '{0}' must use method name 'Get', or the method name must start with 'List'.
/// </summary>
public static string GetOperationNameNotValid {
get {
return ResourceManager.GetString("GetOperationNameNotValid", resourceCulture);
}
}
/// <summary>
        /// Looks up a localized string similar to Guid used at the #/Definitions/{1}/.../{0}. Usage of Guid is not recommended. If GUIDs are absolutely required in your service, please get sign off from the Azure API review board..
/// </summary>
public static string GuidUsageNotRecommended {
get {
return ResourceManager.GetString("GuidUsageNotRecommended", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Each header parameter should have an explicit client name defined for improved code generation output quality..
/// </summary>
public static string HeaderShouldHaveClientName {
get {
return ResourceManager.GetString("HeaderShouldHaveClientName", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Permissible values for HTTP Verb are delete,get,put,patch,head,options,post. .
/// </summary>
public static string HttpVerbIsNotValid {
get {
return ResourceManager.GetString("HttpVerbIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Initializing code generator..
/// </summary>
public static string InitializingCodeGenerator {
get {
return ResourceManager.GetString("InitializingCodeGenerator", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Initializing modeler..
/// </summary>
public static string InitializingModeler {
get {
return ResourceManager.GetString("InitializingModeler", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Constraint is not supported for this type and will be ignored..
/// </summary>
public static string InvalidConstraint {
get {
return ResourceManager.GetString("InvalidConstraint", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The default value is not one of the values enumerated as valid for this element..
/// </summary>
public static string InvalidDefault {
get {
return ResourceManager.GetString("InvalidDefault", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Property name {0} cannot be used as an Identifier, as it contains only invalid characters..
/// </summary>
public static string InvalidIdentifierName {
get {
return ResourceManager.GetString("InvalidIdentifierName", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to When property is modeled as "readOnly": true then x-ms-mutability extension can only have "read" value. When property is modeled as "readOnly": false then applying x-ms-mutability extension with only "read" value is not allowed. Extension contains invalid values: '{0}'..
/// </summary>
public static string InvalidMutabilityValueForReadOnly {
get {
return ResourceManager.GetString("InvalidMutabilityValueForReadOnly", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Valid values for an x-ms-mutability extension are 'create', 'read' and 'update'. Applied extension contains invalid value(s): '{0}'..
/// </summary>
public static string InvalidMutabilityValues {
get {
return ResourceManager.GetString("InvalidMutabilityValues", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Only body parameters can have a schema defined..
/// </summary>
public static string InvalidSchemaParameter {
get {
return ResourceManager.GetString("InvalidSchemaParameter", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to '{0}' code generator does not support code generation to a single file..
/// </summary>
public static string LanguageDoesNotSupportSingleFileGeneration {
get {
return ResourceManager.GetString("LanguageDoesNotSupportSingleFileGeneration", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Since operation '{0}' response has model definition '{1}', it should be named as "list_*".
/// </summary>
public static string ListOperationsNamingWarningMessage {
get {
return ResourceManager.GetString("ListOperationsNamingWarningMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to An operation with x-ms-long-running-operation extension must have a valid terminal success status code. 200 or 201 for Put/Patch. 200, 201 or 204 for Post. 200 or 204 or both for Delete..
/// </summary>
public static string LongRunningResponseNotValid {
get {
return ResourceManager.GetString("LongRunningResponseNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to {0} lacks 'description' property. Consider adding a 'description' element. Accurate description is essential for maintaining reference documentation..
/// </summary>
public static string MissingDescription {
get {
return ResourceManager.GetString("MissingDescription", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to '{0}' is supposedly required, but no such property exists..
/// </summary>
public static string MissingRequiredProperty {
get {
return ResourceManager.GetString("MissingRequiredProperty", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Successfully initialized modeler {0} v {1}..
/// </summary>
public static string ModelerInitialized {
get {
return ResourceManager.GetString("ModelerInitialized", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to For better generated code quality, remove all references to "msdn.microsoft.com"..
/// </summary>
public static string MsdnReferencesDiscouraged {
get {
return ResourceManager.GetString("MsdnReferencesDiscouraged", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to {0} (already used in {1}).
/// </summary>
public static string NamespaceConflictReasonMessage {
get {
return ResourceManager.GetString("NamespaceConflictReasonMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Please consider changing your swagger specification to avoid naming conflicts..
/// </summary>
public static string NamingConflictsSuggestion {
get {
return ResourceManager.GetString("NamingConflictsSuggestion", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Could not find a definition for the path parameter '{0}'.
/// </summary>
public static string NoDefinitionForPathParameter {
get {
return ResourceManager.GetString("NoDefinitionForPathParameter", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Please make sure that media types other than 'application/json' are supported by your service..
/// </summary>
public static string NonAppJsonTypeNotSupported {
get {
return ResourceManager.GetString("NonAppJsonTypeNotSupported", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Only 1 underscore is permitted in the operation id, following Noun_Verb conventions..
/// </summary>
public static string OnlyOneUnderscoreAllowedInOperationId {
get {
return ResourceManager.GetString("OnlyOneUnderscoreAllowedInOperationId", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to OperationId is required for all operations. Please add it for '{0}' operation of '{1}' path..
/// </summary>
public static string OperationIdMissing {
get {
return ResourceManager.GetString("OperationIdMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Per the Noun_Verb convention for Operation Ids, the noun '{0}' should not appear after the underscore..
/// </summary>
public static string OperationIdNounInVerb {
get {
return ResourceManager.GetString("OperationIdNounInVerb", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameters "subscriptionId" and "api-version" are not allowed in the operations section, define these in the global parameters section instead.
/// </summary>
public static string OperationParametersNotAllowedMessage {
get {
return ResourceManager.GetString("OperationParametersNotAllowedMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Operations API must be implemented for '{0}'..
/// </summary>
public static string OperationsAPINotImplemented {
get {
return ResourceManager.GetString("OperationsAPINotImplemented", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter '{0}' is not expected..
/// </summary>
public static string ParameterIsNotValid {
get {
return ResourceManager.GetString("ParameterIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter '{0}' is required..
/// </summary>
public static string ParameterValueIsMissing {
get {
return ResourceManager.GetString("ParameterValueIsMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter '{0}' value is not valid. Expect '{1}'.
/// </summary>
public static string ParameterValueIsNotValid {
get {
return ResourceManager.GetString("ParameterValueIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'PATCH' operation '{0}' must use method name 'Update'..
/// </summary>
public static string PatchOperationNameNotValid {
get {
return ResourceManager.GetString("PatchOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to path cannot be null or an empty string or a string with white spaces while getting the parent directory.
/// </summary>
public static string PathCannotBeNullOrEmpty {
get {
return ResourceManager.GetString("PathCannotBeNullOrEmpty", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to {0} has different responses for PUT/GET/PATCH operations. The PUT/GET/PATCH operations must have same schema response..
/// </summary>
public static string PutGetPatchResponseInvalid {
get {
return ResourceManager.GetString("PutGetPatchResponseInvalid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to 'PUT' operation '{0}' must use method name 'Create'..
/// </summary>
public static string PutOperationNameNotValid {
get {
return ResourceManager.GetString("PutOperationNameNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to A 'Resource' definition must have x-ms-azure-resource extension enabled and set to true..
/// </summary>
public static string ResourceIsMsResourceNotValid {
get {
return ResourceManager.GetString("ResourceIsMsResourceNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to The id, name, type, location and tags properties of the Resource must be present with id, name and type as read-only.
/// </summary>
public static string ResourceModelIsNotValid {
get {
return ResourceManager.GetString("ResourceModelIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Parameter "{0}" is referenced but not defined in the global parameters section of Service Definition.
/// </summary>
public static string ServiceDefinitionParametersMissingMessage {
get {
return ResourceManager.GetString("ServiceDefinitionParametersMissingMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Sku Model is not valid. A Sku model must have 'name' property. It can also have 'tier', 'size', 'family', 'capacity' as optional properties..
/// </summary>
public static string SkuModelIsNotValid {
get {
return ResourceManager.GetString("SkuModelIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Non-HTTPS/HTTP schemes have limited support.
/// </summary>
public static string SupportedSchemesWarningMessage {
get {
return ResourceManager.GetString("SupportedSchemesWarningMessage", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Operations can not have more than one 'body' parameter. The following were found: '{0}'.
/// </summary>
public static string TooManyBodyParameters {
get {
return ResourceManager.GetString("TooManyBodyParameters", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Tracked resource '{0}' must have a get operation..
/// </summary>
public static string TrackedResourceGetOperationMissing {
get {
return ResourceManager.GetString("TrackedResourceGetOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Tracked Resource failing validation is: "{0}". Validation Failed: {1}.
/// A Tracked Resource must have:
/// 1. A Get Operation
/// 2. A ListByResourceGroup operation with x-ms-pageable extension and
/// 3. A ListBySubscriptionId operation with x-ms-pageable extension.
/// 4. "type","location","tags" should not be used in the RP property bag named "properties"..
/// </summary>
public static string TrackedResourceIsNotValid {
get {
return ResourceManager.GetString("TrackedResourceIsNotValid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Tracked resource '{0}' must have patch operation that at least supports the update of tags..
/// </summary>
public static string TrackedResourcePatchOperationMissing {
get {
return ResourceManager.GetString("TrackedResourcePatchOperationMissing", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Type '{0}' name should be assembly qualified. For example 'ClassName, AssemblyName'.
/// </summary>
public static string TypeShouldBeAssemblyQualified {
get {
return ResourceManager.GetString("TypeShouldBeAssemblyQualified", resourceCulture);
}
}
/// <summary>
        /// Looks up a localized string similar to Multiple resource providers are not allowed in a single spec. More than one of the resource paths were found: '{0}'..
/// </summary>
public static string UniqueResourcePaths {
get {
return ResourceManager.GetString("UniqueResourcePaths", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to '{0}' is not a known format..
/// </summary>
public static string UnknownFormat {
get {
return ResourceManager.GetString("UnknownFormat", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Value of 'x-ms-client-name' cannot be the same as '{0}' Property/Model..
/// </summary>
public static string XmsClientNameInvalid {
get {
return ResourceManager.GetString("XmsClientNameInvalid", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Paths in x-ms-paths must overload a normal path in the paths section, i.e. a path in the x-ms-paths must either be same as a path in the paths section or a path in the paths sections followed by additional parameters..
/// </summary>
public static string XMSPathBaseNotInPaths {
get {
return ResourceManager.GetString("XMSPathBaseNotInPaths", resourceCulture);
}
}
}
}
| 1 | 24,332 | nit: I'm assuming you have verified this is what the error message should read like | Azure-autorest | java |
@@ -850,4 +850,4 @@ def main(argv):
return glob_result
if __name__ == "__main__":
- exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:])) | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Unit testing infrastructure for Scapy
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, getopt, imp, glob, importlib
import hashlib, copy, bz2, base64, os.path, time, traceback, zlib
from scapy.consts import WINDOWS
import scapy.modules.six as six
from scapy.modules.six.moves import range
### Util class ###
class Bunch:
__init__ = lambda self, **kw: setattr(self, '__dict__', kw)
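# Illustrative sketch, not part of the original file: Bunch simply turns
# keyword arguments into attributes, e.g.
#   b = Bunch(verb=3, dump=0)
#   b.verb   # -> 3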
#### Import tool ####
def import_module(name):
name = os.path.realpath(name)
thepath = os.path.dirname(name)
name = os.path.basename(name)
if name.endswith(".py"):
name = name[:-3]
f,path,desc = imp.find_module(name,[thepath])
try:
return imp.load_module(name, f, path, desc)
finally:
if f:
f.close()
#### INTERNAL/EXTERNAL FILE EMBEDDING ####
class File:
def __init__(self, name, URL, local):
self.name = name
self.local = local
self.URL = URL
def get_local(self):
return bz2.decompress(base64.decodestring(self.local))
def get_URL(self):
return self.URL
def write(self, dir):
if dir:
dir += "/"
open(dir+self.name,"wb").write(self.get_local())
# Embed a base64 encoded bziped version of js and css files
# to work if you can't reach Internet.
class External_Files:
UTscapy_js = File("UTscapy.js", "http://www.secdev.org/projects/UTscapy/UTscapy.js",
"""QlpoOTFBWSZTWWVijKQAAXxfgERUYOvAChIhBAC/79+qQAH8AFA0poANAMjQAAAG
ABo0NGEZNBo00BhgAaNDRhGTQaNNAYFURJinplGaKbRkJiekzSenqmpA0Gm1LFMp
RUklVQlK9WUTZYpNFI1IiEWEFT09Sfj5uO+qO6S5DQwKIxM92+Zku94wL6V/1KTK
an2c66Ug6SmVKy1ZIrgauxMVLF5xLH0lJRQuKlqLF10iatlTzqvw7S9eS3+h4lu3
GZyMgoOude3NJ1pQy8eo+X96IYZw+ynehsiPj73m0rnvQ3QXZ9BJQiZQYQ5/uNcl
2WOlC5vyQqV/BWsnr2NZYLYXQLDs/Bffk4ZfR4/SH6GfA5Xlek4xHNHqbSsRbREO
gueXo3kcYi94K6hSO3ldD2O/qJXOFqJ8o3TE2aQahxtQpCVUKQMvODHwu2YkaORY
ZC6gihEallcHDIAtRPScBACAJnUggYhLDX6DEko7nC9GvAw5OcEkiyDUbLdiGCzD
aXWMC2DuQ2Y6sGf6NcRuON7QSbhHsPc4KKmZ/xdyRThQkGVijKQ=""")
UTscapy_css = File("UTscapy.css","http://www.secdev.org/projects/UTscapy/UTscapy.css",
"""QlpoOTFBWSZTWTbBCNEAAE7fgHxwSB//+Cpj2QC//9/6UAR+63dxbNzO3ccmtGEk
pM0m1I9E/Qp6g9Q09TNQ9QDR6gMgAkiBFG9U9TEGRkGgABoABoBmpJkRAaAxD1AN
Gh6gNADQBzAATJgATCYJhDAEYAEiQkwIyJk0n6qenpqeoaMUeo9RgIxp6pX78kfx
Jx4MUhDHKEb2pJAYAelG1cybiZBBDipH8ocxNyHDAqTUxiQmIAEDE3ApIBUUECAT
7Lvlf4xA/sVK0QHkSlYtT0JmErdOjx1v5NONPYSjrIhQnbl1MbG5m+InMYmVAWJp
uklD9cNdmQv2YigxbEtgUrsY2pDDV/qMT2SHnHsViu2rrp2LA01YJIHZqjYCGIQN
sGNobFxAYHLqqMOj9TI2Y4GRpRCUGu82PnMnXUBgDSkTY4EfmygaqvUwbGMbPwyE
220Q4G+sDvw7+6in3CAOS634pcOEAdREUW+QqMjvWvECrGISo1piv3vqubTGOL1c
ssrFnnSfU4T6KSCbPs98HJ2yjWN4i8Bk5WrM/JmELLNeZ4vgMkA4JVQInNnWTUTe
gmMSlJd/b7JuRwiM5RUzXOBTa0e3spO/rsNJiylu0rCxygdRo2koXdSJzmUVjJUm
BOFIkUKq8LrE+oT9h2qUqqUQ25fGV7e7OFkpmZopqUi0WeIBzlXdYY0Zz+WUJUTC
RC+CIPFIYh1RkopswMAop6ZjuZKRqR0WNuV+rfuF5aCXPpxAm0F14tPyhf42zFMT
GJUMxxowJnoauRq4xGQk+2lYFxbQ0FiC43WZSyYLHMuo5NTJ92QLAgs4FgOyZQqQ
xpsGKMA0cIisNeiootpnlWQvkPzNGUTPg8jqkwTvqQLguZLKJudha1hqfBib1IfO
LNChcU6OqF+3wyPKg5Y5oSbSJPAMcRDANwmS2i9oZm6vsD1pLkWtFGbAkEjjCuEU
W1ev1IsF2UVmWYFtJkqLT708ApUBK/ig3rbJWSq7RGQd3sSrOKu3lyKzTBdkXK2a
BGLV5dS1XURdKxaRkMplLLQxsimBYZEAa8KQkYyI+4EagMqycRR7RgwtZFxJSu0T
1q5wS2JG82iETHplbNj8DYo9IkmKzNAiw4FxK8bRfIYvwrbshbEagL11AQJFsqeZ
WeXDoWEx2FMyyZRAB5QyCFnwYtwtWAQmmITY8aIM2SZyRnHH9Wi8+Sr2qyCscFYo
vzM985aHXOHAxQN2UQZbQkUv3D4Vc+lyvalAffv3Tyg4ks3a22kPXiyeCGweviNX
0K8TKasyOhGsVamTUAZBXfQVw1zmdS4rHDnbHgtIjX3DcCt6UIr0BHTYjdV0JbPj
r1APYgXihjQwM2M83AKIhwQQJv/F3JFOFCQNsEI0QA==""")
def get_local_dict(cls):
return {x: y.name for (x, y) in six.iteritems(cls.__dict__)
if isinstance(y, File)}
get_local_dict = classmethod(get_local_dict)
def get_URL_dict(cls):
return {x: y.URL for (x, y) in six.iteritems(cls.__dict__)
if isinstance(y, File)}
get_URL_dict = classmethod(get_URL_dict)
#### HELPER CLASSES FOR PARAMETRING OUTPUT FORMAT ####
class EnumClass:
def from_string(cls,x):
return cls.__dict__[x.upper()]
from_string = classmethod(from_string)
class Format(EnumClass):
TEXT = 1
ANSI = 2
HTML = 3
LATEX = 4
XUNIT = 5
#### TEST CLASSES ####
class TestClass:
def __getitem__(self, item):
return getattr(self, item)
def add_keywords(self, kws):
if isinstance(kws, six.string_types):
kws = [kws]
for kwd in kws:
if kwd.startswith('-'):
try:
self.keywords.remove(kwd[1:])
except KeyError:
pass
else:
self.keywords.add(kwd)
class TestCampaign(TestClass):
def __init__(self, title):
self.title = title
self.filename = None
self.headcomments = ""
self.campaign = []
self.keywords = set()
self.crc = None
self.sha = None
self.preexec = None
self.preexec_output = None
self.end_pos = 0
def add_testset(self, testset):
self.campaign.append(testset)
testset.keywords.update(self.keywords)
def startNum(self, beginpos):
for ts in self:
for t in ts:
t.num = beginpos
beginpos += 1
self.end_pos = beginpos
def __iter__(self):
return self.campaign.__iter__()
def all_tests(self):
for ts in self:
for t in ts:
yield t
class TestSet(TestClass):
def __init__(self, name):
self.name = name
self.tests = []
self.comments = ""
self.keywords = set()
self.crc = None
self.expand = 1
def add_test(self, test):
self.tests.append(test)
test.keywords.update(self.keywords)
def __iter__(self):
return self.tests.__iter__()
class UnitTest(TestClass):
def __init__(self, name):
self.name = name
self.test = ""
self.comments = ""
self.result = ""
self.res = True # must be True at init to have a different truth value than None
self.output = ""
self.num = -1
self.keywords = set()
self.crc = None
self.expand = 1
def decode(self):
if six.PY2:
self.test = self.test.decode("utf8", "ignore")
self.output = self.output.decode("utf8", "ignore")
self.comments = self.comments.decode("utf8", "ignore")
self.result = self.result.decode("utf8", "ignore")
def __nonzero__(self):
return self.res
__bool__ = __nonzero__
# Careful note: any key not present in the config falls back to its default value.
# Use -c as first argument !!
def parse_config_file(config_path, verb=3):
"""Parse provided json to get configuration
Empty default json:
{
"testfiles": [],
"onlyfailed": false,
"verb": 2,
"dump": 0,
"crc": true,
"scapy": "scapy",
"preexec": {},
"global_preexec": "",
"outputfile": null,
"local": true,
"format": "ansi",
"num": null,
"modules": [],
"kw_ok": [],
"kw_ko": []
}
"""
import json, unicodedata
with open(config_path) as config_file:
data = json.load(config_file, encoding="utf8")
if verb > 2:
print("### Loaded config file", config_path, file=sys.stderr)
def get_if_exist(key, default):
return data[key] if key in data else default
return Bunch(testfiles=get_if_exist("testfiles", []), onlyfailed=get_if_exist("onlyfailed", False),
verb=get_if_exist("verb", 3), dump=get_if_exist("dump", 0), crc=get_if_exist("crc", 1),
scapy=get_if_exist("scapy", "scapy"), preexec=get_if_exist("preexec", {}),
global_preexec=get_if_exist("global_preexec", ""), outfile=get_if_exist("outputfile", sys.stdout),
local=get_if_exist("local", 0), num=get_if_exist("num", None), modules=get_if_exist("modules", []),
kw_ok=get_if_exist("kw_ok", []), kw_ko=get_if_exist("kw_ko", []), format=get_if_exist("format", "ansi"))
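# Illustrative sketch, not part of the original file: given a hypothetical
# config file containing {"testfiles": ["regression.uts"], "format": "html"},
# the Bunch returned above would be consumed as
#   conf = parse_config_file("regression.utsc")
#   conf.testfiles   # -> ["regression.uts"]
#   conf.format      # -> "html"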
#### PARSE CAMPAIGN ####
def parse_campaign_file(campaign_file):
test_campaign = TestCampaign("Test campaign")
test_campaign.filename= campaign_file.name
testset = None
test = None
testnb = 0
for l in campaign_file.readlines():
if l[0] == '#':
continue
if l[0] == "~":
(test or testset or test_campaign).add_keywords(l[1:].split())
elif l[0] == "%":
test_campaign.title = l[1:].strip()
elif l[0] == "+":
testset = TestSet(l[1:].strip())
test_campaign.add_testset(testset)
test = None
elif l[0] == "=":
test = UnitTest(l[1:].strip())
test.num = testnb
testnb += 1
testset.add_test(test)
elif l[0] == "*":
if test is not None:
test.comments += l[1:]
elif testset is not None:
testset.comments += l[1:]
else:
test_campaign.headcomments += l[1:]
else:
if test is None:
if l.strip():
print("Unknown content [%s]" % l.strip(), file=sys.stderr)
else:
test.test += l
return test_campaign
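# Illustrative campaign snippet, not part of the original file, exercising
# the markers parsed above (% title, + test set, = test, ~ keywords,
# * comments; any other line is test code):
#
#   % Example campaign title
#   + Example test set
#   ~ example_keyword
#   = Addition works
#   * A comment attached to the test above.
#   assert 1 + 1 == 2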
def dump_campaign(test_campaign):
print("#"*(len(test_campaign.title)+6))
print("## %(title)s ##" % test_campaign)
print("#"*(len(test_campaign.title)+6))
if test_campaign.sha and test_campaign.crc:
print("CRC=[%(crc)s] SHA=[%(sha)s]" % test_campaign)
print("from file %(filename)s" % test_campaign)
print()
for ts in test_campaign:
if ts.crc:
print("+--[%s]%s(%s)--" % (ts.name,"-"*max(2,80-len(ts.name)-18),ts.crc))
else:
print("+--[%s]%s" % (ts.name,"-"*max(2,80-len(ts.name)-6)))
if ts.keywords:
print(" kw=%s" % ",".join(ts.keywords))
for t in ts:
print("%(num)03i %(name)s" % t)
c = k = ""
if t.keywords:
k = "kw=%s" % ",".join(t.keywords)
if t.crc:
c = "[%(crc)s] " % t
if c or k:
print(" %s%s" % (c,k))
#### COMPUTE CAMPAIGN DIGESTS ####
if six.PY2:
def crc32(x):
return "%08X" % (0xffffffff & zlib.crc32(x))
def sha1(x):
return hashlib.sha1(x).hexdigest().upper()
else:
def crc32(x):
return "%08X" % (0xffffffff & zlib.crc32(bytearray(x, "utf8")))
def sha1(x):
return hashlib.sha1(x.encode("utf8")).hexdigest().upper()
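# Sketch, not part of the original file: both helpers return uppercase hex
# digests on either Python version, e.g.
#   crc32("")   # -> "00000000"
#   sha1("")    # -> "DA39A3EE5E6B4B0D3255BFEF95601890AFD80709"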
def compute_campaign_digests(test_campaign):
dc = ""
for ts in test_campaign:
dts = ""
for t in ts:
dt = t.test.strip()
t.crc = crc32(dt)
dts += "\0"+dt
ts.crc = crc32(dts)
dc += "\0\x01"+dts
test_campaign.crc = crc32(dc)
test_campaign.sha = sha1(open(test_campaign.filename).read())
#### FILTER CAMPAIGN #####
def filter_tests_on_numbers(test_campaign, num):
if num:
for ts in test_campaign:
ts.tests = [t for t in ts.tests if t.num in num]
test_campaign.campaign = [ts for ts in test_campaign.campaign
if ts.tests]
def filter_tests_keep_on_keywords(test_campaign, kw):
def kw_match(lst, kw):
for k in lst:
if k in kw:
return True
return False
if kw:
for ts in test_campaign:
ts.tests = [t for t in ts.tests if kw_match(t.keywords, kw)]
def filter_tests_remove_on_keywords(test_campaign, kw):
def kw_match(lst, kw):
for k in kw:
if k in lst:
return True
return False
if kw:
for ts in test_campaign:
ts.tests = [t for t in ts.tests if not kw_match(t.keywords, kw)]
def remove_empty_testsets(test_campaign):
test_campaign.campaign = [ts for ts in test_campaign.campaign if ts.tests]
#### RUN CAMPAIGN #####
def run_campaign(test_campaign, get_interactive_session, verb=3, ignore_globals=None):
if WINDOWS:
# Add a route to 127.0.0.1 and ::1
from scapy.arch.windows import route_add_loopback
route_add_loopback()
passed=failed=0
if test_campaign.preexec:
test_campaign.preexec_output = get_interactive_session(test_campaign.preexec.strip(), ignore_globals=ignore_globals)[0]
for testset in test_campaign:
for t in testset:
t.output,res = get_interactive_session(t.test.strip(), ignore_globals=ignore_globals)
the_res = False
try:
if res is None or res:
the_res= True
except Exception as msg:
t.output+="UTscapy: Error during result interpretation:\n"
t.output+="".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2],))
if the_res:
t.res = True
res = "passed"
passed += 1
else:
t.res = False
res = "failed"
failed += 1
t.result = res
t.decode()
if verb > 1:
print("%(result)6s %(crc)s %(name)s" % t, file=sys.stderr)
test_campaign.passed = passed
test_campaign.failed = failed
if verb:
print("Campaign CRC=%(crc)s SHA=%(sha)s" % test_campaign, file=sys.stderr)
print("PASSED=%i FAILED=%i" % (passed, failed), file=sys.stderr)
return failed
#### INFO LINES ####
def info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return "Run %s by UTscapy" % time.ctime()
else:
return "Run %s from [%s] by UTscapy" % (time.ctime(), filename)
def html_info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return """Run %s by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % time.ctime()
else:
return """Run %s from [%s] by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % (time.ctime(), filename)
#### CAMPAIGN TO something ####
def campaign_to_TEXT(test_campaign):
output="%(title)s\n" % test_campaign
output += "-- "+info_line(test_campaign)+"\n\n"
output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
for testset in test_campaign:
if any(t.expand for t in testset):
output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t
return output
def campaign_to_ANSI(test_campaign):
output="%(title)s\n" % test_campaign
output += "-- "+info_line(test_campaign)+"\n\n"
output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
for testset in test_campaign:
if any(t.expand for t in testset):
output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t
return output
def campaign_to_xUNIT(test_campaign):
output='<?xml version="1.0" encoding="UTF-8" ?>\n<testsuite>\n'
for testset in test_campaign:
for t in testset:
output += ' <testcase classname="%s"\n' % testset.name.encode("string_escape").replace('"',' ')
output += ' name="%s"\n' % t.name.encode("string_escape").replace('"',' ')
output += ' duration="0">\n' % t
if not t.res:
output += '<error><![CDATA[%(output)s]]></error>\n' % t
output += "</testcase>\n"
output += '</testsuite>'
return output
def campaign_to_HTML(test_campaign):
output = """
<h1>%(title)s</h1>
<p>
""" % test_campaign
if test_campaign.crc is not None and test_campaign.sha is not None:
output += "CRC=<span class=crc>%(crc)s</span> SHA=<span class=crc>%(sha)s</span><br>" % test_campaign
output += "<small><em>"+html_info_line(test_campaign)+"</em></small>"
output += test_campaign.headcomments + "\n<p>PASSED=%(passed)i FAILED=%(failed)i<p>\n\n" % test_campaign
for testset in test_campaign:
output += "<h2>" % testset
if testset.crc is not None:
output += "<span class=crc>%(crc)s</span> " % testset
output += "%(name)s</h2>\n%(comments)s\n<ul>\n" % testset
for t in testset:
output += """<li class=%(result)s id="tst%(num)il">\n""" % t
if t.expand == 2:
output +="""
<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">+%(num)03i+</span>
<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')">-%(num)03i-</span>
""" % t
else:
output += """
<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')">+%(num)03i+</span>
<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">-%(num)03i-</span>
""" % t
if t.crc is not None:
output += "<span class=crc>%(crc)s</span>\n" % t
output += """%(name)s\n<span class="comment %(result)s" id="tst%(num)i" """ % t
if t.expand < 2:
output += """ style="POSITION: absolute; VISIBILITY: hidden;" """
output += """><br>%(comments)s
<pre>
%(output)s</pre></span>
""" % t
output += "\n</ul>\n\n"
return output
def pack_html_campaigns(runned_campaigns, data, local=0, title=None):
output = """
<html>
<head>
<title>%(title)s</title>
<h1>UTScapy tests</h1>
<span class=button onClick="hide_all('tst')">Shrink All</span>
<span class=button onClick="show_all('tst')">Expand All</span>
<span class=button onClick="show_passed('tst')">Expand Passed</span>
<span class=button onClick="show_failed('tst')">Expand Failed</span>
<p>
"""
for test_campaign in runned_campaigns:
for ts in test_campaign:
for t in ts:
output += """<span class=button%(result)s onClick="goto_id('tst%(num)il')">%(num)03i</span>\n""" % t
output += """</p>\n\n
<link rel="stylesheet" href="%(UTscapy_css)s" type="text/css">
<script language="JavaScript" src="%(UTscapy_js)s" type="text/javascript"></script>
</head>
<body>
%(data)s
</body></html>
"""
out_dict = {'data': data, 'title': title if title else "UTScapy tests"}
if local:
External_Files.UTscapy_js.write(os.path.dirname(test_campaign.output_file.name))
External_Files.UTscapy_css.write(os.path.dirname(test_campaign.output_file.name))
out_dict.update(External_Files.get_local_dict())
else:
out_dict.update(External_Files.get_URL_dict())
output %= out_dict
return output
def campaign_to_LATEX(test_campaign):
output = r"""\documentclass{report}
\usepackage{alltt}
\usepackage{xcolor}
\usepackage{a4wide}
\usepackage{hyperref}
\title{%(title)s}
\date{%%s}
\begin{document}
\maketitle
\tableofcontents
\begin{description}
\item[Passed:] %(passed)i
\item[Failed:] %(failed)i
\end{description}
%(headcomments)s
""" % test_campaign
output %= info_line(test_campaign)
for testset in test_campaign:
output += "\\chapter{%(name)s}\n\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += r"""\section{%(name)s}
[%(num)03i] [%(result)s]
%(comments)s
\begin{alltt}
%(output)s
\end{alltt}
""" % t
output += "\\end{document}\n"
return output
#### USAGE ####
def usage():
print("""Usage: UTscapy [-m module] [-f {text|ansi|HTML|LaTeX}] [-o output_file]
[-t testfile] [-T testfile] [-k keywords [-k ...]] [-K keywords [-K ...]]
[-l] [-d|-D] [-F] [-q[q]] [-P preexecute_python_code]
[-s /path/to/scapy] [-c configfile]
-t\t\t: provide test files (can be used many times)
-T\t\t: if -t is used with *, remove a specific file (can be used many times)
-l\t\t: generate local files
-F\t\t: expand only failed tests
-d\t\t: dump campaign
-D\t\t: dump campaign and stop
-C\t\t: don't calculate CRC and SHA
-s\t\t: path to scapy.py
-c\t\t: load a .utsc config file
-q\t\t: quiet mode
-qq\t\t: [silent mode]
-n <testnum>\t: only tests whose numbers are given (eg. 1,3-7,12)
-m <module>\t: additional module to put in the namespace
-k <kw1>,<kw2>,...\t: include only tests with one of those keywords (can be used many times)
-K <kw1>,<kw2>,...\t: remove tests with one of those keywords (can be used many times)
-P <preexecute_python_code>
""", file=sys.stderr)
raise SystemExit
#### MAIN ####
def execute_campaign(TESTFILE, OUTPUTFILE, PREEXEC, NUM, KW_OK, KW_KO, DUMP,
FORMAT, VERB, ONLYFAILED, CRC, autorun_func, pos_begin=0, ignore_globals=None):
# Parse test file
test_campaign = parse_campaign_file(TESTFILE)
# Report parameters
if PREEXEC:
test_campaign.preexec = PREEXEC
# Compute campaign CRC and SHA
if CRC:
compute_campaign_digests(test_campaign)
# Filter out unwanted tests
filter_tests_on_numbers(test_campaign, NUM)
for k in KW_OK:
filter_tests_keep_on_keywords(test_campaign, k)
for k in KW_KO:
filter_tests_remove_on_keywords(test_campaign, k)
remove_empty_testsets(test_campaign)
# Dump campaign
if DUMP:
dump_campaign(test_campaign)
if DUMP > 1:
sys.exit()
# Run tests
test_campaign.output_file = OUTPUTFILE
    result = run_campaign(test_campaign, autorun_func[FORMAT], verb=VERB, ignore_globals=ignore_globals)
# Shrink passed
if ONLYFAILED:
for t in test_campaign.all_tests():
if t:
t.expand = 0
else:
t.expand = 2
pos_end = 0
# Generate report
if FORMAT == Format.TEXT:
output = campaign_to_TEXT(test_campaign)
elif FORMAT == Format.ANSI:
output = campaign_to_ANSI(test_campaign)
elif FORMAT == Format.HTML:
test_campaign.startNum(pos_begin)
output = campaign_to_HTML(test_campaign)
elif FORMAT == Format.LATEX:
output = campaign_to_LATEX(test_campaign)
elif FORMAT == Format.XUNIT:
output = campaign_to_xUNIT(test_campaign)
return output, (result == 0), test_campaign
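# Expand shell-style wildcards in the test file list, in place.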
def resolve_testfiles(TESTFILES):
for tfile in TESTFILES[:]:
if "*" in tfile:
TESTFILES.remove(tfile)
TESTFILES.extend(glob.glob(tfile))
return TESTFILES
def main(argv):
ignore_globals = list(six.moves.builtins.__dict__.keys())
# Parse arguments
FORMAT = Format.ANSI
TESTFILE = sys.stdin
OUTPUTFILE = sys.stdout
LOCAL = 0
NUM = None
KW_OK = []
KW_KO = []
DUMP = 0
CRC = True
ONLYFAILED = False
VERB = 3
GLOB_PREEXEC = ""
PREEXEC_DICT = {}
SCAPY = "scapy"
MODULES = []
TESTFILES = []
try:
opts = getopt.getopt(argv, "o:t:T:c:f:hln:m:k:K:DdCFqP:s:")
for opt,optarg in opts[0]:
if opt == "-h":
usage()
elif opt == "-F":
ONLYFAILED = True
elif opt == "-q":
VERB -= 1
elif opt == "-D":
DUMP = 2
elif opt == "-d":
DUMP = 1
elif opt == "-C":
CRC = False
elif opt == "-s":
SCAPY = optarg
elif opt == "-P":
GLOB_PREEXEC += "\n"+optarg
elif opt == "-f":
try:
FORMAT = Format.from_string(optarg)
except KeyError as msg:
raise getopt.GetoptError("Unknown output format %s" % msg)
elif opt == "-t":
TESTFILES.append(optarg)
TESTFILES = resolve_testfiles(TESTFILES)
elif opt == "-T":
TESTFILES.remove(optarg)
elif opt == "-c":
data = parse_config_file(optarg, VERB)
ONLYFAILED = data.onlyfailed
VERB = data.verb
DUMP = data.dump
CRC = data.crc
SCAPY = data.scapy
PREEXEC_DICT = data.preexec
GLOB_PREEXEC = data.global_preexec
OUTPUTFILE = data.outfile
TESTFILES = data.testfiles
LOCAL = 1 if data.local else 0
NUM = data.num
MODULES = data.modules
KW_OK = [data.kw_ok]
KW_KO = [data.kw_ko]
try:
FORMAT = Format.from_string(data.format)
except KeyError as msg:
raise getopt.GetoptError("Unknown output format %s" % msg)
TESTFILES = resolve_testfiles(TESTFILES)
elif opt == "-o":
OUTPUTFILE = open(optarg, "wb")
elif opt == "-l":
LOCAL = 1
elif opt == "-n":
NUM = []
for v in (x.strip() for x in optarg.split(",")):
try:
NUM.append(int(v))
except ValueError:
v1, v2 = [int(e) for e in v.split('-', 1)]
NUM.extend(range(v1, v2 + 1))
elif opt == "-m":
MODULES.append(optarg)
elif opt == "-k":
KW_OK.append(optarg.split(","))
elif opt == "-K":
KW_KO.append(optarg.split(","))
if VERB > 2:
print("### Booting scapy...", file=sys.stderr)
try:
from scapy import all as scapy
except ImportError as e:
raise getopt.GetoptError("cannot import [%s]: %s" % (SCAPY,e))
for m in MODULES:
try:
mod = import_module(m)
six.moves.builtins.__dict__.update(mod.__dict__)
except ImportError as e:
raise getopt.GetoptError("cannot import [%s]: %s" % (m,e))
except getopt.GetoptError as msg:
print("ERROR:",msg, file=sys.stderr)
raise SystemExit
autorun_func = {
Format.TEXT: scapy.autorun_get_text_interactive_session,
Format.ANSI: scapy.autorun_get_ansi_interactive_session,
Format.HTML: scapy.autorun_get_html_interactive_session,
Format.LATEX: scapy.autorun_get_latex_interactive_session,
Format.XUNIT: scapy.autorun_get_text_interactive_session,
}
if VERB > 2:
print("### Starting tests...", file=sys.stderr)
glob_output = ""
glob_result = 0
glob_title = None
UNIQUE = len(TESTFILES) == 1
    # Resolve tags and asterisks
for prex in six.iterkeys(copy.copy(PREEXEC_DICT)):
if "*" in prex:
pycode = PREEXEC_DICT[prex]
del PREEXEC_DICT[prex]
for gl in glob.iglob(prex):
_pycode = pycode.replace("%name%", os.path.splitext(os.path.split(gl)[1])[0])
PREEXEC_DICT[gl] = _pycode
pos_begin = 0
runned_campaigns = []
# Execute all files
for TESTFILE in TESTFILES:
if VERB > 2:
print("### Loading:", TESTFILE, file=sys.stderr)
PREEXEC = PREEXEC_DICT[TESTFILE] if TESTFILE in PREEXEC_DICT else GLOB_PREEXEC
output, result, campaign = execute_campaign(open(TESTFILE), OUTPUTFILE,
PREEXEC, NUM, KW_OK, KW_KO,
DUMP, FORMAT, VERB, ONLYFAILED,
CRC, autorun_func, pos_begin, ignore_globals)
runned_campaigns.append(campaign)
pos_begin = campaign.end_pos
if UNIQUE:
glob_title = campaign.title
glob_output += output
if not result:
glob_result = 1
break
if VERB > 2:
print("### Writing output...", file=sys.stderr)
    # Concatenate outputs
if FORMAT == Format.HTML:
glob_output = pack_html_campaigns(runned_campaigns, glob_output, LOCAL, glob_title)
OUTPUTFILE.write(glob_output.encode("utf8", "ignore"))
OUTPUTFILE.close()
# Return state
return glob_result
if __name__ == "__main__":
exit(main(sys.argv[1:]))
| 1 | 10,981 | Why do you need that? (real question) | secdev-scapy | py |
@@ -1788,7 +1788,7 @@ static void skipCppTemplateParameterList (void)
}
}
else if(CollectingSignature)
- vStringPut (Signature, x);
+ vStringPut (Signature, c);
}
else if (c == '>')
{ | 1 | /*
* Copyright (c) 1996-2003, Darren Hiebert
*
* This source code is released for free distribution under the terms of the
* GNU General Public License version 2 or (at your option) any later version.
*
 * This module contains functions for parsing and scanning C, C++, C#, D,
 * Java and Vera source files.
*/
/*
* INCLUDE FILES
*/
#include "general.h" /* must always come first */
#include <string.h>
#include <setjmp.h>
#include "debug.h"
#include "entry.h"
#include "get.h"
#include "keyword.h"
#include "options.h"
#include "parse.h"
#include "read.h"
#include "routines.h"
#include "selectors.h"
#include "xtag.h"
/*
* MACROS
*/
#define activeToken(st) ((st)->token [(int) (st)->tokenIndex])
#define parentDecl(st) ((st)->parent == NULL ? \
DECL_NONE : (st)->parent->declaration)
#define isType(token,t) (boolean) ((token)->type == (t))
#define insideEnumBody(st) ((st)->parent == NULL ? FALSE : \
(boolean) ((st)->parent->declaration == DECL_ENUM))
#define insideAnnotationBody(st) ((st)->parent == NULL ? FALSE : \
(boolean) ((st)->parent->declaration == DECL_ANNOTATION))
#define insideInterfaceBody(st) ((st)->parent == NULL ? FALSE : \
(boolean) ((st)->parent->declaration == DECL_INTERFACE))
#define isSignalDirection(token) (boolean)(( (token)->keyword == KEYWORD_INPUT ) ||\
( (token)->keyword == KEYWORD_OUTPUT ) ||\
( (token)->keyword == KEYWORD_INOUT ) )
#define isExternCDecl(st,c) (boolean) ((c) == STRING_SYMBOL && \
! (st)->haveQualifyingName && (st)->scope == SCOPE_EXTERN)
#define isOneOf(c,s) (boolean) (strchr ((s), (c)) != NULL)
#define isHighChar(c) ((c) != EOF && (unsigned int)(c) >= 0xc0 && \
(unsigned int)(c) <= 0xff)
/*
* DATA DECLARATIONS
*/
enum { NumTokens = 3 };
typedef enum eException {
ExceptionNone, ExceptionEOF, ExceptionFormattingError,
ExceptionBraceFormattingError
} exception_t;
/* Used to specify type of keyword.
*/
typedef enum eKeywordId {
KEYWORD_NONE = -1,
KEYWORD_ALIAS, KEYWORD_ATTRIBUTE, KEYWORD_ABSTRACT,
KEYWORD_BOOLEAN, KEYWORD_BYTE, KEYWORD_BAD_STATE, KEYWORD_BAD_TRANS,
KEYWORD_BIND, KEYWORD_BIND_VAR, KEYWORD_BIT,
KEYWORD_CASE, KEYWORD_CATCH, KEYWORD_CHAR, KEYWORD_CLASS, KEYWORD_CLOCK, KEYWORD_CONST,
KEYWORD_CONSTRAINT, KEYWORD_COVERAGE_BLOCK, KEYWORD_COVERAGE_DEF,
KEYWORD_DEFAULT, KEYWORD_DELEGATE, KEYWORD_DELETE, KEYWORD_DO,
KEYWORD_DOUBLE,
KEYWORD_ELSE, KEYWORD_ENUM, KEYWORD_EXPLICIT, KEYWORD_EXTERN,
KEYWORD_EXTENDS, KEYWORD_EVENT,
KEYWORD_FINAL, KEYWORD_FLOAT, KEYWORD_FOR, KEYWORD_FOREACH,
KEYWORD_FRIEND, KEYWORD_FUNCTION,
KEYWORD_GOTO,
KEYWORD_HDL_NODE,
KEYWORD_IF, KEYWORD_IMPLEMENTS, KEYWORD_IMPORT, KEYWORD_INLINE, KEYWORD_INT,
KEYWORD_INOUT, KEYWORD_INPUT, KEYWORD_INTEGER, KEYWORD_INTERFACE,
KEYWORD_INTERNAL,
KEYWORD_LOCAL, KEYWORD_LONG,
KEYWORD_M_BAD_STATE, KEYWORD_M_BAD_TRANS, KEYWORD_M_STATE, KEYWORD_M_TRANS,
KEYWORD_MUTABLE,
KEYWORD_NAMESPACE, KEYWORD_NEW, KEYWORD_NEWCOV, KEYWORD_NATIVE,
KEYWORD_NHOLD, KEYWORD_NOEXCEPT, KEYWORD_NSAMPLE,
KEYWORD_OPERATOR, KEYWORD_OUTPUT, KEYWORD_OVERLOAD, KEYWORD_OVERRIDE,
KEYWORD_PACKED, KEYWORD_PORT, KEYWORD_PACKAGE, KEYWORD_PHOLD, KEYWORD_PRIVATE,
KEYWORD_PROGRAM, KEYWORD_PROTECTED, KEYWORD_PSAMPLE, KEYWORD_PUBLIC,
KEYWORD_REGISTER, KEYWORD_RETURN,
KEYWORD_SHADOW, KEYWORD_STATE,
KEYWORD_SHORT, KEYWORD_SIGNED, KEYWORD_STATIC, KEYWORD_STRING,
KEYWORD_STRUCT, KEYWORD_SWITCH, KEYWORD_SYNCHRONIZED,
KEYWORD_TASK, KEYWORD_TEMPLATE, KEYWORD_THIS, KEYWORD_THROW,
KEYWORD_THROWS, KEYWORD_TRANSIENT, KEYWORD_TRANS, KEYWORD_TRANSITION,
KEYWORD_TRY, KEYWORD_TYPEDEF, KEYWORD_TYPENAME,
KEYWORD_UINT, KEYWORD_ULONG, KEYWORD_UNION, KEYWORD_UNSIGNED, KEYWORD_USHORT,
KEYWORD_USING,
KEYWORD_VIRTUAL, KEYWORD_VOID, KEYWORD_VOLATILE,
KEYWORD_WCHAR_T, KEYWORD_WHILE,
KEYWORD_ALIGN, KEYWORD_ASM, KEYWORD_ASSERT, KEYWORD_AUTO,
KEYWORD_BODY, KEYWORD_BOOL, KEYWORD_BREAK, KEYWORD_CAST,
KEYWORD_CDOUBLE, KEYWORD_CENT, KEYWORD_CFLOAT, KEYWORD_CONTINUE,
KEYWORD_CREAL, KEYWORD_DCHAR, KEYWORD_DEBUG,
KEYWORD_DEPRECATED, KEYWORD_EXPORT, KEYWORD_FALSE, KEYWORD_FINALLY,
KEYWORD_FOREACH_REVERSE, KEYWORD_IDOUBLE, KEYWORD_IFLOAT,
KEYWORD_IN, KEYWORD_INVARIANT, KEYWORD_IREAL, KEYWORD_IS,
KEYWORD_LAZY, KEYWORD_MIXIN, KEYWORD_MODULE, KEYWORD_NULL,
KEYWORD_OUT, KEYWORD_PRAGMA, KEYWORD_REAL, KEYWORD_SCOPE,
KEYWORD_SUPER, KEYWORD_TRUE, KEYWORD_TYPEID, KEYWORD_TYPEOF,
KEYWORD_UBYTE, KEYWORD_UCENT, KEYWORD_UNITTEST, KEYWORD_VERSION,
KEYWORD_WCHAR, KEYWORD_WITH
} keywordId;
/* Used to determine whether keyword is valid for the current language and
* what its ID is.
*/
typedef struct sKeywordDesc {
const char *name;
keywordId id;
short isValid [6]; /* indicates languages for which kw is valid */
} keywordDesc;
/* Used for reporting the type of object parsed by nextToken ().
*/
typedef enum eTokenType {
TOKEN_NONE, /* none */
TOKEN_ARGS, /* a parenthetical pair and its contents */
TOKEN_BRACE_CLOSE,
TOKEN_BRACE_OPEN,
TOKEN_COLON, /* the colon character */
TOKEN_COMMA, /* the comma character */
TOKEN_DOUBLE_COLON, /* double colon indicates nested-name-specifier */
TOKEN_KEYWORD,
TOKEN_NAME, /* an unknown name */
TOKEN_PACKAGE, /* a Java package name */
TOKEN_PAREN_NAME, /* a single name in parentheses */
TOKEN_SEMICOLON, /* the semicolon character */
TOKEN_SPEC, /* a storage class specifier, qualifier, type, etc. */
TOKEN_COUNT
} tokenType;
/* This describes the scoping of the current statement.
*/
typedef enum eTagScope {
SCOPE_GLOBAL, /* no storage class specified */
SCOPE_STATIC, /* static storage class */
SCOPE_EXTERN, /* external storage class */
SCOPE_FRIEND, /* declares access only */
SCOPE_TYPEDEF, /* scoping depends upon context */
SCOPE_COUNT
} tagScope;
typedef enum eDeclaration {
DECL_NONE,
DECL_BASE, /* base type (default) */
DECL_CLASS,
DECL_ENUM,
DECL_EVENT,
DECL_FUNCTION,
DECL_FUNCTION_TEMPLATE, /* D-only */
DECL_IGNORE, /* non-taggable "declaration" */
DECL_INTERFACE,
DECL_MIXIN,
DECL_NAMESPACE,
DECL_NOMANGLE, /* C++ name demangling block */
DECL_PACKAGE,
DECL_PACKAGEREF,
DECL_PRIVATE,
DECL_PROGRAM, /* Vera program */
DECL_PROTECTED,
DECL_PUBLIC,
DECL_STRUCT,
DECL_TASK, /* Vera task */
DECL_TEMPLATE, /* D-only */
DECL_UNION,
DECL_USING,
DECL_VERSION, /* D conditional compile */
DECL_ANNOTATION, /* Java annotation */
DECL_COUNT
} declType;
typedef enum eVisibilityType {
ACCESS_UNDEFINED,
ACCESS_LOCAL,
ACCESS_PRIVATE,
ACCESS_PROTECTED,
ACCESS_PUBLIC,
ACCESS_DEFAULT, /* Java-specific */
ACCESS_COUNT
} accessType;
/* Information about the parent class of a member (if any).
*/
typedef struct sMemberInfo {
accessType access; /* access of current statement */
accessType accessDefault; /* access default for current statement */
} memberInfo;
typedef struct sTokenInfo {
tokenType type;
keywordId keyword;
vString* name; /* the name of the token */
unsigned long lineNumber; /* line number of tag */
fpos_t filePosition; /* file position of line containing name */
} tokenInfo;
typedef enum eImplementation {
IMP_DEFAULT,
IMP_ABSTRACT,
IMP_VIRTUAL,
IMP_PURE_VIRTUAL,
IMP_COUNT
} impType;
/* Describes the statement currently undergoing analysis.
*/
typedef struct sStatementInfo {
tagScope scope;
declType declaration; /* specifier associated with TOKEN_SPEC */
boolean gotName; /* was a name parsed yet? */
boolean haveQualifyingName; /* do we have a name we are considering? */
boolean gotParenName; /* was a name inside parentheses parsed yet? */
boolean gotArgs; /* was a list of parameters parsed yet? */
boolean isPointer; /* is 'name' a pointer? */
boolean inFunction; /* are we inside of a function? */
boolean assignment; /* have we handled an '='? */
boolean notVariable; /* has a variable declaration been disqualified ? */
impType implementation; /* abstract or concrete implementation? */
unsigned int tokenIndex; /* currently active token */
tokenInfo* token [(int) NumTokens];
tokenInfo* context; /* accumulated scope of current statement */
tokenInfo* blockName; /* name of current block */
memberInfo member; /* information regarding parent class/struct */
vString* parentClasses; /* parent classes */
struct sStatementInfo *parent; /* statement we are nested within */
} statementInfo;
/* Describes the type of tag being generated.
*/
typedef enum eTagType {
TAG_UNDEFINED,
TAG_CLASS, /* class name */
TAG_ENUM, /* enumeration name */
TAG_ENUMERATOR, /* enumerator (enumeration value) */
TAG_EVENT, /* event */
TAG_FIELD, /* field (Java) */
TAG_FUNCTION, /* function definition */
TAG_INTERFACE, /* interface declaration */
TAG_LOCAL, /* local variable definition */
TAG_MEMBER, /* structure, class or interface member */
TAG_METHOD, /* method declaration */
TAG_MIXIN, /* D mixin */
TAG_NAMESPACE, /* namespace name */
TAG_PACKAGE, /* package name / D module name */
TAG_PACKAGEREF, /* referenced package name */
TAG_PROGRAM, /* program name */
TAG_PROPERTY, /* property name */
TAG_PROTOTYPE, /* function prototype or declaration */
TAG_SIGNAL, /* VERA signal name */
TAG_STRUCT, /* structure name */
TAG_TASK, /* task name */
TAG_TYPEDEF, /* typedef name / D alias name */
TAG_TEMPLATE, /* D template name */
TAG_UNION, /* union name */
TAG_VARIABLE, /* variable definition */
TAG_EXTERN_VAR, /* external variable declaration */
TAG_VERSION, /* conditional template compilation */
TAG_LABEL, /* goto label */
TAG_ANNOTATION, /* Java annotation definition */
TAG_COUNT /* must be last */
} tagType;
typedef struct sParenInfo {
boolean isPointer;
boolean isParamList;
boolean isKnrParamList;
boolean isNameCandidate;
boolean invalidContents;
boolean nestedArgs;
unsigned int parameterCount;
} parenInfo;
/*
* DATA DEFINITIONS
*/
static jmp_buf Exception;
static langType Lang_c;
static langType Lang_cpp;
static langType Lang_csharp;
static langType Lang_d;
static langType Lang_java;
static langType Lang_vera;
static vString *Signature;
static boolean CollectingSignature;
/* Number used to uniquely identify anonymous structs and unions. */
static int AnonymousID = 0;
#define COMMONK_UNDEFINED -1
/* Used to index into the CKinds table. */
typedef enum {
CR_MACRO_UNDEF,
} cMacroRole;
static roleDesc CMacroRoles [] = {
RoleTemplateUndef,
};
typedef enum {
CR_HEADER_SYSTEM,
CR_HEADER_LOCAL,
} cHeaderRole;
static roleDesc CHeaderRoles [] = {
RoleTemplateSystem,
RoleTemplateLocal,
};
typedef enum {
CK_UNDEFINED = COMMONK_UNDEFINED,
CK_CLASS, CK_DEFINE, CK_ENUMERATOR, CK_FUNCTION,
CK_ENUMERATION, CK_HEADER, CK_LOCAL, CK_MEMBER, CK_NAMESPACE, CK_PROTOTYPE,
CK_STRUCT, CK_TYPEDEF, CK_UNION, CK_VARIABLE,
CK_EXTERN_VARIABLE, CK_LABEL
} cKind;
static kindOption CKinds [] = {
{ TRUE, 'c', "class", "classes"},
{ TRUE, 'd', "macro", "macro definitions",
.referenceOnly = FALSE, ATTACH_ROLES(CMacroRoles)},
{ TRUE, 'e', "enumerator", "enumerators (values inside an enumeration)"},
{ TRUE, 'f', "function", "function definitions"},
{ TRUE, 'g', "enum", "enumeration names"},
{ FALSE, 'h', "header", "included header files",
.referenceOnly = TRUE, ATTACH_ROLES(CHeaderRoles)},
{ FALSE, 'l', "local", "local variables"},
{ TRUE, 'm', "member", "class, struct, and union members"},
{ TRUE, 'n', "namespace", "namespaces"},
{ FALSE, 'p', "prototype", "function prototypes"},
{ TRUE, 's', "struct", "structure names"},
{ TRUE, 't', "typedef", "typedefs"},
{ TRUE, 'u', "union", "union names"},
{ TRUE, 'v', "variable", "variable definitions"},
{ FALSE, 'x', "externvar", "external and forward variable declarations"},
{ FALSE, 'L', "label", "goto label"},
};
typedef enum {
CSK_UNDEFINED = COMMONK_UNDEFINED,
CSK_CLASS, CSK_DEFINE, CSK_ENUMERATOR, CSK_EVENT, CSK_FIELD,
CSK_ENUMERATION, CSK_INTERFACE, CSK_LOCAL, CSK_METHOD,
CSK_NAMESPACE, CSK_PROPERTY, CSK_STRUCT, CSK_TYPEDEF
} csharpKind;
static kindOption CsharpKinds [] = {
{ TRUE, 'c', "class", "classes"},
{ TRUE, 'd', "macro", "macro definitions"},
{ TRUE, 'e', "enumerator", "enumerators (values inside an enumeration)"},
{ TRUE, 'E', "event", "events"},
{ TRUE, 'f', "field", "fields"},
{ TRUE, 'g', "enum", "enumeration names"},
{ TRUE, 'i', "interface", "interfaces"},
{ FALSE, 'l', "local", "local variables"},
{ TRUE, 'm', "method", "methods"},
{ TRUE, 'n', "namespace", "namespaces"},
{ TRUE, 'p', "property", "properties"},
{ TRUE, 's', "struct", "structure names"},
{ TRUE, 't', "typedef", "typedefs"},
};
typedef enum
{
DK_UNDEFINED = COMMONK_UNDEFINED,
DK_ALIAS, DK_CLASS, DK_ENUMERATION, DK_ENUMERATOR, DK_EXTERN_VARIABLE, DK_FUNCTION,
DK_INTERFACE, DK_LOCAL, DK_MEMBER, DK_MIXIN, DK_MODULE, DK_NAMESPACE,
DK_PROTOTYPE, DK_STRUCT, DK_TEMPLATE, DK_UNION,
DK_VARIABLE, DK_VERSION
} dKind;
static kindOption DKinds [] = {
{ TRUE, 'a', "alias", "aliases"},
{ TRUE, 'c', "class", "classes"},
{ TRUE, 'g', "enum", "enumeration names"},
{ TRUE, 'e', "enumerator", "enumerators (values inside an enumeration)"},
{ FALSE, 'x', "externvar", "external variable declarations"},
{ TRUE, 'f', "function", "function definitions"},
{ TRUE, 'i', "interface", "interfaces"},
{ FALSE, 'l', "local", "local variables"},
{ TRUE, 'm', "member", "class, struct, and union members"},
{ TRUE, 'X', "mixin", "mixins"},
{ TRUE, 'M', "module", "modules"},
{ TRUE, 'n', "namespace", "namespaces"},
{ FALSE, 'p', "prototype", "function prototypes"},
{ TRUE, 's', "struct", "structure names"},
{ TRUE, 'T', "template", "templates"},
{ TRUE, 'u', "union", "union names"},
{ TRUE, 'v', "variable", "variable definitions"},
{ TRUE, 'V', "version", "version statements"}
};
/* Used to index into the JavaKinds table. */
typedef enum {
JAVAR_PACKAGE_IMPORTED,
} javaPackageRole;
static roleDesc JavaPackageRoles [] = {
{ TRUE, "imported", "imported package"},
};
typedef enum {
JK_UNDEFINED = COMMONK_UNDEFINED,
JK_CLASS, JK_ENUM_CONSTANT, JK_FIELD, JK_ENUM, JK_INTERFACE,
JK_LOCAL, JK_METHOD, JK_PACKAGE, JK_ACCESS, JK_CLASS_PREFIX
} javaKind;
static kindOption JavaKinds [] = {
{ TRUE, 'c', "class", "classes"},
{ TRUE, 'e', "enumConstant", "enum constants"},
{ TRUE, 'f', "field", "fields"},
{ TRUE, 'g', "enum", "enum types"},
{ TRUE, 'i', "interface", "interfaces"},
{ FALSE, 'l', "local", "local variables"},
{ TRUE, 'm', "method", "methods"},
{ TRUE, 'p', "package", "packages",
.referenceOnly = FALSE, ATTACH_ROLES(JavaPackageRoles)},
};
/* Used to index into the VeraKinds table. */
typedef enum {
VR_MACRO_UNDEF,
} veraMacroRole;
static roleDesc VeraMacroRoles [] = {
RoleTemplateUndef,
};
typedef enum {
VR_HEADER_SYSTEM,
VR_HEADER_LOCAL,
} veraHeaderRole;
static roleDesc VeraHeaderRoles [] = {
RoleTemplateSystem,
RoleTemplateLocal,
};
typedef enum {
VK_UNDEFINED = COMMONK_UNDEFINED,
VK_CLASS, VK_DEFINE, VK_ENUMERATOR, VK_FUNCTION,
VK_ENUMERATION, VK_INTERFACE, VK_LOCAL, VK_MEMBER, VK_PROGRAM, VK_PROTOTYPE,
VK_SIGNAL, VK_TASK, VK_TYPEDEF, VK_VARIABLE,
VK_EXTERN_VARIABLE, VK_HEADER
} veraKind;
static kindOption VeraKinds [] = {
{ TRUE, 'c', "class", "classes"},
{ TRUE, 'd', "macro", "macro definitions",
.referenceOnly = FALSE, ATTACH_ROLES(VeraMacroRoles)},
{ TRUE, 'e', "enumerator", "enumerators (values inside an enumeration)"},
{ TRUE, 'f', "function", "function definitions"},
{ TRUE, 'g', "enum", "enumeration names"},
{ TRUE, 'i', "interface", "interfaces"},
{ FALSE, 'l', "local", "local variables"},
{ TRUE, 'm', "member", "class, struct, and union members"},
{ TRUE, 'p', "program", "programs"},
{ FALSE, 'P', "prototype", "function prototypes"},
{ TRUE, 's', "signal", "signals"},
{ TRUE, 't', "task", "tasks"},
{ TRUE, 'T', "typedef", "typedefs"},
{ TRUE, 'v', "variable", "variable definitions"},
{ FALSE, 'x', "externvar", "external variable declarations"},
{ FALSE, 'h', "header", "included header files",
.referenceOnly = FALSE, ATTACH_ROLES(VeraHeaderRoles)},
};
static const keywordDesc KeywordTable [] = {
/* C++ D */
/* ANSI C | C# | Java */
/* | | | | | Vera */
/* keyword keyword ID | | | | | | */
{ "__attribute__", KEYWORD_ATTRIBUTE, { 1, 1, 1, 1, 0, 0 } },
{ "abstract", KEYWORD_ABSTRACT, { 0, 0, 1, 1, 1, 0 } },
{ "alias", KEYWORD_ALIAS, { 0, 0, 0, 1, 0, 0 } },
{ "align", KEYWORD_ALIGN, { 0, 0, 0, 1, 0, 0 } },
{ "asm", KEYWORD_ASM, { 0, 0, 0, 1, 0, 0 } },
{ "assert", KEYWORD_ASSERT, { 0, 0, 0, 1, 0, 0 } },
{ "auto", KEYWORD_AUTO, { 0, 0, 0, 1, 0, 0 } },
{ "bad_state", KEYWORD_BAD_STATE, { 0, 0, 0, 0, 0, 1 } },
{ "bad_trans", KEYWORD_BAD_TRANS, { 0, 0, 0, 0, 0, 1 } },
{ "bind", KEYWORD_BIND, { 0, 0, 0, 0, 0, 1 } },
{ "bind_var", KEYWORD_BIND_VAR, { 0, 0, 0, 0, 0, 1 } },
{ "bit", KEYWORD_BIT, { 0, 0, 0, 0, 0, 1 } },
{ "body", KEYWORD_BODY, { 0, 0, 0, 1, 0, 0 } },
{ "bool", KEYWORD_BOOL, { 0, 0, 0, 1, 0, 0 } },
{ "boolean", KEYWORD_BOOLEAN, { 0, 0, 0, 0, 1, 0 } },
{ "break", KEYWORD_BREAK, { 0, 0, 0, 1, 0, 0 } },
{ "byte", KEYWORD_BYTE, { 0, 0, 0, 1, 1, 0 } },
{ "case", KEYWORD_CASE, { 1, 1, 1, 1, 1, 0 } },
{ "cast", KEYWORD_CAST, { 0, 0, 0, 1, 0, 0 } },
{ "catch", KEYWORD_CATCH, { 0, 1, 1, 1, 1, 0 } },
{ "cdouble", KEYWORD_CDOUBLE, { 0, 0, 0, 1, 0, 0 } },
{ "cent", KEYWORD_CENT, { 0, 0, 0, 1, 0, 0 } },
{ "cfloat", KEYWORD_CFLOAT, { 0, 0, 0, 1, 0, 0 } },
{ "char", KEYWORD_CHAR, { 1, 1, 1, 1, 1, 0 } },
{ "class", KEYWORD_CLASS, { 0, 1, 1, 1, 1, 1 } },
{ "CLOCK", KEYWORD_CLOCK, { 0, 0, 0, 0, 0, 1 } },
{ "const", KEYWORD_CONST, { 1, 1, 1, 1, 1, 0 } },
{ "constraint", KEYWORD_CONSTRAINT, { 0, 0, 0, 0, 0, 1 } },
{ "continue", KEYWORD_CONTINUE, { 0, 0, 0, 1, 0, 0 } },
{ "coverage_block", KEYWORD_COVERAGE_BLOCK, { 0, 0, 0, 0, 0, 1 } },
{ "coverage_def", KEYWORD_COVERAGE_DEF, { 0, 0, 0, 0, 0, 1 } },
{ "creal", KEYWORD_CREAL, { 0, 0, 0, 1, 0, 0 } },
{ "dchar", KEYWORD_DCHAR, { 0, 0, 0, 1, 0, 0 } },
{ "debug", KEYWORD_DEBUG, { 0, 0, 0, 1, 0, 0 } },
{ "default", KEYWORD_DEFAULT, { 1, 1, 1, 1, 1, 0 } },
{ "delegate", KEYWORD_DELEGATE, { 0, 0, 1, 1, 0, 0 } },
{ "delete", KEYWORD_DELETE, { 0, 1, 0, 1, 0, 0 } },
{ "deprecated", KEYWORD_DEPRECATED, { 0, 0, 0, 1, 0, 0 } },
{ "do", KEYWORD_DO, { 1, 1, 1, 1, 1, 0 } },
{ "double", KEYWORD_DOUBLE, { 1, 1, 1, 1, 1, 0 } },
{ "else", KEYWORD_ELSE, { 1, 1, 1, 1, 1, 0 } },
{ "enum", KEYWORD_ENUM, { 1, 1, 1, 1, 1, 1 } },
{ "event", KEYWORD_EVENT, { 0, 0, 1, 0, 0, 1 } },
{ "explicit", KEYWORD_EXPLICIT, { 0, 1, 1, 1, 0, 0 } },
{ "export", KEYWORD_EXPORT, { 0, 0, 0, 1, 0, 0 } },
{ "extends", KEYWORD_EXTENDS, { 0, 0, 0, 0, 1, 1 } },
{ "extern", KEYWORD_EXTERN, { 1, 1, 1, 1, 0, 1 } },
{ "false", KEYWORD_FALSE, { 0, 0, 0, 1, 0, 0 } },
{ "final", KEYWORD_FINAL, { 0, 0, 0, 1, 1, 0 } },
{ "finally", KEYWORD_FINALLY, { 0, 0, 0, 1, 0, 0 } },
{ "float", KEYWORD_FLOAT, { 1, 1, 1, 1, 1, 0 } },
{ "for", KEYWORD_FOR, { 1, 1, 1, 1, 1, 0 } },
{ "foreach", KEYWORD_FOREACH, { 0, 0, 1, 1, 0, 0 } },
{ "foreach_reverse", KEYWORD_FOREACH_REVERSE, { 0, 0, 0, 1, 0, 0 } },
{ "friend", KEYWORD_FRIEND, { 0, 1, 0, 1, 0, 0 } },
{ "function", KEYWORD_FUNCTION, { 0, 0, 0, 1, 0, 1 } },
{ "goto", KEYWORD_GOTO, { 1, 1, 1, 1, 1, 0 } },
{ "hdl_node", KEYWORD_HDL_NODE, { 0, 0, 0, 0, 0, 1 } },
{ "idouble", KEYWORD_IDOUBLE, { 0, 0, 0, 1, 0, 0 } },
{ "if", KEYWORD_IF, { 1, 1, 1, 1, 1, 0 } },
{ "ifloat", KEYWORD_IFLOAT, { 0, 0, 0, 1, 0, 0 } },
{ "implements", KEYWORD_IMPLEMENTS, { 0, 0, 0, 0, 1, 0 } },
{ "import", KEYWORD_IMPORT, { 0, 0, 0, 1, 1, 0 } },
{ "in", KEYWORD_IN, { 0, 0, 0, 1, 0, 0 } },
{ "inline", KEYWORD_INLINE, { 0, 1, 0, 1, 0, 0 } },
{ "inout", KEYWORD_INOUT, { 0, 0, 0, 1, 0, 1 } },
{ "input", KEYWORD_INPUT, { 0, 0, 0, 0, 0, 1 } },
{ "int", KEYWORD_INT, { 1, 1, 1, 1, 1, 0 } },
{ "integer", KEYWORD_INTEGER, { 0, 0, 0, 0, 0, 1 } },
{ "interface", KEYWORD_INTERFACE, { 0, 0, 1, 1, 1, 1 } },
{ "internal", KEYWORD_INTERNAL, { 0, 0, 1, 0, 0, 0 } },
{ "invariant", KEYWORD_INVARIANT, { 0, 0, 0, 1, 0, 0 } },
{ "ireal", KEYWORD_IREAL, { 0, 0, 0, 1, 0, 0 } },
{ "is", KEYWORD_IS, { 0, 0, 0, 1, 0, 0 } },
{ "lazy", KEYWORD_LAZY, { 0, 0, 0, 1, 0, 0 } },
{ "local", KEYWORD_LOCAL, { 0, 0, 0, 0, 0, 1 } },
{ "long", KEYWORD_LONG, { 1, 1, 1, 1, 1, 0 } },
{ "m_bad_state", KEYWORD_M_BAD_STATE, { 0, 0, 0, 0, 0, 1 } },
{ "m_bad_trans", KEYWORD_M_BAD_TRANS, { 0, 0, 0, 0, 0, 1 } },
{ "m_state", KEYWORD_M_STATE, { 0, 0, 0, 0, 0, 1 } },
{ "m_trans", KEYWORD_M_TRANS, { 0, 0, 0, 0, 0, 1 } },
{ "mixin", KEYWORD_MIXIN, { 0, 0, 0, 1, 0, 0 } },
{ "module", KEYWORD_MODULE, { 0, 0, 0, 1, 0, 0 } },
{ "mutable", KEYWORD_MUTABLE, { 0, 1, 0, 1, 0, 0 } },
{ "namespace", KEYWORD_NAMESPACE, { 0, 1, 1, 1, 0, 0 } },
{ "native", KEYWORD_NATIVE, { 0, 0, 0, 0, 1, 0 } },
{ "new", KEYWORD_NEW, { 0, 1, 1, 1, 1, 0 } },
{ "newcov", KEYWORD_NEWCOV, { 0, 0, 0, 0, 0, 1 } },
{ "NHOLD", KEYWORD_NHOLD, { 0, 0, 0, 0, 0, 1 } },
{ "noexcept", KEYWORD_NOEXCEPT, { 0, 1, 0, 0, 0, 0 } },
{ "NSAMPLE", KEYWORD_NSAMPLE, { 0, 0, 0, 0, 0, 1 } },
{ "null", KEYWORD_NULL, { 0, 0, 0, 1, 0, 0 } },
{ "operator", KEYWORD_OPERATOR, { 0, 1, 1, 1, 0, 0 } },
{ "out", KEYWORD_OUT, { 0, 0, 0, 1, 0, 0 } },
{ "output", KEYWORD_OUTPUT, { 0, 0, 0, 0, 0, 1 } },
{ "overload", KEYWORD_OVERLOAD, { 0, 1, 0, 1, 0, 0 } },
{ "override", KEYWORD_OVERRIDE, { 0, 0, 1, 1, 0, 0 } },
{ "package", KEYWORD_PACKAGE, { 0, 0, 0, 1, 1, 0 } },
{ "packed", KEYWORD_PACKED, { 0, 0, 0, 0, 0, 1 } },
{ "PHOLD", KEYWORD_PHOLD, { 0, 0, 0, 0, 0, 1 } },
{ "port", KEYWORD_PORT, { 0, 0, 0, 0, 0, 1 } },
{ "pragma", KEYWORD_PRAGMA, { 0, 0, 0, 1, 0, 0 } },
{ "private", KEYWORD_PRIVATE, { 0, 1, 1, 1, 1, 0 } },
{ "program", KEYWORD_PROGRAM, { 0, 0, 0, 0, 0, 1 } },
{ "protected", KEYWORD_PROTECTED, { 0, 1, 1, 1, 1, 1 } },
{ "PSAMPLE", KEYWORD_PSAMPLE, { 0, 0, 0, 0, 0, 1 } },
{ "public", KEYWORD_PUBLIC, { 0, 1, 1, 1, 1, 1 } },
{ "real", KEYWORD_REAL, { 0, 0, 0, 1, 0, 0 } },
{ "register", KEYWORD_REGISTER, { 1, 1, 0, 1, 0, 0 } },
{ "return", KEYWORD_RETURN, { 1, 1, 1, 1, 1, 0 } },
{ "scope", KEYWORD_SCOPE, { 0, 0, 0, 1, 0, 0 } },
{ "shadow", KEYWORD_SHADOW, { 0, 0, 0, 0, 0, 1 } },
{ "short", KEYWORD_SHORT, { 1, 1, 1, 1, 1, 0 } },
{ "signed", KEYWORD_SIGNED, { 1, 1, 0, 1, 0, 0 } },
{ "state", KEYWORD_STATE, { 0, 0, 0, 0, 0, 1 } },
{ "static", KEYWORD_STATIC, { 1, 1, 1, 1, 1, 1 } },
{ "string", KEYWORD_STRING, { 0, 0, 1, 0, 0, 1 } },
{ "struct", KEYWORD_STRUCT, { 1, 1, 1, 1, 0, 0 } },
{ "super", KEYWORD_SUPER, { 0, 0, 0, 1, 0, 0 } },
{ "switch", KEYWORD_SWITCH, { 1, 1, 1, 1, 1, 0 } },
{ "synchronized", KEYWORD_SYNCHRONIZED, { 0, 0, 0, 1, 1, 0 } },
{ "task", KEYWORD_TASK, { 0, 0, 0, 0, 0, 1 } },
{ "template", KEYWORD_TEMPLATE, { 0, 1, 0, 1, 0, 0 } },
{ "this", KEYWORD_THIS, { 0, 1, 1, 0, 1, 0 } },
{ "throw", KEYWORD_THROW, { 0, 1, 1, 1, 1, 0 } },
{ "throws", KEYWORD_THROWS, { 0, 0, 0, 0, 1, 0 } },
{ "trans", KEYWORD_TRANS, { 0, 0, 0, 0, 0, 1 } },
{ "transient", KEYWORD_TRANSIENT, { 0, 0, 0, 0, 1, 0 } },
{ "transition", KEYWORD_TRANSITION, { 0, 0, 0, 0, 0, 1 } },
{ "true", KEYWORD_TRUE, { 0, 0, 0, 1, 0, 0 } },
{ "try", KEYWORD_TRY, { 0, 1, 1, 1, 0, 0 } },
{ "typedef", KEYWORD_TYPEDEF, { 1, 1, 1, 1, 0, 1 } },
{ "typeid", KEYWORD_TYPEID, { 0, 0, 0, 1, 0, 0 } },
{ "typename", KEYWORD_TYPENAME, { 0, 1, 0, 1, 0, 0 } },
{ "typeof", KEYWORD_TYPEOF, { 0, 0, 0, 1, 0, 0 } },
{ "ubyte", KEYWORD_UBYTE, { 0, 0, 0, 1, 0, 0 } },
{ "ucent", KEYWORD_UCENT, { 0, 0, 0, 1, 0, 0 } },
{ "uint", KEYWORD_UINT, { 0, 0, 1, 1, 0, 0 } },
{ "ulong", KEYWORD_ULONG, { 0, 0, 1, 1, 0, 0 } },
{ "union", KEYWORD_UNION, { 1, 1, 0, 1, 0, 0 } },
{ "unittest", KEYWORD_UNITTEST, { 0, 0, 0, 1, 0, 0 } },
{ "unsigned", KEYWORD_UNSIGNED, { 1, 1, 1, 1, 0, 0 } },
{ "ushort", KEYWORD_USHORT, { 0, 0, 1, 1, 0, 0 } },
{ "using", KEYWORD_USING, { 0, 1, 1, 1, 0, 0 } },
{ "version", KEYWORD_VERSION, { 0, 0, 0, 1, 0, 0 } },
{ "virtual", KEYWORD_VIRTUAL, { 0, 1, 1, 1, 0, 1 } },
{ "void", KEYWORD_VOID, { 1, 1, 1, 1, 1, 1 } },
{ "volatile", KEYWORD_VOLATILE, { 1, 1, 1, 1, 1, 0 } },
{ "wchar", KEYWORD_WCHAR, { 0, 0, 0, 1, 0, 0 } },
{ "wchar_t", KEYWORD_WCHAR_T, { 0, 1, 1, 0, 0, 0 } },
{ "while", KEYWORD_WHILE, { 1, 1, 1, 1, 1, 0 } },
{ "with", KEYWORD_WITH, { 0, 0, 0, 1, 0, 0 } },
};
/*
* FUNCTION PROTOTYPES
*/
static void createTags (const unsigned int nestLevel, statementInfo *const parent);
/*
* FUNCTION DEFINITIONS
*/
/*
* Token management
*/
static void initToken (tokenInfo* const token)
{
token->type = TOKEN_NONE;
token->keyword = KEYWORD_NONE;
token->lineNumber = getInputLineNumber ();
token->filePosition = getInputFilePosition ();
vStringClear (token->name);
}
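/* Advance the circular token buffer to its next slot (wrapping at
 * NumTokens) and reinitialize that slot; prevToken() walks the same
 * ring backwards.
 */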
static void advanceToken (statementInfo* const st)
{
if (st->tokenIndex >= (unsigned int) NumTokens - 1)
st->tokenIndex = 0;
else
++st->tokenIndex;
initToken (st->token [st->tokenIndex]);
}
static tokenInfo *prevToken (const statementInfo *const st, unsigned int n)
{
unsigned int tokenIndex;
unsigned int num = (unsigned int) NumTokens;
Assert (n < num);
tokenIndex = (st->tokenIndex + num - n) % num;
return st->token [tokenIndex];
}
static void setToken (statementInfo *const st, const tokenType type)
{
tokenInfo *token;
token = activeToken (st);
initToken (token);
token->type = type;
}
static void retardToken (statementInfo *const st)
{
if (st->tokenIndex == 0)
st->tokenIndex = (unsigned int) NumTokens - 1;
else
--st->tokenIndex;
setToken (st, TOKEN_NONE);
}
static tokenInfo *newToken (void)
{
tokenInfo *const token = xMalloc (1, tokenInfo);
token->name = vStringNew ();
initToken (token);
return token;
}
static void deleteToken (tokenInfo *const token)
{
if (token != NULL)
{
vStringDelete (token->name);
eFree (token);
}
}
static const char *accessString (const accessType access)
{
static const char *const names [] = {
"?", "local", "private", "protected", "public", "default"
};
Assert (ARRAY_SIZE (names) == ACCESS_COUNT);
Assert ((int) access < ACCESS_COUNT);
return names [(int) access];
}
static const char *implementationString (const impType imp)
{
static const char *const names [] ={
"?", "abstract", "virtual", "pure virtual"
};
Assert (ARRAY_SIZE (names) == IMP_COUNT);
Assert ((int) imp < IMP_COUNT);
return names [(int) imp];
}
/*
* Debugging functions
*/
#ifdef DEBUG
#define boolString(c) ((c) ? "TRUE" : "FALSE")
static const char *tokenString (const tokenType type)
{
static const char *const names [] = {
"none", "args", "}", "{", "colon", "comma", "double colon", "keyword",
"name", "package", "paren-name", "semicolon", "specifier"
};
Assert (ARRAY_SIZE (names) == TOKEN_COUNT);
Assert ((int) type < TOKEN_COUNT);
return names [(int) type];
}
static const char *scopeString (const tagScope scope)
{
static const char *const names [] = {
"global", "static", "extern", "friend", "typedef"
};
Assert (ARRAY_SIZE (names) == SCOPE_COUNT);
Assert ((int) scope < SCOPE_COUNT);
return names [(int) scope];
}
static const char *declString (const declType declaration)
{
static const char *const names [] = {
"?", "base", "class", "enum", "event", "function", "function template",
"ignore", "interface", "mixin", "namespace", "no mangle", "package", "package ref",
"private", "program", "protected", "public", "struct", "task", "template",
"union", "using", "version", "annotation"
};
Assert (ARRAY_SIZE (names) == DECL_COUNT);
Assert ((int) declaration < DECL_COUNT);
return names [(int) declaration];
}
static const char *keywordString (const keywordId keyword)
{
const size_t count = ARRAY_SIZE (KeywordTable);
const char *name = "none";
size_t i;
for (i = 0 ; i < count ; ++i)
{
const keywordDesc *p = &KeywordTable [i];
if (p->id == keyword)
{
name = p->name;
break;
}
}
return name;
}
static void __unused__ pt (tokenInfo *const token)
{
if (isType (token, TOKEN_NAME))
printf ("type: %-12s: %-13s line: %lu\n",
tokenString (token->type), vStringValue (token->name),
token->lineNumber);
else if (isType (token, TOKEN_KEYWORD))
printf ("type: %-12s: %-13s line: %lu\n",
tokenString (token->type), keywordString (token->keyword),
token->lineNumber);
else
printf ("type: %-12s line: %lu\n",
tokenString (token->type), token->lineNumber);
}
static void __unused__ ps (statementInfo *const st)
{
#define P "[%-7u]"
static unsigned int id = 0;
unsigned int i;
printf (P"scope: %s decl: %s gotName: %s gotParenName: %s\n", id,
scopeString (st->scope), declString (st->declaration),
boolString (st->gotName), boolString (st->gotParenName));
printf (P"haveQualifyingName: %s\n", id, boolString (st->haveQualifyingName));
printf (P"access: %s default: %s\n", id, accessString (st->member.access),
accessString (st->member.accessDefault));
printf (P"token : ", id);
pt (activeToken (st));
for (i = 1 ; i < (unsigned int) NumTokens ; ++i)
{
printf (P"prev %u : ", id, i);
pt (prevToken (st, i));
}
printf (P"context: ", id);
pt (st->context);
id++;
#undef P
}
#endif
/*
* Statement management
*/
static boolean isContextualKeyword (const tokenInfo *const token)
{
boolean result;
switch (token->keyword)
{
case KEYWORD_CLASS:
case KEYWORD_ENUM:
case KEYWORD_INTERFACE:
case KEYWORD_NAMESPACE:
case KEYWORD_STRUCT:
case KEYWORD_UNION:
case KEYWORD_VERSION:
case KEYWORD_TEMPLATE:
result = TRUE;
break;
default: result = FALSE; break;
}
return result;
}
static boolean isContextualStatement (const statementInfo *const st)
{
boolean result = FALSE;
if (st != NULL) switch (st->declaration)
{
case DECL_CLASS:
case DECL_ENUM:
case DECL_INTERFACE:
case DECL_NAMESPACE:
case DECL_PRIVATE:
case DECL_PROTECTED:
case DECL_PUBLIC:
case DECL_STRUCT:
case DECL_UNION:
case DECL_TEMPLATE:
case DECL_ANNOTATION:
result = TRUE;
break;
default: result = FALSE; break;
}
return result;
}
static boolean isMember (const statementInfo *const st)
{
boolean result;
if (isType (st->context, TOKEN_NAME))
result = TRUE;
else
result = (boolean)
(st->parent != NULL && isContextualStatement (st->parent));
return result;
}
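/* Set the default member access implied by the enclosing declaration
 * (e.g. private for C++ classes, public for structs, unions and
 * interfaces, the package default for Java classes).
 */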
static void initMemberInfo (statementInfo *const st)
{
accessType accessDefault = ACCESS_UNDEFINED;
if (st->parent != NULL) switch (st->parent->declaration)
{
case DECL_PRIVATE:
accessDefault = ACCESS_PRIVATE;
break;
case DECL_PROTECTED:
accessDefault = ACCESS_PROTECTED;
break;
case DECL_PUBLIC:
accessDefault = ACCESS_PUBLIC;
break;
case DECL_ENUM:
accessDefault = (isInputLanguage (Lang_java) ? ACCESS_PUBLIC : ACCESS_UNDEFINED);
break;
case DECL_NAMESPACE:
accessDefault = ACCESS_UNDEFINED;
break;
case DECL_CLASS:
if (isInputLanguage (Lang_java))
accessDefault = ACCESS_DEFAULT;
else if (isInputLanguage (Lang_d))
accessDefault = ACCESS_PUBLIC;
else
accessDefault = ACCESS_PRIVATE;
break;
case DECL_INTERFACE:
case DECL_STRUCT:
case DECL_UNION:
case DECL_ANNOTATION:
accessDefault = ACCESS_PUBLIC;
break;
default: break;
}
st->member.accessDefault = accessDefault;
st->member.access = accessDefault;
}
static void reinitStatement (statementInfo *const st, const boolean partial)
{
unsigned int i;
if (! partial)
{
st->scope = SCOPE_GLOBAL;
if (isContextualStatement (st->parent))
st->declaration = DECL_BASE;
else
st->declaration = DECL_NONE;
}
st->gotParenName = FALSE;
st->isPointer = FALSE;
st->inFunction = FALSE;
st->assignment = FALSE;
st->notVariable = FALSE;
st->implementation = IMP_DEFAULT;
st->gotArgs = FALSE;
st->gotName = FALSE;
st->haveQualifyingName = FALSE;
st->tokenIndex = 0;
if (st->parent != NULL)
st->inFunction = st->parent->inFunction;
for (i = 0 ; i < (unsigned int) NumTokens ; ++i)
initToken (st->token [i]);
initToken (st->context);
	/* Keep the block name, so that a variable following a comma will
	 * still have the structure name.
	 */
if (! partial)
initToken (st->blockName);
vStringClear (st->parentClasses);
/* Init member info.
*/
if (! partial)
st->member.access = st->member.accessDefault;
}
static void initStatement (statementInfo *const st, statementInfo *const parent)
{
st->parent = parent;
initMemberInfo (st);
reinitStatement (st, FALSE);
}
/*
* Tag generation functions
*/
#define cTagKind(type) cTagKindFull(type, TRUE)
#define cTagKindNoAssert(type) cTagKindFull(type, FALSE)
static cKind cTagKindFull (const tagType type, const boolean with_assert)
{
cKind result = CK_UNDEFINED;
switch (type)
{
case TAG_CLASS: result = CK_CLASS; break;
case TAG_ENUM: result = CK_ENUMERATION; break;
case TAG_ENUMERATOR: result = CK_ENUMERATOR; break;
case TAG_FUNCTION: result = CK_FUNCTION; break;
case TAG_LOCAL: result = CK_LOCAL; break;
case TAG_MEMBER: result = CK_MEMBER; break;
case TAG_NAMESPACE: result = CK_NAMESPACE; break;
case TAG_PROTOTYPE: result = CK_PROTOTYPE; break;
case TAG_STRUCT: result = CK_STRUCT; break;
case TAG_TYPEDEF: result = CK_TYPEDEF; break;
case TAG_UNION: result = CK_UNION; break;
case TAG_VARIABLE: result = CK_VARIABLE; break;
case TAG_EXTERN_VAR: result = CK_EXTERN_VARIABLE; break;
case TAG_LABEL: result = CK_LABEL; break;
default: if (with_assert) Assert ("Bad C tag type" == NULL); break;
}
return result;
}
#define csharpTagKind(type) csharpTagKindFull(type, TRUE)
#define csharpTagKindNoAssert(type) csharpTagKindFull(type, FALSE)
static csharpKind csharpTagKindFull (const tagType type, const boolean with_assert)
{
csharpKind result = CSK_UNDEFINED;
switch (type)
{
case TAG_CLASS: result = CSK_CLASS; break;
case TAG_ENUM: result = CSK_ENUMERATION; break;
case TAG_ENUMERATOR: result = CSK_ENUMERATOR; break;
case TAG_EVENT: result = CSK_EVENT; break;
case TAG_FIELD: result = CSK_FIELD ; break;
case TAG_INTERFACE: result = CSK_INTERFACE; break;
case TAG_LOCAL: result = CSK_LOCAL; break;
case TAG_METHOD: result = CSK_METHOD; break;
case TAG_NAMESPACE: result = CSK_NAMESPACE; break;
case TAG_PROPERTY: result = CSK_PROPERTY; break;
case TAG_STRUCT: result = CSK_STRUCT; break;
case TAG_TYPEDEF: result = CSK_TYPEDEF; break;
default: if (with_assert) Assert ("Bad C# tag type" == NULL); break;
}
return result;
}
#define javaTagKind(type) javaTagKindFull(type, TRUE)
#define javaTagKindNoAssert(type) javaTagKindFull(type, FALSE)
static javaKind javaTagKindFull (const tagType type, boolean with_assert)
{
javaKind result = JK_UNDEFINED;
switch (type)
{
case TAG_CLASS: result = JK_CLASS; break;
case TAG_ENUM: result = JK_ENUM; break;
case TAG_ENUMERATOR: result = JK_ENUM_CONSTANT; break;
case TAG_FIELD: result = JK_FIELD; break;
case TAG_INTERFACE: result = JK_INTERFACE; break;
case TAG_LOCAL: result = JK_LOCAL; break;
case TAG_METHOD: result = JK_METHOD; break;
case TAG_PACKAGE: /* Fall through */
case TAG_PACKAGEREF: result = JK_PACKAGE; break;
	/* Annotations are deliberately reported as interfaces, since the
	 * official syntax reference seems to consider them interfaces too.
	 */
case TAG_ANNOTATION: result = JK_INTERFACE; break;
default: if (with_assert) Assert ("Bad Java tag type" == NULL); break;
}
return result;
}
#define dTagKind(type) dTagKindFull(type, TRUE)
#define dTagKindNoAssert(type) dTagKindFull(type, FALSE)
static dKind dTagKindFull (const tagType type, boolean with_assert)
{
dKind result = DK_UNDEFINED;
switch (type)
{
case TAG_TYPEDEF: result = DK_ALIAS; break;
case TAG_CLASS: result = DK_CLASS; break;
case TAG_ENUM: result = DK_ENUMERATION; break;
case TAG_ENUMERATOR: result = DK_ENUMERATOR; break;
case TAG_EXTERN_VAR: result = DK_EXTERN_VARIABLE; break;
case TAG_FUNCTION: result = DK_FUNCTION; break;
case TAG_INTERFACE: result = DK_INTERFACE; break;
case TAG_LOCAL: result = DK_LOCAL; break;
case TAG_MEMBER: result = DK_MEMBER; break;
case TAG_MIXIN: result = DK_MIXIN; break;
case TAG_PACKAGE: result = DK_MODULE; break;
case TAG_NAMESPACE: result = DK_NAMESPACE; break;
case TAG_PROTOTYPE: result = DK_PROTOTYPE; break;
case TAG_STRUCT: result = DK_STRUCT; break;
case TAG_TEMPLATE: result = DK_TEMPLATE; break;
case TAG_UNION: result = DK_UNION; break;
case TAG_VARIABLE: result = DK_VARIABLE; break;
case TAG_VERSION: result = DK_VERSION; break;
default: if (with_assert) Assert ("Bad D tag type" == NULL); break;
}
return result;
}
#define veraTagKind(type) veraTagKindFull(type, TRUE)
#define veraTagKindNoAssert(type) veraTagKindFull(type, FALSE)
static veraKind veraTagKindFull (const tagType type, boolean with_assert)
{
veraKind result = VK_UNDEFINED;
switch (type)
{
case TAG_CLASS: result = VK_CLASS; break;
case TAG_ENUM: result = VK_ENUMERATION; break;
case TAG_ENUMERATOR: result = VK_ENUMERATOR; break;
case TAG_FUNCTION: result = VK_FUNCTION; break;
case TAG_INTERFACE: result = VK_INTERFACE; break;
case TAG_LOCAL: result = VK_LOCAL; break;
case TAG_MEMBER: result = VK_MEMBER; break;
case TAG_PROGRAM: result = VK_PROGRAM; break;
case TAG_PROTOTYPE: result = VK_PROTOTYPE; break;
case TAG_SIGNAL: result = VK_SIGNAL; break;
case TAG_TASK: result = VK_TASK; break;
case TAG_TYPEDEF: result = VK_TYPEDEF; break;
case TAG_VARIABLE: result = VK_VARIABLE; break;
case TAG_EXTERN_VAR: result = VK_EXTERN_VARIABLE; break;
default: if (with_assert) Assert ("Bad Vera tag type" == NULL); break;
}
return result;
}
static const kindOption *kindForType (const tagType type)
{
const kindOption * result;
if (isInputLanguage (Lang_csharp))
result = &(CsharpKinds [csharpTagKind (type)]);
else if (isInputLanguage (Lang_java))
result = &(JavaKinds [javaTagKind (type)]);
else if (isInputLanguage (Lang_d))
result = &(DKinds [dTagKind (type)]);
else if (isInputLanguage (Lang_vera))
result = &(VeraKinds [veraTagKind (type)]);
else
result = &(CKinds [cTagKind (type)]);
return result;
}
static int roleForType (const tagType type)
{
int result;
result = ROLE_INDEX_DEFINITION;
if (isInputLanguage (Lang_java))
{
if (type == TAG_PACKAGEREF)
result = JAVAR_PACKAGE_IMPORTED;
}
return result;
}
static const char *tagName (const tagType type)
{
const char* result;
if (isInputLanguage (Lang_csharp))
result = CsharpKinds [csharpTagKind (type)].name;
else if (isInputLanguage (Lang_java))
result = JavaKinds [javaTagKind (type)].name;
else if (isInputLanguage (Lang_d))
result = DKinds [dTagKind (type)].name;
else if (isInputLanguage (Lang_vera))
result = VeraKinds [veraTagKind (type)].name;
else
result = CKinds [cTagKind (type)].name;
return result;
}
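/* Decide whether a tag of the given type should be emitted, honoring both
 * the per-kind enablement flags and the file-scope extra tag setting.
 */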
static boolean includeTag (const tagType type, const boolean isFileScope)
{
boolean result;
int k;
kindOption* kopt = NULL;
if (isFileScope && ! isXtagEnabled(XTAG_FILE_SCOPE))
result = FALSE;
else if (isInputLanguage (Lang_csharp))
{
k = csharpTagKindNoAssert (type);
kopt = CsharpKinds;
}
else if (isInputLanguage (Lang_java))
{
k = javaTagKindNoAssert (type);
kopt = JavaKinds;
}
else if (isInputLanguage (Lang_d))
{
k = dTagKindNoAssert (type);
kopt = DKinds;
}
else if (isInputLanguage (Lang_vera))
{
k = veraTagKindNoAssert (type);
kopt = VeraKinds;
}
else
{
k = cTagKindNoAssert (type);
kopt = CKinds;
}
if (kopt)
{
Assert (k >= COMMONK_UNDEFINED);
if (k == COMMONK_UNDEFINED)
result = FALSE;
else
result = kopt [k].enabled;
}
return result;
}
static tagType declToTagType (const declType declaration)
{
tagType type = TAG_UNDEFINED;
switch (declaration)
{
case DECL_CLASS: type = TAG_CLASS; break;
case DECL_ENUM: type = TAG_ENUM; break;
case DECL_EVENT: type = TAG_EVENT; break;
case DECL_FUNCTION: type = TAG_FUNCTION; break;
case DECL_FUNCTION_TEMPLATE: type = TAG_FUNCTION; break;
case DECL_INTERFACE: type = TAG_INTERFACE; break;
case DECL_NAMESPACE: type = TAG_NAMESPACE; break;
case DECL_PROGRAM: type = TAG_PROGRAM; break;
case DECL_PRIVATE: type = TAG_CLASS; break;
case DECL_PROTECTED: type = TAG_CLASS; break;
case DECL_PUBLIC: type = TAG_CLASS; break;
case DECL_TASK: type = TAG_TASK; break;
case DECL_TEMPLATE: type = TAG_TEMPLATE; break;
case DECL_STRUCT: type = TAG_STRUCT; break;
case DECL_UNION: type = TAG_UNION; break;
case DECL_VERSION: type = TAG_VERSION; break;
case DECL_ANNOTATION: type = TAG_ANNOTATION; break;
default: Assert ("Unexpected declaration" == NULL); break;
}
return type;
}
static const char* accessField (const statementInfo *const st)
{
const char* result = NULL;
if (isInputLanguage (Lang_cpp) && st->scope == SCOPE_FRIEND)
result = "friend";
else if (st->member.access != ACCESS_UNDEFINED)
result = accessString (st->member.access);
return result;
}
static void addContextSeparator (vString *const scope)
{
if (isInputLanguage (Lang_c) || isInputLanguage (Lang_cpp))
vStringCatS (scope, "::");
else if (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp) || isInputLanguage(Lang_d))
vStringCatS (scope, ".");
}
static void addOtherFields (tagEntryInfo* const tag, const tagType type,
const statementInfo *const st,
vString *const scope, vString *const typeRef)
{
/* For selected tag types, append an extension flag designating the
* parent object in which the tag is defined.
*/
switch (type)
{
default: break;
case TAG_FUNCTION:
case TAG_TEMPLATE:
case TAG_METHOD:
case TAG_PROTOTYPE:
if (vStringLength (Signature) > 0)
tag->extensionFields.signature = vStringValue (Signature);
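		/* fall through: functions and methods also receive the
		 * scope-related fields below */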
case TAG_CLASS:
case TAG_ENUM:
case TAG_ENUMERATOR:
case TAG_EVENT:
case TAG_FIELD:
case TAG_INTERFACE:
case TAG_MEMBER:
case TAG_NAMESPACE:
case TAG_PROPERTY:
case TAG_SIGNAL:
case TAG_STRUCT:
case TAG_TASK:
case TAG_TYPEDEF:
case TAG_UNION:
case TAG_ANNOTATION:
if (vStringLength (scope) > 0 &&
(isMember (st) || st->parent->declaration == DECL_NAMESPACE))
{
tagType ptype;
if (isType (st->context, TOKEN_NAME))
{
tag->extensionFields.scopeKind = kindForType (TAG_CLASS);
tag->extensionFields.scopeName = vStringValue (scope);
}
else if ((ptype = declToTagType (parentDecl (st))) &&
includeTag (ptype, isXtagEnabled(XTAG_FILE_SCOPE)))
{
tag->extensionFields.scopeKind = kindForType (ptype);
tag->extensionFields.scopeName = vStringValue (scope);
}
}
if ((type == TAG_CLASS || type == TAG_INTERFACE ||
type == TAG_STRUCT || type == TAG_ANNOTATION) && vStringLength (st->parentClasses) > 0)
{
tag->extensionFields.inheritance =
vStringValue (st->parentClasses);
}
if (st->implementation != IMP_DEFAULT &&
(isInputLanguage (Lang_cpp) || isInputLanguage (Lang_csharp) ||
isInputLanguage (Lang_d) || isInputLanguage (Lang_java)))
{
tag->extensionFields.implementation =
implementationString (st->implementation);
}
if (isMember (st))
{
tag->extensionFields.access = accessField (st);
}
break;
}
/* Add typename info, type of the tag and name of struct/union/etc. */
if ((type == TAG_TYPEDEF || type == TAG_VARIABLE || type == TAG_MEMBER)
&& isContextualStatement(st))
{
char *p;
tag->extensionFields.typeRef [0] =
tagName (declToTagType (st->declaration));
p = vStringValue (st->blockName->name);
/* If there was no {} block get the name from the token before the
* name (current token is ';' or ',', previous token is the name).
*/
if (p == NULL || *p == '\0')
{
tokenInfo *const prev2 = prevToken (st, 2);
if (isType (prev2, TOKEN_NAME))
p = vStringValue (prev2->name);
}
/* Prepend the scope name if there is one. */
if (vStringLength (scope) > 0)
{
vStringCopy(typeRef, scope);
addContextSeparator (typeRef);
vStringCatS(typeRef, p);
p = vStringValue (typeRef);
}
tag->extensionFields.typeRef [1] = p;
}
}
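/* Build the fully qualified scope string for this statement by walking up
 * its parent statements; returns TRUE only when a complete scope could be
 * reconstructed.
 */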
static boolean findScopeHierarchy (vString *const string, const statementInfo *const st)
{
boolean found = FALSE;
vStringClear (string);
if (isType (st->context, TOKEN_NAME))
{
vStringCopy (string, st->context->name);
found = TRUE;
}
if (st->parent != NULL)
{
vString *temp = vStringNew ();
const statementInfo *s;
for (s = st->parent ; s != NULL ; s = s->parent)
{
if (isContextualStatement (s) ||
s->declaration == DECL_NAMESPACE ||
s->declaration == DECL_PROGRAM)
{
if (s->declaration == DECL_PRIVATE ||
s->declaration == DECL_PROTECTED ||
s->declaration == DECL_PUBLIC) {
continue;
}
found = TRUE;
vStringCopy (temp, string);
vStringClear (string);
if (isType (s->blockName, TOKEN_NAME))
{
if (isType (s->context, TOKEN_NAME) &&
vStringLength (s->context->name) > 0)
{
vStringCat (string, s->context->name);
addContextSeparator (string);
}
vStringCat (string, s->blockName->name);
if (vStringLength (temp) > 0)
addContextSeparator (string);
vStringCat (string, temp);
}
else
{
/* Information for building scope string
is lacking. Maybe input is broken. */
found = FALSE;
}
}
}
vStringDelete (temp);
}
return found;
}
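/* When qualified tags are enabled, emit an extra tag entry whose name is
 * prefixed with its scope; for enumerators the enumeration name itself is
 * stripped from the scope first.
 */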
static void makeExtraTagEntry (const tagType type, tagEntryInfo *const e,
vString *const scope)
{
if (isXtagEnabled(XTAG_QUALIFIED_TAGS) &&
scope != NULL && vStringLength (scope) > 0)
{
vString *const scopedName = vStringNew ();
if (type != TAG_ENUMERATOR)
vStringCopy (scopedName, scope);
else
{
/* remove last component (i.e. enumeration name) from scope */
const char* const sc = vStringValue (scope);
const char* colon = strrchr (sc, ':');
if (colon != NULL)
{
while (*colon == ':' && colon > sc)
--colon;
vStringNCopy (scopedName, scope, colon + 1 - sc);
}
}
if (vStringLength (scopedName) > 0)
{
addContextSeparator (scopedName);
vStringCatS (scopedName, e->name);
e->name = vStringValue (scopedName);
makeTagEntry (e);
}
vStringDelete (scopedName);
}
}
static void makeTag (const tokenInfo *const token,
const statementInfo *const st,
boolean isFileScope, const tagType type)
{
/* Nothing is really of file scope when it appears in a header file.
*/
isFileScope = (boolean) (isFileScope && ! isInputHeaderFile ());
if (isType (token, TOKEN_NAME) && vStringLength (token->name) > 0 &&
includeTag (type, isFileScope))
{
vString *scope;
vString *typeRef;
boolean isScopeBuilt;
/* Use "typeRef" to store the typename from addOtherFields() until
* it's used in makeTagEntry().
*/
tagEntryInfo e;
const kindOption *kind;
int role;
role = roleForType (type);
if (! (role == ROLE_INDEX_DEFINITION || isXtagEnabled (XTAG_REFERENCE_TAGS)))
return;
scope = vStringNew ();
typeRef = vStringNew ();
kind = kindForType (type);
if (role == ROLE_INDEX_DEFINITION)
initTagEntry (&e, vStringValue (token->name), kind);
else
initRefTagEntry (&e, vStringValue (token->name), kind, role);
e.lineNumber = token->lineNumber;
e.filePosition = token->filePosition;
e.isFileScope = isFileScope;
isScopeBuilt = findScopeHierarchy (scope, st);
addOtherFields (&e, type, st, scope, typeRef);
makeTagEntry (&e);
if (isScopeBuilt)
makeExtraTagEntry (type, &e, scope);
vStringDelete (scope);
vStringDelete (typeRef);
}
}
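/* TRUE for declaration specifiers that may introduce a variable or member
 * definition: base types, classes, enums, events, structs, unions and
 * annotations.
 */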
static boolean isValidTypeSpecifier (const declType declaration)
{
boolean result;
switch (declaration)
{
case DECL_BASE:
case DECL_CLASS:
case DECL_ENUM:
case DECL_EVENT:
case DECL_STRUCT:
case DECL_UNION:
case DECL_ANNOTATION:
result = TRUE;
break;
default:
result = FALSE;
break;
}
return result;
}
static void qualifyEnumeratorTag (const statementInfo *const st,
const tokenInfo *const nameToken)
{
if (isType (nameToken, TOKEN_NAME))
makeTag (nameToken, st, TRUE, TAG_ENUMERATOR);
}
static void qualifyFunctionTag (const statementInfo *const st,
const tokenInfo *const nameToken)
{
if (isType (nameToken, TOKEN_NAME))
{
tagType type;
const boolean isFileScope =
(boolean) (st->member.access == ACCESS_PRIVATE ||
(!isMember (st) && st->scope == SCOPE_STATIC));
if (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp))
type = TAG_METHOD;
else if (isInputLanguage (Lang_vera) && st->declaration == DECL_TASK)
type = TAG_TASK;
else
type = TAG_FUNCTION;
makeTag (nameToken, st, isFileScope, type);
}
}
static void qualifyFunctionDeclTag (const statementInfo *const st,
const tokenInfo *const nameToken)
{
if (! isType (nameToken, TOKEN_NAME))
;
else if (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp))
qualifyFunctionTag (st, nameToken);
else if (st->scope == SCOPE_TYPEDEF)
makeTag (nameToken, st, TRUE, TAG_TYPEDEF);
else if (isValidTypeSpecifier (st->declaration) && ! isInputLanguage (Lang_csharp))
makeTag (nameToken, st, TRUE, TAG_PROTOTYPE);
}
static void qualifyCompoundTag (const statementInfo *const st,
const tokenInfo *const nameToken)
{
if (isType (nameToken, TOKEN_NAME))
{
const tagType type = declToTagType (st->declaration);
const boolean fileScoped = (boolean)
(!(isInputLanguage (Lang_java) ||
isInputLanguage (Lang_csharp) ||
isInputLanguage (Lang_vera)));
if (type != TAG_UNDEFINED)
makeTag (nameToken, st, fileScoped, type);
}
}
static void qualifyBlockTag (statementInfo *const st,
const tokenInfo *const nameToken)
{
switch (st->declaration)
{
case DECL_CLASS:
case DECL_ENUM:
case DECL_INTERFACE:
case DECL_NAMESPACE:
case DECL_PROGRAM:
case DECL_STRUCT:
case DECL_UNION:
case DECL_TEMPLATE:
case DECL_VERSION:
case DECL_ANNOTATION:
qualifyCompoundTag (st, nameToken);
break;
default: break;
}
}
static void qualifyVariableTag (const statementInfo *const st,
const tokenInfo *const nameToken)
{
/* We have to watch that we do not interpret a declaration of the
* form "struct tag;" as a variable definition. In such a case, the
* token preceding the name will be a keyword.
*/
if (! isType (nameToken, TOKEN_NAME))
;
else if (st->scope == SCOPE_TYPEDEF)
makeTag (nameToken, st, TRUE, TAG_TYPEDEF);
else if (st->declaration == DECL_EVENT)
makeTag (nameToken, st, (boolean) (st->member.access == ACCESS_PRIVATE),
TAG_EVENT);
else if (st->declaration == DECL_PACKAGE)
makeTag (nameToken, st, FALSE, TAG_PACKAGE);
else if (st->declaration == DECL_PACKAGEREF)
makeTag (nameToken, st, FALSE, TAG_PACKAGEREF);
else if (st->declaration == DECL_USING && st->assignment)
makeTag (nameToken, st, TRUE, TAG_TYPEDEF);
else if (isValidTypeSpecifier (st->declaration))
{
if (st->notVariable)
;
else if (isMember (st))
{
if (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp))
makeTag (nameToken, st,
(boolean) (st->member.access == ACCESS_PRIVATE), TAG_FIELD);
else if (st->scope == SCOPE_GLOBAL || st->scope == SCOPE_STATIC)
makeTag (nameToken, st, TRUE, TAG_MEMBER);
}
else
{
if (st->scope == SCOPE_EXTERN || ! st->haveQualifyingName)
makeTag (nameToken, st, FALSE, TAG_EXTERN_VAR);
else if (st->inFunction)
makeTag (nameToken, st, (boolean) (st->scope == SCOPE_STATIC),
TAG_LOCAL);
else
makeTag (nameToken, st, (boolean) (st->scope == SCOPE_STATIC),
TAG_VARIABLE);
}
}
}
/*
* Parsing functions
*/
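/* Read and discard characters up to (and including) the first occurrence
 * of one of "chars"; returns that character, or EOF/'\0' on failure.
 */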
static int skipToOneOf (const char *const chars)
{
int c;
do
c = cppGetc ();
while (c != EOF && c != '\0' && strchr (chars, c) == NULL);
return c;
}
/* Skip to the next non-white character; while a signature is being
 * collected, any run of whitespace is recorded as a single space.
 */
static int skipToNonWhite (void)
{
boolean found = FALSE;
int c;
#if 0
do
c = cppGetc ();
while (isspace (c));
#else
while (1)
{
c = cppGetc ();
if (isspace (c))
found = TRUE;
else
break;
}
if (CollectingSignature && found)
vStringPut (Signature, ' ');
#endif
return c;
}
/* Skips to the next brace in column 1. This is intended for cases where
* preprocessor constructs result in unbalanced braces.
*/
static void skipToFormattedBraceMatch (void)
{
int c, next;
c = cppGetc ();
next = cppGetc ();
while (c != EOF && (c != '\n' || next != '}'))
{
c = next;
next = cppGetc ();
}
}
/* Skip to the matching character indicated by the pair string. If skipping
* to a matching brace and any brace is found within a different level of a
* #if conditional statement while brace formatting is in effect, we skip to
* the brace matched by its formatting. It is assumed that we have already
* read the character which starts the group (i.e. the first character of
* "pair").
*/
static void skipToMatch (const char *const pair)
{
const boolean braceMatching = (boolean) (strcmp ("{}", pair) == 0);
const boolean braceFormatting = (boolean) (isBraceFormat () && braceMatching);
const unsigned int initialLevel = getDirectiveNestLevel ();
const int begin = pair [0], end = pair [1];
const unsigned long inputLineNumber = getInputLineNumber ();
int matchLevel = 1;
int c = '\0';
while (matchLevel > 0 && (c = skipToNonWhite ()) != EOF)
{
if (CollectingSignature)
vStringPut (Signature, c);
if (c == begin)
{
++matchLevel;
if (braceFormatting && getDirectiveNestLevel () != initialLevel)
{
skipToFormattedBraceMatch ();
break;
}
}
else if (c == end)
{
--matchLevel;
if (braceFormatting && getDirectiveNestLevel () != initialLevel)
{
skipToFormattedBraceMatch ();
break;
}
}
}
if (c == EOF)
{
verbose ("%s: failed to find match for '%c' at line %lu\n",
getInputFileName (), begin, inputLineNumber);
if (braceMatching)
longjmp (Exception, (int) ExceptionBraceFormattingError);
else
longjmp (Exception, (int) ExceptionFormattingError);
}
}
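/* Skip the remainder of a C++ template parameter list; the opening '<'
 * has already been read. Angle brackets inside parentheses or inside
 * default values are ignored, and "<<"/">>" pairs are treated as shift
 * operators rather than nesting, so e.g.
 * "template <typename T, int N = (A < B)> class C;" is handled.
 */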
static void skipCppTemplateParameterList (void)
{
const unsigned long inputLineNumber = getInputLineNumber ();
int angleBracketsLevel = 1;
int c = '\0';
int roundBracketsLevel = 0;
boolean defaultValueExpected = FALSE;
while (angleBracketsLevel > 0 && (c = skipToNonWhite ()) != EOF)
{
if (CollectingSignature)
vStringPut (Signature, c);
if (c == '<')
{
int x = cppGetc ();
if(x != '<')
{
cppUngetc (x);
if (roundBracketsLevel == 0)
{
if (defaultValueExpected == FALSE)
++angleBracketsLevel;
}
}
else if(CollectingSignature)
vStringPut (Signature, x);
}
else if (c == '>')
{
int x = cppGetc ();
if( x != '>')
{
cppUngetc (x);
if (roundBracketsLevel == 0)
{
--angleBracketsLevel;
defaultValueExpected = FALSE;
}
}
else if(CollectingSignature)
vStringPut (Signature, x);
}
else if (c == '(')
roundBracketsLevel ++;
else if (c == ')')
roundBracketsLevel --;
else if (c == '=' && (roundBracketsLevel == 0))
defaultValueExpected = TRUE;
else if (c == ',' && (roundBracketsLevel == 0))
defaultValueExpected = FALSE;
}
if (c == EOF)
{
verbose ("%s: failed to find match for '%c' at line %lu\n",
getInputFileName (), '<', inputLineNumber);
longjmp (Exception, (int) ExceptionFormattingError);
}
}
static void skipParens (void)
{
const int c = skipToNonWhite ();
if (c == '(')
skipToMatch ("()");
else
cppUngetc (c);
}
static void skipBraces (void)
{
const int c = skipToNonWhite ();
if (c == '{')
skipToMatch ("{}");
else
cppUngetc (c);
}
static keywordId analyzeKeyword (const char *const name)
{
const keywordId id = (keywordId) lookupKeyword (name, getInputLanguage ());
return id;
}
static void analyzeIdentifier (tokenInfo *const token)
{
char *const name = vStringValue (token->name);
const char *replacement = NULL;
boolean parensToo = FALSE;
if (isInputLanguage (Lang_java) ||
! isIgnoreToken (name, &parensToo, &replacement))
{
if (replacement != NULL)
token->keyword = analyzeKeyword (replacement);
else
token->keyword = analyzeKeyword (vStringValue (token->name));
if (token->keyword == KEYWORD_NONE)
token->type = TOKEN_NAME;
else
token->type = TOKEN_KEYWORD;
}
else
{
initToken (token);
if (parensToo)
{
int c = skipToNonWhite ();
if (c == '(')
skipToMatch ("()");
}
}
}
static void readIdentifier (tokenInfo *const token, const int firstChar)
{
vString *const name = token->name;
int c = firstChar;
boolean first = TRUE;
initToken (token);
/* Bug #1585745: strangely, C++ destructors allow whitespace between
* the ~ and the class name. */
if (isInputLanguage (Lang_cpp) && firstChar == '~')
{
vStringPut (name, c);
c = skipToNonWhite ();
}
do
{
vStringPut (name, c);
if (CollectingSignature)
{
if (!first)
vStringPut (Signature, c);
first = FALSE;
}
c = cppGetc ();
} while (isident (c) || ((isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp)) && (isHighChar (c) || c == '.')));
vStringTerminate (name);
cppUngetc (c); /* unget non-identifier character */
analyzeIdentifier (token);
}
static void readPackageName (tokenInfo *const token, const int firstChar, boolean allowWildCard)
{
vString *const name = token->name;
int c = firstChar;
initToken (token);
while (isident (c) || (allowWildCard && (c == '*')) || c == '.')
{
vStringPut (name, c);
c = cppGetc ();
}
vStringTerminate (name);
cppUngetc (c); /* unget non-package character */
}
static void readPackageOrNamespace (statementInfo *const st, const declType declaration, boolean allowWildCard)
{
st->declaration = declaration;
if (declaration == DECL_NAMESPACE && !isInputLanguage (Lang_csharp))
{
/* In C++ a namespace is specified one level at a time. */
return;
}
else
{
/* In C#, a namespace can also be specified like a Java package name. */
tokenInfo *const token = activeToken (st);
Assert (isType (token, TOKEN_KEYWORD));
readPackageName (token, skipToNonWhite (), allowWildCard);
token->type = TOKEN_NAME;
st->gotName = TRUE;
st->haveQualifyingName = TRUE;
}
}
static void readVersionName (tokenInfo *const token, const int firstChar)
{
vString *const name = token->name;
int c = firstChar;
initToken (token);
while (isident (c))
{
vStringPut (name, c);
c = cppGetc ();
}
vStringTerminate (name);
cppGetc ();
}
static void readVersion (statementInfo *const st)
{
tokenInfo *const token = activeToken (st);
Assert (isType (token, TOKEN_KEYWORD));
skipToNonWhite ();
readVersionName (token, cppGetc ());
token->type = TOKEN_NAME;
st->declaration = DECL_VERSION;
st->gotName = TRUE;
st->haveQualifyingName = TRUE;
}
static void processName (statementInfo *const st)
{
Assert (isType (activeToken (st), TOKEN_NAME));
if (st->gotName && st->declaration == DECL_NONE)
st->declaration = DECL_BASE;
st->gotName = TRUE;
st->haveQualifyingName = TRUE;
}
static void readOperator (statementInfo *const st)
{
const char *const acceptable = "+-*/%^&|~!=<>,[]";
const tokenInfo* const prev = prevToken (st,1);
tokenInfo *const token = activeToken (st);
vString *const name = token->name;
int c = skipToNonWhite ();
/* When we arrive here, we have the keyword "operator" in 'name'.
*/
if (isType (prev, TOKEN_KEYWORD) && (prev->keyword == KEYWORD_ENUM ||
prev->keyword == KEYWORD_STRUCT || prev->keyword == KEYWORD_UNION))
; /* ignore "operator" keyword if preceded by these keywords */
else if (c == '(')
{
/* Verify whether this is a valid function call (i.e. "()") operator.
*/
if (cppGetc () == ')')
{
vStringPut (name, ' '); /* always separate operator from keyword */
c = skipToNonWhite ();
if (c == '(')
vStringCatS (name, "()");
}
else
{
skipToMatch ("()");
c = cppGetc ();
}
}
else if (isident1 (c))
{
/* Handle "new" and "delete" operators, and conversion functions
* (per 13.3.1.1.2 [2] of the C++ spec).
*/
boolean whiteSpace = TRUE; /* default causes insertion of space */
do
{
if (isspace (c))
whiteSpace = TRUE;
else
{
if (whiteSpace)
{
vStringPut (name, ' ');
whiteSpace = FALSE;
}
vStringPut (name, c);
}
c = cppGetc ();
} while (! isOneOf (c, "(;") && c != EOF);
vStringTerminate (name);
}
else if (isOneOf (c, acceptable))
{
vStringPut (name, ' '); /* always separate operator from keyword */
do
{
vStringPut (name, c);
c = cppGetc ();
} while (isOneOf (c, acceptable));
vStringTerminate (name);
}
cppUngetc (c);
token->type = TOKEN_NAME;
token->keyword = KEYWORD_NONE;
processName (st);
}
static void copyToken (tokenInfo *const dest, const tokenInfo *const src)
{
dest->type = src->type;
dest->keyword = src->keyword;
dest->filePosition = src->filePosition;
dest->lineNumber = src->lineNumber;
vStringCopy (dest->name, src->name);
}
static void setAccess (statementInfo *const st, const accessType access)
{
if (isInputLanguage (Lang_d))
{
int c = skipToNonWhite ();
if (c == '{')
{
switch(access)
{
case ACCESS_PRIVATE:
st->declaration = DECL_PRIVATE;
break;
case ACCESS_PUBLIC:
st->declaration = DECL_PUBLIC;
break;
case ACCESS_PROTECTED:
st->declaration = DECL_PROTECTED;
break;
default:
break;
}
st->member.access = access;
cppUngetc (c);
}
else if (c == ':') {
reinitStatement (st, FALSE);
st->member.accessDefault = access;
}
else {
cppUngetc (c);
}
}
if (isMember (st))
{
if (isInputLanguage (Lang_cpp))
{
int c = skipToNonWhite ();
if (c == ':')
reinitStatement (st, FALSE);
else
cppUngetc (c);
st->member.accessDefault = access;
}
else if (isInputLanguage (Lang_d))
{
if (st->parent != NULL &&
(st->parent->declaration == DECL_PRIVATE ||
st->parent->declaration == DECL_PROTECTED ||
st->parent->declaration == DECL_PUBLIC))
{
st->member.access = st->parent->member.access;
return;
}
}
st->member.access = access;
}
}
static void discardTypeList (tokenInfo *const token)
{
int c = skipToNonWhite ();
while (isident1 (c))
{
readIdentifier (token, c);
c = skipToNonWhite ();
if (c == '.' || c == ',')
c = skipToNonWhite ();
}
cppUngetc (c);
}
static void addParentClass (statementInfo *const st, tokenInfo *const token)
{
if (vStringLength (token->name) > 0 &&
vStringLength (st->parentClasses) > 0)
{
vStringPut (st->parentClasses, ',');
}
vStringCat (st->parentClasses, token->name);
}
static void readParents (statementInfo *const st, const int qualifier)
{
tokenInfo *const token = newToken ();
tokenInfo *const parent = newToken ();
int c;
do
{
c = skipToNonWhite ();
if (isident1 (c))
{
readIdentifier (token, c);
if (isType (token, TOKEN_NAME))
vStringCat (parent->name, token->name);
else
{
addParentClass (st, parent);
initToken (parent);
}
}
else if (c == qualifier)
vStringPut (parent->name, c);
else if (c == '<')
skipToMatch ("<>");
else if (isType (token, TOKEN_NAME))
{
addParentClass (st, parent);
initToken (parent);
}
} while (c != '{' && c != EOF);
cppUngetc (c);
deleteToken (parent);
deleteToken (token);
}
static void skipStatement (statementInfo *const st)
{
st->declaration = DECL_IGNORE;
skipToOneOf (";");
}
static void processAnnotation (statementInfo *const st)
{
st->declaration = DECL_ANNOTATION;
}
static void processInterface (statementInfo *const st)
{
st->declaration = DECL_INTERFACE;
}
static void checkIsClassEnum (statementInfo *const st, const declType decl)
{
if (! isInputLanguage (Lang_cpp) || st->declaration != DECL_ENUM)
st->declaration = decl;
}
static void processToken (tokenInfo *const token, statementInfo *const st)
{
switch (token->keyword) /* is it a reserved word? */
{
default: break;
case KEYWORD_NONE: processName (st); break;
case KEYWORD_ABSTRACT: st->implementation = IMP_ABSTRACT; break;
case KEYWORD_ATTRIBUTE: skipParens (); initToken (token); break;
case KEYWORD_BIND: st->declaration = DECL_BASE; break;
case KEYWORD_BIT: st->declaration = DECL_BASE; break;
case KEYWORD_CATCH: skipParens (); skipBraces (); break;
case KEYWORD_CHAR: st->declaration = DECL_BASE; break;
case KEYWORD_CLASS: checkIsClassEnum (st, DECL_CLASS); break;
case KEYWORD_CONST: st->declaration = DECL_BASE; break;
case KEYWORD_DOUBLE: st->declaration = DECL_BASE; break;
case KEYWORD_ENUM: st->declaration = DECL_ENUM; break;
case KEYWORD_EXTENDS: readParents (st, '.');
setToken (st, TOKEN_NONE); break;
case KEYWORD_FLOAT: st->declaration = DECL_BASE; break;
case KEYWORD_FUNCTION: st->declaration = DECL_BASE; break;
case KEYWORD_FRIEND: st->scope = SCOPE_FRIEND; break;
case KEYWORD_GOTO: skipStatement (st); break;
case KEYWORD_IMPLEMENTS:readParents (st, '.');
setToken (st, TOKEN_NONE); break;
case KEYWORD_IMPORT:
if (isInputLanguage (Lang_java))
readPackageOrNamespace (st, DECL_PACKAGEREF, TRUE);
else
skipStatement (st);
break;
case KEYWORD_INT: st->declaration = DECL_BASE; break;
case KEYWORD_INTEGER: st->declaration = DECL_BASE; break;
case KEYWORD_INTERFACE: processInterface (st); break;
case KEYWORD_LOCAL: setAccess (st, ACCESS_LOCAL); break;
case KEYWORD_LONG: st->declaration = DECL_BASE; break;
case KEYWORD_OPERATOR: readOperator (st); break;
case KEYWORD_MIXIN: st->declaration = DECL_MIXIN; break;
case KEYWORD_PRIVATE: setAccess (st, ACCESS_PRIVATE); break;
case KEYWORD_PROGRAM: st->declaration = DECL_PROGRAM; break;
case KEYWORD_PROTECTED: setAccess (st, ACCESS_PROTECTED); break;
case KEYWORD_PUBLIC: setAccess (st, ACCESS_PUBLIC); break;
case KEYWORD_RETURN: skipStatement (st); break;
case KEYWORD_SHORT: st->declaration = DECL_BASE; break;
case KEYWORD_SIGNED: st->declaration = DECL_BASE; break;
case KEYWORD_STRING: st->declaration = DECL_BASE; break;
case KEYWORD_STRUCT: checkIsClassEnum (st, DECL_STRUCT); break;
case KEYWORD_TASK: st->declaration = DECL_TASK; break;
case KEYWORD_THROWS: discardTypeList (token); break;
case KEYWORD_UNION: st->declaration = DECL_UNION; break;
case KEYWORD_UNSIGNED: st->declaration = DECL_BASE; break;
case KEYWORD_USING: st->declaration = DECL_USING; break;
case KEYWORD_VOID: st->declaration = DECL_BASE; break;
case KEYWORD_VOLATILE: st->declaration = DECL_BASE; break;
case KEYWORD_VERSION: readVersion(st); break;
case KEYWORD_VIRTUAL: st->implementation = IMP_VIRTUAL; break;
case KEYWORD_WCHAR_T: st->declaration = DECL_BASE; break;
case KEYWORD_TEMPLATE:
if (isInputLanguage (Lang_d))
st->declaration = DECL_TEMPLATE;
break;
case KEYWORD_NAMESPACE: readPackageOrNamespace (st, DECL_NAMESPACE, FALSE); break;
case KEYWORD_MODULE:
case KEYWORD_PACKAGE: readPackageOrNamespace (st, DECL_PACKAGE, FALSE); break;
case KEYWORD_EVENT:
if (isInputLanguage (Lang_csharp))
st->declaration = DECL_EVENT;
break;
case KEYWORD_ALIAS:
case KEYWORD_TYPEDEF:
reinitStatement (st, FALSE);
st->scope = SCOPE_TYPEDEF;
break;
case KEYWORD_EXTERN:
if (! isInputLanguage (Lang_csharp) || !st->gotName)
{
reinitStatement (st, FALSE);
st->scope = SCOPE_EXTERN;
st->declaration = DECL_BASE;
}
break;
case KEYWORD_STATIC:
if (! (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp)))
{
reinitStatement (st, FALSE);
st->scope = SCOPE_STATIC;
st->declaration = DECL_BASE;
}
break;
case KEYWORD_FOR:
case KEYWORD_FOREACH:
case KEYWORD_IF:
case KEYWORD_SWITCH:
case KEYWORD_WHILE:
{
int c = skipToNonWhite ();
if (c == '(')
skipToMatch ("()");
break;
}
}
}
/*
* Parenthesis handling functions
*/
static void restartStatement (statementInfo *const st)
{
tokenInfo *const save = newToken ();
tokenInfo *token = activeToken (st);
copyToken (save, token);
DebugStatement ( if (debug (DEBUG_PARSE)) printf ("<ES>");)
reinitStatement (st, FALSE);
token = activeToken (st);
copyToken (token, save);
deleteToken (save);
processToken (token, st);
}
/* Skips over a mem-initializer-list of a ctor-initializer, defined as:
*
* mem-initializer-list:
* mem-initializer, mem-initializer-list
*
* mem-initializer:
* [::] [nested-name-spec] class-name (...)
* identifier
*/
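/* For example (illustrative, not from the original source):
 *     Foo::Foo (int n) : Base (n), count_ (0) { ... }
 * everything between the ':' and the opening '{' is skipped here.
 */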
static void skipMemIntializerList (tokenInfo *const token)
{
int c;
do
{
c = skipToNonWhite ();
while (isident1 (c) || c == ':')
{
if (c != ':')
readIdentifier (token, c);
c = skipToNonWhite ();
}
if (c == '<')
{
skipToMatch ("<>");
c = skipToNonWhite ();
}
if (c == '(')
{
skipToMatch ("()");
c = skipToNonWhite ();
}
} while (c == ',');
cppUngetc (c);
}
static void skipMacro (statementInfo *const st)
{
tokenInfo *const prev2 = prevToken (st, 2);
if (isType (prev2, TOKEN_NAME))
retardToken (st);
skipToMatch ("()");
}
/* Skips over characters following the parameter list. This will be either
* non-ANSI style function declarations or C++ stuff. Our choices:
*
* C (K&R):
* int func ();
* int func (one, two) int one; float two; {...}
* C (ANSI):
* int func (int one, float two);
* int func (int one, float two) {...}
* C++:
* int foo (...) [const|volatile] [throw (...)];
* int foo (...) [const|volatile] [throw (...)] [ctor-initializer] {...}
* int foo (...) [const|volatile] [throw (...)] try [ctor-initializer] {...}
* catch (...) {...}
*/
static boolean skipPostArgumentStuff (
statementInfo *const st, parenInfo *const info)
{
tokenInfo *const token = activeToken (st);
unsigned int parameters = info->parameterCount;
unsigned int elementCount = 0;
boolean restart = FALSE;
boolean end = FALSE;
int c = skipToNonWhite ();
do
{
switch (c)
{
case ')': break;
case ':': skipMemIntializerList (token);break; /* ctor-initializer */
case '[': skipToMatch ("[]"); break;
case '=': cppUngetc (c); end = TRUE; break;
case '{': cppUngetc (c); end = TRUE; break;
case '}': cppUngetc (c); end = TRUE; break;
case '(':
if (elementCount > 0)
++elementCount;
skipToMatch ("()");
break;
case ';':
if (parameters == 0 || elementCount < 2)
{
cppUngetc (c);
end = TRUE;
}
else if (--parameters == 0)
end = TRUE;
break;
default:
if (isident1 (c))
{
readIdentifier (token, c);
switch (token->keyword)
{
case KEYWORD_ATTRIBUTE: skipParens (); break;
case KEYWORD_THROW: skipParens (); break;
case KEYWORD_IF: if (isInputLanguage (Lang_d)) skipParens (); break;
case KEYWORD_TRY: break;
case KEYWORD_NOEXCEPT: break;
case KEYWORD_CONST:
case KEYWORD_VOLATILE:
if (vStringLength (Signature) > 0)
{
vStringPut (Signature, ' ');
vStringCat (Signature, token->name);
}
break;
case KEYWORD_ALIAS:
case KEYWORD_CATCH:
case KEYWORD_CLASS:
case KEYWORD_EXPLICIT:
case KEYWORD_EXTERN:
case KEYWORD_FRIEND:
case KEYWORD_INLINE:
case KEYWORD_MUTABLE:
case KEYWORD_NAMESPACE:
case KEYWORD_NEW:
case KEYWORD_NEWCOV:
case KEYWORD_OPERATOR:
case KEYWORD_OVERLOAD:
case KEYWORD_PRIVATE:
case KEYWORD_PROTECTED:
case KEYWORD_PUBLIC:
case KEYWORD_STATIC:
case KEYWORD_TEMPLATE:
case KEYWORD_TYPEDEF:
case KEYWORD_TYPENAME:
case KEYWORD_USING:
case KEYWORD_VIRTUAL:
/* Never allowed within parameter declarations. */
restart = TRUE;
end = TRUE;
break;
default:
/* "override" and "final" are only keywords in the declaration of a virtual
* member function, so need to be handled specially, not as keywords */
if (isInputLanguage(Lang_cpp) && isType (token, TOKEN_NAME) &&
(strcmp ("override", vStringValue (token->name)) == 0 ||
strcmp ("final", vStringValue (token->name)) == 0))
;
else if (isType (token, TOKEN_NONE))
;
else if (info->isKnrParamList && info->parameterCount > 0)
++elementCount;
else
{
/* If we encounter any other identifier immediately
* following an empty parameter list, this is almost
* certainly one of those Microsoft macro "thingies"
* that the automatic source code generation sticks
* in. Terminate the current statement.
*/
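/* Illustrative: in unpreprocessed "HRESULT Foo () PURE;" the
 * identifier "PURE" after the empty list ends the statement here. */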
restart = TRUE;
end = TRUE;
}
break;
}
}
}
if (! end)
{
c = skipToNonWhite ();
if (c == EOF)
end = TRUE;
}
} while (! end);
if (restart)
restartStatement (st);
else
setToken (st, TOKEN_NONE);
return (boolean) (c != EOF);
}
static void skipJavaThrows (statementInfo *const st)
{
tokenInfo *const token = activeToken (st);
int c = skipToNonWhite ();
if (isident1 (c))
{
readIdentifier (token, c);
if (token->keyword == KEYWORD_THROWS)
{
do
{
c = skipToNonWhite ();
if (isident1 (c))
{
readIdentifier (token, c);
c = skipToNonWhite ();
}
} while (c == '.' || c == ',');
}
}
cppUngetc (c);
setToken (st, TOKEN_NONE);
}
static void analyzePostParens (statementInfo *const st, parenInfo *const info)
{
const unsigned long inputLineNumber = getInputLineNumber ();
int c = skipToNonWhite ();
cppUngetc (c);
if (isOneOf (c, "{;,="))
;
else if (isInputLanguage (Lang_java)) {
if (!insideAnnotationBody(st)) {
skipJavaThrows (st);
}
} else {
if (! skipPostArgumentStuff (st, info))
{
verbose (
"%s: confusing argument declarations beginning at line %lu\n",
getInputFileName (), inputLineNumber);
longjmp (Exception, (int) ExceptionFormattingError);
}
}
}
static boolean languageSupportsGenerics (void)
{
return (boolean) (isInputLanguage (Lang_cpp) || isInputLanguage (Lang_csharp) ||
isInputLanguage (Lang_java));
}
static void processAngleBracket (void)
{
int c = cppGetc ();
if (c == '>') {
/* already found match for template */
} else if (languageSupportsGenerics () && c != '<' && c != '=') {
/* this is a template */
cppUngetc (c);
if (isInputLanguage (Lang_cpp))
skipCppTemplateParameterList ();
else
skipToMatch ("<>");
} else if (c == '<') {
/* skip "<<" or "<<=". */
c = cppGetc ();
if (c != '=') {
cppUngetc (c);
}
} else {
cppUngetc (c);
}
}
static void parseJavaAnnotation (statementInfo *const st)
{
/*
* @Override
* @Target(ElementType.METHOD)
* @SuppressWarnings(value = "unchecked")
*
* But watch out for "@interface"!
*/
tokenInfo *const token = activeToken (st);
int c = skipToNonWhite ();
readIdentifier (token, c);
if (token->keyword == KEYWORD_INTERFACE)
{
/* Oops. This was actually "@interface" defining a new annotation. */
processAnnotation(st);
}
else
{
/* Bug #1691412: skip any annotation arguments. */
skipParens ();
}
}
static int parseParens (statementInfo *const st, parenInfo *const info)
{
tokenInfo *const token = activeToken (st);
unsigned int identifierCount = 0;
unsigned int depth = 1;
boolean firstChar = TRUE;
int nextChar = '\0';
CollectingSignature = TRUE;
vStringClear (Signature);
vStringPut (Signature, '(');
info->parameterCount = 1;
do
{
int c = skipToNonWhite ();
vStringPut (Signature, c);
switch (c)
{
case '^':
break;
case '&':
case '*':
info->isPointer = TRUE;
info->isKnrParamList = FALSE;
if (identifierCount == 0)
info->isParamList = FALSE;
initToken (token);
break;
case ':':
info->isKnrParamList = FALSE;
break;
case '.':
info->isNameCandidate = FALSE;
c = cppGetc ();
if (c != '.')
{
cppUngetc (c);
info->isKnrParamList = FALSE;
}
else
{
c = cppGetc ();
if (c != '.')
{
cppUngetc (c);
info->isKnrParamList = FALSE;
}
else
vStringCatS (Signature, "..."); /* variable arg list */
}
break;
case ',':
info->isNameCandidate = FALSE;
if (info->isKnrParamList)
{
++info->parameterCount;
identifierCount = 0;
}
break;
case '=':
info->isKnrParamList = FALSE;
info->isNameCandidate = FALSE;
if (firstChar)
{
info->isParamList = FALSE;
skipMacro (st);
depth = 0;
}
break;
case '[':
info->isKnrParamList = FALSE;
skipToMatch ("[]");
break;
case '<':
info->isKnrParamList = FALSE;
processAngleBracket ();
break;
case ')':
if (firstChar)
info->parameterCount = 0;
--depth;
break;
case '(':
info->isKnrParamList = FALSE;
if (firstChar)
{
info->isNameCandidate = FALSE;
cppUngetc (c);
vStringClear (Signature);
skipMacro (st);
depth = 0;
vStringChop (Signature);
}
else if (isType (token, TOKEN_PAREN_NAME))
{
c = skipToNonWhite ();
if (c == '*') /* check for function pointer */
{
skipToMatch ("()");
c = skipToNonWhite ();
if (c == '(')
skipToMatch ("()");
else
cppUngetc (c);
}
else
{
cppUngetc (c);
cppUngetc ('(');
info->nestedArgs = TRUE;
}
}
else
++depth;
break;
default:
if (c == '@' && isInputLanguage (Lang_java))
{
parseJavaAnnotation(st);
}
else if (isident1 (c))
{
if (++identifierCount > 1)
info->isKnrParamList = FALSE;
readIdentifier (token, c);
if (isType (token, TOKEN_NAME) && info->isNameCandidate)
token->type = TOKEN_PAREN_NAME;
else if (isType (token, TOKEN_KEYWORD))
{
if (token->keyword != KEYWORD_CONST &&
token->keyword != KEYWORD_VOLATILE)
{
info->isKnrParamList = FALSE;
info->isNameCandidate = FALSE;
}
}
}
else
{
info->isParamList = FALSE;
info->isKnrParamList = FALSE;
info->isNameCandidate = FALSE;
info->invalidContents = TRUE;
}
break;
}
firstChar = FALSE;
} while (! info->nestedArgs && depth > 0 &&
(info->isKnrParamList || info->isNameCandidate));
if (! info->nestedArgs) while (depth > 0)
{
skipToMatch ("()");
--depth;
}
if (! info->isNameCandidate)
initToken (token);
vStringTerminate (Signature);
if (info->isKnrParamList)
vStringClear (Signature);
CollectingSignature = FALSE;
return nextChar;
}
static void initParenInfo (parenInfo *const info)
{
info->isPointer = FALSE;
info->isParamList = TRUE;
info->isKnrParamList = isInputLanguage (Lang_c);
info->isNameCandidate = TRUE;
info->invalidContents = FALSE;
info->nestedArgs = FALSE;
info->parameterCount = 0;
}
static void analyzeParens (statementInfo *const st)
{
tokenInfo *const prev = prevToken (st, 1);
const tokenInfo *const prev2 = prevToken (st, 2);
if (
st->inFunction &&
!st->assignment &&
!(
/* C++: Accept Type var(...) as variable; */
isInputLanguage(Lang_cpp) &&
isType(prev,TOKEN_NAME) &&
isType(prev2,TOKEN_NAME)
)
)
{
st->notVariable = TRUE;
}
if (! isType (prev, TOKEN_NONE)) /* in case of ignored enclosing macros */
{
tokenInfo *const token = activeToken (st);
parenInfo info;
int c;
initParenInfo (&info);
parseParens (st, &info);
c = skipToNonWhite ();
cppUngetc (c);
if (info.invalidContents)
{
/* FIXME: This breaks parsing of variable instantiations that have
constants as parameters: Type var(0) or Type var("..."). */
reinitStatement (st, FALSE);
}
else if (info.isNameCandidate && isType (token, TOKEN_PAREN_NAME) &&
! st->gotParenName &&
(! info.isParamList || ! st->haveQualifyingName ||
c == '(' ||
(c == '=' && st->implementation != IMP_VIRTUAL && !isInputLanguage (Lang_cpp)) ||
(st->declaration == DECL_NONE && isOneOf (c, ",;"))))
{
token->type = TOKEN_NAME;
processName (st);
st->gotParenName = TRUE;
if (! (c == '(' && info.nestedArgs))
st->isPointer = info.isPointer;
if (isInputLanguage(Lang_d) && c == '(' && isType (prev, TOKEN_NAME))
{
st->declaration = DECL_FUNCTION_TEMPLATE;
copyToken (st->blockName, prev);
}
}
else if (! st->gotArgs && info.isParamList)
{
st->gotArgs = TRUE;
setToken (st, TOKEN_ARGS);
advanceToken (st);
if (st->scope != SCOPE_TYPEDEF)
analyzePostParens (st, &info);
}
else
setToken (st, TOKEN_NONE);
}
}
/*
* Token parsing functions
*/
static void addContext (statementInfo *const st, const tokenInfo* const token)
{
if (isType (token, TOKEN_NAME))
{
if (vStringLength (st->context->name) > 0)
{
if (isInputLanguage (Lang_c) || isInputLanguage (Lang_cpp))
vStringCatS (st->context->name, "::");
else if (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp) ||
isInputLanguage (Lang_d))
vStringCatS (st->context->name, ".");
}
vStringCat (st->context->name, token->name);
st->context->type = TOKEN_NAME;
}
}
static boolean inheritingDeclaration (declType decl)
{
/* enum base types */
if (decl == DECL_ENUM)
{
return (boolean) (isInputLanguage (Lang_cpp) || isInputLanguage (Lang_csharp) ||
isInputLanguage (Lang_d));
}
return (boolean) (
decl == DECL_CLASS ||
decl == DECL_STRUCT ||
decl == DECL_INTERFACE);
}
static void processColon (statementInfo *const st)
{
int c = (isInputLanguage (Lang_cpp) ? cppGetc () : skipToNonWhite ());
const boolean doubleColon = (boolean) (c == ':');
if (doubleColon)
{
setToken (st, TOKEN_DOUBLE_COLON);
st->haveQualifyingName = FALSE;
}
else
{
cppUngetc (c);
if ((isInputLanguage (Lang_cpp) || isInputLanguage (Lang_csharp) || isInputLanguage (Lang_d)) &&
inheritingDeclaration (st->declaration))
{
readParents (st, ':');
}
else if (parentDecl (st) == DECL_STRUCT)
{
c = skipToOneOf (",;");
if (c == ',')
setToken (st, TOKEN_COMMA);
else if (c == ';')
setToken (st, TOKEN_SEMICOLON);
}
else
{
const tokenInfo *const prev = prevToken (st, 1);
const tokenInfo *const prev2 = prevToken (st, 2);
if (prev->keyword == KEYWORD_DEFAULT ||
prev2->keyword == KEYWORD_CASE)
{
reinitStatement (st, FALSE);
}
else if (st->parent != NULL)
{
if (prevToken (st->parent, 1)->keyword != KEYWORD_SWITCH)
makeTag (prev, st, FALSE, TAG_LABEL);
reinitStatement (st, FALSE);
}
}
}
}
/* Skips over any initializing value which may follow an '=' character in a
* variable definition.
*/
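/* For example (illustrative): in "int x = f (a, b), y = {1, 2};" the
 * parentheses and braces are skipped as balanced groups and scanning
 * stops at each top-level ',' and at the final ';'. */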
static int skipInitializer (statementInfo *const st)
{
boolean done = FALSE;
int c;
while (! done)
{
c = skipToNonWhite ();
if (c == EOF)
longjmp (Exception, (int) ExceptionFormattingError);
else switch (c)
{
case ',':
case ';': done = TRUE; break;
case '0':
if (st->implementation == IMP_VIRTUAL)
st->implementation = IMP_PURE_VIRTUAL;
break;
case '[': skipToMatch ("[]"); break;
case '(': skipToMatch ("()"); break;
case '{': skipToMatch ("{}"); break;
case '<': processAngleBracket(); break;
case '}':
if (insideEnumBody (st))
done = TRUE;
else if (! isBraceFormat ())
{
verbose ("%s: unexpected closing brace at line %lu\n",
getInputFileName (), getInputLineNumber ());
longjmp (Exception, (int) ExceptionBraceFormattingError);
}
break;
default: break;
}
}
return c;
}
static void processInitializer (statementInfo *const st)
{
const boolean inEnumBody = insideEnumBody (st);
int c = cppGetc ();
if (c != '=')
{
cppUngetc (c);
c = skipInitializer (st);
st->assignment = TRUE;
if (c == ';')
setToken (st, TOKEN_SEMICOLON);
else if (c == ',')
setToken (st, TOKEN_COMMA);
else if (c == '}' && inEnumBody)
{
cppUngetc (c);
setToken (st, TOKEN_COMMA);
}
if (st->scope == SCOPE_EXTERN)
st->scope = SCOPE_GLOBAL;
}
}
static void parseIdentifier (statementInfo *const st, const int c)
{
tokenInfo *const token = activeToken (st);
readIdentifier (token, c);
if (! isType (token, TOKEN_NONE))
processToken (token, st);
}
static void parseGeneralToken (statementInfo *const st, const int c)
{
const tokenInfo *const prev = prevToken (st, 1);
if (isident1 (c) || (isInputLanguage (Lang_java) && isHighChar (c)))
{
parseIdentifier (st, c);
if (isType (st->context, TOKEN_NAME) &&
isType (activeToken (st), TOKEN_NAME) && isType (prev, TOKEN_NAME))
{
initToken (st->context);
}
}
else if (c == '.' || c == '-')
{
if (! st->assignment)
st->notVariable = TRUE;
if (c == '-')
{
int c2 = cppGetc ();
if (c2 != '>')
cppUngetc (c2);
}
}
else if (c == '!' || c == '>')
{
int c2 = cppGetc ();
if (c2 != '=')
cppUngetc (c2);
}
else if (c == '@' && isInputLanguage (Lang_java))
{
parseJavaAnnotation (st);
}
else if (isExternCDecl (st, c))
{
st->declaration = DECL_NOMANGLE;
st->scope = SCOPE_GLOBAL;
} else if (c == STRING_SYMBOL) {
setToken(st, TOKEN_NONE);
}
}
/* Reads characters from the pre-processor and assembles tokens, setting
* the current statement state.
*/
static void nextToken (statementInfo *const st)
{
tokenInfo *token;
do
{
int c = skipToNonWhite ();
switch (c)
{
case EOF: longjmp (Exception, (int) ExceptionEOF); break;
case '(': analyzeParens (st); break;
case '<': processAngleBracket (); break;
case '*': st->haveQualifyingName = FALSE; break;
case ',': setToken (st, TOKEN_COMMA); break;
case ':': processColon (st); break;
case ';': setToken (st, TOKEN_SEMICOLON); break;
case '=': processInitializer (st); break;
case '[': skipToMatch ("[]"); break;
case '{': setToken (st, TOKEN_BRACE_OPEN); break;
case '}': setToken (st, TOKEN_BRACE_CLOSE); break;
default: parseGeneralToken (st, c); break;
}
token = activeToken (st);
} while (isType (token, TOKEN_NONE));
}
/*
* Scanning support functions
*/
static statementInfo *CurrentStatement = NULL;
static statementInfo *newStatement (statementInfo *const parent)
{
statementInfo *const st = xMalloc (1, statementInfo);
unsigned int i;
for (i = 0 ; i < (unsigned int) NumTokens ; ++i)
st->token [i] = newToken ();
st->context = newToken ();
st->blockName = newToken ();
st->parentClasses = vStringNew ();
initStatement (st, parent);
CurrentStatement = st;
return st;
}
static void deleteStatement (void)
{
statementInfo *const st = CurrentStatement;
statementInfo *const parent = st->parent;
unsigned int i;
for (i = 0 ; i < (unsigned int) NumTokens ; ++i)
{
deleteToken (st->token [i]); st->token [i] = NULL;
}
deleteToken (st->blockName); st->blockName = NULL;
deleteToken (st->context); st->context = NULL;
vStringDelete (st->parentClasses); st->parentClasses = NULL;
eFree (st);
CurrentStatement = parent;
}
static void deleteAllStatements (void)
{
while (CurrentStatement != NULL)
deleteStatement ();
}
static boolean isStatementEnd (const statementInfo *const st)
{
const tokenInfo *const token = activeToken (st);
boolean isEnd;
if (isType (token, TOKEN_SEMICOLON))
isEnd = TRUE;
else if (isType (token, TOKEN_BRACE_CLOSE))
/* Java and C# do not require semicolons to end a block. Neither do C++
* namespaces. All other blocks require a semicolon to terminate them.
*/
isEnd = (boolean) (isInputLanguage (Lang_java) || isInputLanguage (Lang_csharp) ||
isInputLanguage (Lang_d) || ! isContextualStatement (st));
else
isEnd = FALSE;
return isEnd;
}
static void checkStatementEnd (statementInfo *const st)
{
const tokenInfo *const token = activeToken (st);
if (isType (token, TOKEN_COMMA))
reinitStatement (st, TRUE);
else if (isStatementEnd (st))
{
DebugStatement ( if (debug (DEBUG_PARSE)) printf ("<ES>"); )
reinitStatement (st, FALSE);
cppEndStatement ();
}
else
{
cppBeginStatement ();
advanceToken (st);
}
}
static void nest (statementInfo *const st, const unsigned int nestLevel)
{
switch (st->declaration)
{
case DECL_TEMPLATE:
case DECL_VERSION:
st->inFunction = FALSE;
case DECL_CLASS:
case DECL_ENUM:
case DECL_INTERFACE:
case DECL_NAMESPACE:
case DECL_NOMANGLE:
case DECL_PRIVATE:
case DECL_PROTECTED:
case DECL_PUBLIC:
case DECL_STRUCT:
case DECL_UNION:
case DECL_ANNOTATION:
createTags (nestLevel, st);
break;
case DECL_FUNCTION:
case DECL_TASK:
st->inFunction = TRUE;
/* fall through */
default:
if (includeTag (TAG_LOCAL, FALSE) || includeTag (TAG_LABEL, FALSE))
createTags (nestLevel, st);
else
skipToMatch ("{}");
break;
}
advanceToken (st);
setToken (st, TOKEN_BRACE_CLOSE);
}
static void tagCheck (statementInfo *const st)
{
const tokenInfo *const token = activeToken (st);
const tokenInfo *const prev = prevToken (st, 1);
const tokenInfo *const prev2 = prevToken (st, 2);
switch (token->type)
{
case TOKEN_NAME:
if (insideEnumBody (st))
qualifyEnumeratorTag (st, token);
if (st->declaration == DECL_MIXIN)
makeTag (token, st, FALSE, TAG_MIXIN);
if (isInputLanguage (Lang_vera) && insideInterfaceBody (st))
{
/* Quoted from
http://www.asic-world.com/vera/hdl1.html#Interface_Declaration
------------------------------------------------
interface interface_name
{
signal_direction [signal_width] signal_name signal_type
[skew] [depth value][vca q_value][force][hdl_node "hdl_path"];
}
Where
signal_direction : This can be one of the following
input : ...
output : ...
inout : ...
signal_width : The signal_width is a range specifying the width of
a vector signal. It must be in the form [msb:lsb].
Interface signals can have any integer lsb value,
even a negative value. The default width is 1.
signal_name : The signal_name identifies the signal being defined.
It is the Vera name for the HDL signal being connected.
signal_type : There are many signals types, most commonly used one are
NHOLD : ...
PHOLD : ...
PHOLD NHOLD : ...
NSAMPLE : ...
PSAMPLE : ...
PSAMPLE NSAMPLE : ...
CLOCK : ...
PSAMPLE PHOLD : ...
NSAMPLE NHOLD : ...
PSAMPLE PHOLD NSAMPLE NHOLD : ...
------------------------------------------------
We want to capture "signal_name" here.
*/
if (( isType (prev, TOKEN_KEYWORD)
&& isSignalDirection(prev) ) ||
( isType (prev2, TOKEN_KEYWORD)
&& isSignalDirection(prev2) ))
makeTag (token, st, FALSE, TAG_SIGNAL);
}
break;
#if 0
case TOKEN_PACKAGE:
if (st->haveQualifyingName)
makeTag (token, st, FALSE, TAG_PACKAGE);
break;
#endif
case TOKEN_BRACE_OPEN:
if (isType (prev, TOKEN_ARGS))
{
if (st->declaration == DECL_TEMPLATE)
qualifyBlockTag (st, prev2);
else if (st->declaration == DECL_FUNCTION_TEMPLATE) {
qualifyFunctionTag (st, st->blockName);
}
else if (st->haveQualifyingName)
{
if (isType (prev2, TOKEN_NAME))
copyToken (st->blockName, prev2);
/* D declaration templates */
if (isInputLanguage (Lang_d) &&
(st->declaration == DECL_CLASS || st->declaration == DECL_STRUCT ||
st->declaration == DECL_INTERFACE || st->declaration == DECL_UNION))
qualifyBlockTag (st, prev2);
else if(isInputLanguage (Lang_cpp) && st->inFunction)
{
/* Ignore. C/C++ allows nested function prototypes but
this code actually catches far too many of them.
Better some missing tags than a lot of false positives. */
}
else
{
if (! isInputLanguage (Lang_vera))
st->declaration = DECL_FUNCTION;
qualifyFunctionTag (st, prev2);
}
}
}
else if (isContextualStatement (st) ||
st->declaration == DECL_VERSION ||
st->declaration == DECL_PROGRAM)
{
const tokenInfo *name_token = prev;
/* C++ 11 allows class <name> final { ... } */
if (isInputLanguage (Lang_cpp) && isType (prev, TOKEN_NAME) &&
strcmp("final", vStringValue(prev->name)) == 0 &&
isType(prev2, TOKEN_NAME))
{
name_token = prev2;
}
if (isType (name_token, TOKEN_NAME))
copyToken (st->blockName, name_token);
else
{
/* For an anonymous struct or union we use a unique ID,
* a number, so that the members can be found.
*/
char buf [20]; /* length of "__anon" + digits + null */
sprintf (buf, "__anon%d", ++AnonymousID);
vStringCopyS (st->blockName->name, buf);
st->blockName->type = TOKEN_NAME;
st->blockName->keyword = KEYWORD_NONE;
}
qualifyBlockTag (st, name_token);
}
else if (isInputLanguage (Lang_csharp))
makeTag (prev, st, FALSE, TAG_PROPERTY);
break;
case TOKEN_KEYWORD:
if (token->keyword == KEYWORD_DEFAULT && isType(prev, TOKEN_ARGS) && insideAnnotationBody(st)) {
qualifyFunctionDeclTag(st, prev2);
}
break;
case TOKEN_SEMICOLON:
case TOKEN_COMMA:
if (insideEnumBody (st))
;
else if (isType (prev, TOKEN_NAME))
{
if (isContextualKeyword (prev2))
makeTag (prev, st, TRUE, TAG_EXTERN_VAR);
else
qualifyVariableTag (st, prev);
}
else if (isType (prev, TOKEN_ARGS) && isType (prev2, TOKEN_NAME))
{
if (st->isPointer || st->inFunction)
{
/* If it looks like a pointer or we are in a function body then
it's far more likely to be a variable. */
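/* Illustrative: "int (*fp) (void);" reaches this branch with
 * isPointer set, so fp is tagged as a variable, not a prototype. */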
qualifyVariableTag (st, prev2);
}
else
qualifyFunctionDeclTag (st, prev2);
}
if (isInputLanguage (Lang_java) && token->type == TOKEN_SEMICOLON && insideEnumBody (st))
{
/* In Java, after an initial enum-like part,
* a semicolon introduces a class-like part.
* See Bug #1730485 for the full rationale. */
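/* Illustrative Java input:
 *     enum Color { RED, GREEN; public int rgb () { ... } }
 * everything after the ';' is treated as the class-like part. */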
st->parent->declaration = DECL_CLASS;
}
break;
default: break;
}
}
/* Parses the current file and decides whether to write out any tags that
* are discovered.
*/
static void createTags (const unsigned int nestLevel,
statementInfo *const parent)
{
statementInfo *const st = newStatement (parent);
DebugStatement ( if (nestLevel > 0) debugParseNest (TRUE, nestLevel); )
while (TRUE)
{
tokenInfo *token;
nextToken (st);
token = activeToken (st);
if (isType (token, TOKEN_BRACE_CLOSE))
{
if (nestLevel > 0)
break;
else
{
verbose ("%s: unexpected closing brace at line %lu\n",
getInputFileName (), getInputLineNumber ());
longjmp (Exception, (int) ExceptionBraceFormattingError);
}
}
else if (isType (token, TOKEN_DOUBLE_COLON))
{
addContext (st, prevToken (st, 1));
advanceToken (st);
}
else
{
tagCheck (st);
if (isType (token, TOKEN_BRACE_OPEN))
nest (st, nestLevel + 1);
checkStatementEnd (st);
}
}
deleteStatement ();
DebugStatement ( if (nestLevel > 0) debugParseNest (FALSE, nestLevel - 1); )
}
static rescanReason findCTags (const unsigned int passCount)
{
exception_t exception;
rescanReason rescan;
kindOption *kind_for_define = NULL;
kindOption *kind_for_header = NULL;
int role_for_macro_undef = ROLE_INDEX_DEFINITION;
int role_for_header_system = ROLE_INDEX_DEFINITION;
int role_for_header_local = ROLE_INDEX_DEFINITION;
Assert (passCount < 3);
AnonymousID = 0;
if (isInputLanguage (Lang_c) || isInputLanguage (Lang_cpp))
{
kind_for_define = CKinds+CK_DEFINE;
kind_for_header = CKinds+CK_HEADER;
role_for_macro_undef = CR_MACRO_UNDEF;
role_for_header_system = CR_HEADER_SYSTEM;
role_for_header_local = CR_HEADER_LOCAL;
}
else if (isInputLanguage (Lang_vera))
{
kind_for_define = VeraKinds+VK_DEFINE;
kind_for_header = VeraKinds+VK_HEADER;
role_for_macro_undef = VR_MACRO_UNDEF;
role_for_header_system = VR_HEADER_SYSTEM;
role_for_header_local = VR_HEADER_LOCAL;
}
cppInit ((boolean) (passCount > 1), isInputLanguage (Lang_csharp), isInputLanguage(Lang_cpp),
isInputLanguage(Lang_vera),
kind_for_define, role_for_macro_undef,
kind_for_header, role_for_header_system, role_for_header_local);
Signature = vStringNew ();
exception = (exception_t) setjmp (Exception);
rescan = RESCAN_NONE;
if (exception == ExceptionNone)
createTags (0, NULL);
else
{
deleteAllStatements ();
if (exception == ExceptionBraceFormattingError && passCount == 1)
{
rescan = RESCAN_FAILED;
verbose ("%s: retrying file with fallback brace matching algorithm\n",
getInputFileName ());
}
}
vStringDelete (Signature);
cppTerminate ();
return rescan;
}
static void buildKeywordHash (const langType language, unsigned int idx)
{
const size_t count = ARRAY_SIZE (KeywordTable);
size_t i;
for (i = 0 ; i < count ; ++i)
{
const keywordDesc* const p = &KeywordTable [i];
if (p->isValid [idx])
addKeyword (p->name, language, (int) p->id);
}
}
static void initializeCParser (const langType language)
{
Lang_c = language;
buildKeywordHash (language, 0);
}
static void initializeCppParser (const langType language)
{
Lang_cpp = language;
buildKeywordHash (language, 1);
}
static void initializeCsharpParser (const langType language)
{
Lang_csharp = language;
buildKeywordHash (language, 2);
}
static void initializeDParser (const langType language)
{
Lang_d = language;
buildKeywordHash (language, 3);
}
static void initializeJavaParser (const langType language)
{
Lang_java = language;
buildKeywordHash (language, 4);
}
static void initializeVeraParser (const langType language)
{
Lang_vera = language;
buildKeywordHash (language, 5);
}
extern parserDefinition* CParser (void)
{
static const char *const extensions [] = { "c", NULL };
parserDefinition* def = parserNew ("C");
def->kinds = CKinds;
def->kindCount = ARRAY_SIZE (CKinds);
def->extensions = extensions;
def->parser2 = findCTags;
def->initialize = initializeCParser;
return def;
}
extern parserDefinition* DParser (void)
{
static const char *const extensions [] = { "d", "di", NULL };
parserDefinition* def = parserNew ("D");
def->kinds = DKinds;
def->kindCount = ARRAY_SIZE (DKinds);
def->extensions = extensions;
def->parser2 = findCTags;
def->initialize = initializeDParser;
return def;
}
extern parserDefinition* CppParser (void)
{
static const char *const extensions [] = {
"c++", "cc", "cp", "cpp", "cxx",
"h", "h++", "hh", "hp", "hpp", "hxx", "inl",
#ifndef CASE_INSENSITIVE_FILENAMES
"C", "H",
#endif
NULL
};
static selectLanguage selectors[] = { selectByObjectiveCKeywords,
NULL };
parserDefinition* def = parserNew ("C++");
def->kinds = CKinds;
def->kindCount = ARRAY_SIZE (CKinds);
def->extensions = extensions;
def->parser2 = findCTags;
def->initialize = initializeCppParser;
def->selectLanguage = selectors;
return def;
}
extern parserDefinition* CsharpParser (void)
{
static const char *const extensions [] = { "cs", NULL };
static const char *const aliases [] = { "csharp", NULL };
parserDefinition* def = parserNew ("C#");
def->kinds = CsharpKinds;
def->kindCount = ARRAY_SIZE (CsharpKinds);
def->extensions = extensions;
def->aliases = aliases;
def->parser2 = findCTags;
def->initialize = initializeCsharpParser;
return def;
}
extern parserDefinition* JavaParser (void)
{
static const char *const extensions [] = { "java", NULL };
parserDefinition* def = parserNew ("Java");
def->kinds = JavaKinds;
def->kindCount = ARRAY_SIZE (JavaKinds);
def->extensions = extensions;
def->parser2 = findCTags;
def->initialize = initializeJavaParser;
return def;
}
extern parserDefinition* VeraParser (void)
{
static const char *const extensions [] = { "vr", "vri", "vrh", NULL };
parserDefinition* def = parserNew ("Vera");
def->kinds = VeraKinds;
def->kindCount = ARRAY_SIZE (VeraKinds);
def->extensions = extensions;
def->parser2 = findCTags;
def->initialize = initializeVeraParser;
return def;
}
/* vi:set tabstop=4 shiftwidth=4 noexpandtab: */
| 1 | 13,214 | If I understand the patch correctly, the `else` is no longer required. | universal-ctags-ctags | c |
@@ -11,14 +11,14 @@ import (
var (
dealsSearchCount uint64
- addToBlacklist bool
+ blacklistTypeStr string
crNewDurationFlag string
crNewPriceFlag string
)
func init() {
dealListCmd.PersistentFlags().Uint64Var(&dealsSearchCount, "limit", 10, "Deals count to show")
- dealCloseCmd.PersistentFlags().BoolVar(&addToBlacklist, "blacklist", false, "Add counterparty to blacklist")
+ dealCloseCmd.PersistentFlags().StringVar(&blacklistTypeStr, "blacklist", "none", "Whom to add to blacklist (worker, master or neither)")
changeRequestCreateCmd.PersistentFlags().StringVar(&crNewDurationFlag, "new-duration", "", "Propose new duration for a deal")
changeRequestCreateCmd.PersistentFlags().StringVar(&crNewPriceFlag, "new-price", "", "Propose new price for a deal")
| 1 | package commands
import (
"os"
"time"
pb "github.com/sonm-io/core/proto"
"github.com/sonm-io/core/util"
"github.com/spf13/cobra"
)
var (
dealsSearchCount uint64
addToBlacklist bool
crNewDurationFlag string
crNewPriceFlag string
)
func init() {
dealListCmd.PersistentFlags().Uint64Var(&dealsSearchCount, "limit", 10, "Deals count to show")
dealCloseCmd.PersistentFlags().BoolVar(&addToBlacklist, "blacklist", false, "Add counterparty to blacklist")
changeRequestCreateCmd.PersistentFlags().StringVar(&crNewDurationFlag, "new-duration", "", "Propose new duration for a deal")
changeRequestCreateCmd.PersistentFlags().StringVar(&crNewPriceFlag, "new-price", "", "Propose new price for a deal")
changeRequestsRoot.AddCommand(
changeRequestCreateCmd,
changeRequestApproveCmd,
changeRequestCancelCmd,
)
dealRootCmd.AddCommand(
dealListCmd,
dealStatusCmd,
dealOpenCmd,
dealQuickBuyCmd,
dealCloseCmd,
changeRequestsRoot,
)
}
var dealRootCmd = &cobra.Command{
Use: "deal",
Short: "Manage deals",
}
var dealListCmd = &cobra.Command{
Use: "list",
Short: "Show your active deals",
PreRun: loadKeyStoreWrapper,
Run: func(cmd *cobra.Command, _ []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
dealer, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
req := &pb.Count{Count: dealsSearchCount}
deals, err := dealer.List(ctx, req)
if err != nil {
showError(cmd, "Cannot get deals list", err)
os.Exit(1)
}
printDealsList(cmd, deals.GetDeal())
},
}
var dealStatusCmd = &cobra.Command{
Use: "status <deal_id>",
Short: "Show deal status",
Args: cobra.MinimumNArgs(1),
PreRun: loadKeyStoreIfRequired,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
dealer, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
id, err := pb.NewBigIntFromString(args[0])
if err != nil {
showError(cmd, "Cannot convert arg to number", err)
os.Exit(1)
}
reply, err := dealer.Status(ctx, id)
if err != nil {
showError(cmd, "Cannot get deal info", err)
os.Exit(1)
}
changeRequests, _ := dealer.ChangeRequestsList(ctx, id)
printDealInfo(cmd, reply, changeRequests)
},
}
var dealOpenCmd = &cobra.Command{
Use: "open <ask_id> <bid_id>",
Short: "Open deal with given orders",
Args: cobra.MinimumNArgs(2),
PreRun: loadKeyStoreWrapper,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
askID, err := util.ParseBigInt(args[0])
if err != nil {
// do not wrap the error with human-readable text, the error text is self-explanatory.
showError(cmd, err.Error(), nil)
os.Exit(1)
}
bidID, err := util.ParseBigInt(args[1])
if err != nil {
showError(cmd, err.Error(), nil)
os.Exit(1)
}
deals, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create blockchain connection", err)
os.Exit(1)
}
deal, err := deals.Open(ctx, &pb.OpenDealRequest{
BidID: pb.NewBigInt(bidID),
AskID: pb.NewBigInt(askID),
})
if err != nil {
showError(cmd, "Cannot open deal", err)
os.Exit(1)
}
printID(cmd, deal.GetId().Unwrap().String())
},
}
var dealQuickBuyCmd = &cobra.Command{
Use: "quick-buy <ask_id>",
Short: "Copy given ASK order with BID type and open a deal with this orders",
Args: cobra.MinimumNArgs(1),
PreRun: loadKeyStoreWrapper,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
deals, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
id, err := pb.NewBigIntFromString(args[0])
if err != nil {
showError(cmd, "Cannot convert arg to number", err)
os.Exit(1)
}
deal, err := deals.QuickBuy(ctx, id)
if err != nil {
showError(cmd, "Cannot perform quick buy on given order", err)
os.Exit(1)
}
printDealInfo(cmd, &pb.DealInfoReply{Deal: deal}, nil)
},
}
var dealCloseCmd = &cobra.Command{
Use: "close <deal_id>",
Short: "Close given deal",
Args: cobra.MinimumNArgs(1),
PreRun: loadKeyStoreIfRequired,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
dealer, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
id, err := util.ParseBigInt(args[0])
if err != nil {
showError(cmd, "Cannot convert arg to number", err)
os.Exit(1)
}
_, err = dealer.Finish(ctx, &pb.DealFinishRequest{
Id: pb.NewBigInt(id),
AddToBlacklist: addToBlacklist,
})
if err != nil {
showError(cmd, "Cannot finish deal", err)
os.Exit(1)
}
showOk(cmd)
},
}
var changeRequestsRoot = &cobra.Command{
Use: "change-request",
Short: "Request changes for deals",
}
var changeRequestCreateCmd = &cobra.Command{
Use: "create <deal_id>",
// space is added to align `usage` and `example` output in cobra's help message
Example: " sonmcli deal change-request create 123 --new-duration=10h --new-price=0.3USD/h",
Short: "Request changes for given deal",
Args: cobra.MinimumNArgs(1),
PreRun: loadKeyStoreIfRequired,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
dealer, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
id, err := util.ParseBigInt(args[0])
if err != nil {
showError(cmd, "Cannot convert arg to id", err)
os.Exit(1)
}
durationRaw := cmd.Flag("new-duration").Value.String()
priceRaw := cmd.Flag("new-price").Value.String()
// check that at least one flag is present
if len(durationRaw) == 0 && len(priceRaw) == 0 {
showError(cmd, "Please specify at least one flag: --new-duration or --new-price", nil)
os.Exit(1)
}
var newPrice = &pb.Price{}
var newDuration time.Duration
if len(durationRaw) > 0 {
newDuration, err = time.ParseDuration(durationRaw)
if err != nil {
showError(cmd, "Cannot convert flag value to duration", err)
os.Exit(1)
}
}
if len(priceRaw) > 0 {
if err := newPrice.LoadFromString(priceRaw); err != nil {
showError(cmd, "Cannot convert flag value to price", err)
os.Exit(1)
}
}
req := &pb.DealChangeRequest{
DealID: pb.NewBigInt(id),
Duration: uint64(newDuration.Seconds()),
Price: newPrice.GetPerSecond(),
}
crid, err := dealer.CreateChangeRequest(ctx, req)
if err != nil {
showError(cmd, "Cannot create change request", err)
os.Exit(1)
}
cmd.Printf("Change request ID = %v\n", crid.Unwrap().String())
},
}
var changeRequestApproveCmd = &cobra.Command{
Use: "approve <req_id>",
Short: "Agree to change deal conditions with given change request",
Args: cobra.MinimumNArgs(1),
PreRun: loadKeyStoreIfRequired,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
dealer, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
id, err := util.ParseBigInt(args[0])
if err != nil {
showError(cmd, "Cannot convert arg to id", err)
os.Exit(1)
}
if _, err := dealer.ApproveChangeRequest(ctx, pb.NewBigInt(id)); err != nil {
showError(cmd, "Cannot approve change request", err)
os.Exit(1)
}
showOk(cmd)
},
}
var changeRequestCancelCmd = &cobra.Command{
Use: "cancel <req_id>",
Short: "Decline given change request",
Args: cobra.MinimumNArgs(1),
PreRun: loadKeyStoreIfRequired,
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := newTimeoutContext()
defer cancel()
dealer, err := newDealsClient(ctx)
if err != nil {
showError(cmd, "Cannot create client connection", err)
os.Exit(1)
}
id, err := util.ParseBigInt(args[0])
if err != nil {
showError(cmd, "Cannot convert arg to id", err)
os.Exit(1)
}
if _, err := dealer.CancelChangeRequest(ctx, pb.NewBigInt(id)); err != nil {
showError(cmd, "Cannot cancel change request", err)
os.Exit(1)
}
showOk(cmd)
},
}
| 1 | 7,153 | Properly describe valid flag values here: `neither` should be replaced with `none`, as those are the values parsed below. | sonm-io-core | go |
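A minimal sketch of how the string flag's value might be validated (hypothetical; `parseBlacklistType`, `blacklistType`, and the exact value set are assumptions for illustration, not the actual sonm-io API):

package main

import (
	"errors"
	"fmt"
)

// blacklistType enumerates the values the --blacklist flag accepts.
type blacklistType string

const (
	blacklistNone   blacklistType = "none"
	blacklistWorker blacklistType = "worker"
	blacklistMaster blacklistType = "master"
)

// parseBlacklistType rejects anything outside the documented value set,
// keeping the help text and the parser in sync.
func parseBlacklistType(s string) (blacklistType, error) {
	switch t := blacklistType(s); t {
	case blacklistNone, blacklistWorker, blacklistMaster:
		return t, nil
	default:
		return "", errors.New(`--blacklist must be one of "none", "worker" or "master"`)
	}
}

func main() {
	t, err := parseBlacklistType("worker")
	fmt.Println(t, err)
}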
@@ -379,7 +379,16 @@ public class AzkabanExecutorServer {
logger.info(("Exception when logging top memory consumers"), e);
}
- logger.info("Shutting down...");
+ String host = app.getHost();
+ int port = app.getPort();
+ try {
+ logger.info(String.format("Removing executor(host: %s, port: %s) entry from database...", host, port));
+ app.getExecutorLoader().removeExecutor(host, port);
+ } catch (ExecutorManagerException ex) {
+ logger.error(String.format("Exception when removing executor(host: %s, port: %s)", host, port), ex);
+ }
+
+ logger.info("Shutting down executor...");
try {
app.shutdownNow();
} catch (Exception e) { | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import com.google.common.base.Throwables;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.joda.time.DateTimeZone;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Constructor;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.TimeZone;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import azkaban.constants.ServerInternals;
import azkaban.constants.ServerProperties;
import azkaban.execapp.event.JobCallbackManager;
import azkaban.execapp.jmx.JmxFlowRunnerManager;
import azkaban.execapp.jmx.JmxJobMBeanManager;
import azkaban.execapp.metric.NumFailedFlowMetric;
import azkaban.execapp.metric.NumFailedJobMetric;
import azkaban.execapp.metric.NumQueuedFlowMetric;
import azkaban.execapp.metric.NumRunningFlowMetric;
import azkaban.execapp.metric.NumRunningJobMetric;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.JdbcExecutorLoader;
import azkaban.jmx.JmxJettyServer;
import azkaban.metric.IMetricEmitter;
import azkaban.metric.MetricException;
import azkaban.metric.MetricReportManager;
import azkaban.metric.inmemoryemitter.InMemoryMetricEmitter;
import azkaban.project.JdbcProjectLoader;
import azkaban.project.ProjectLoader;
import azkaban.server.AzkabanServer;
import azkaban.utils.Props;
import azkaban.utils.StdOutErrRedirect;
import azkaban.utils.SystemMemoryInfo;
import azkaban.utils.Utils;
import azkaban.metrics.MetricsManager;
import static azkaban.constants.ServerInternals.AZKABAN_EXECUTOR_PORT_FILENAME;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
public class AzkabanExecutorServer {
private static final String CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY = "jmx.attribute.processor.class";
private static final Logger logger = Logger.getLogger(AzkabanExecutorServer.class);
private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
public static final String JOBTYPE_PLUGIN_DIR = "azkaban.jobtype.plugin.dir";
public static final String METRIC_INTERVAL = "executor.metric.milisecinterval.";
public static final int DEFAULT_HEADER_BUFFER_SIZE = 4096;
private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
private static final int DEFAULT_THREAD_NUMBER = 50;
private static AzkabanExecutorServer app;
private final ExecutorLoader executionLoader;
private final ProjectLoader projectLoader;
private final FlowRunnerManager runnerManager;
private final Props props;
private final Server server;
private final ArrayList<ObjectName> registeredMBeans = new ArrayList<ObjectName>();
private MBeanServer mbeanServer;
/**
* Constructor
*
* @throws Exception
*/
public AzkabanExecutorServer(Props props) throws Exception {
this.props = props;
server = createJettyServer(props);
executionLoader = new JdbcExecutorLoader(props);
projectLoader = new JdbcProjectLoader(props);
runnerManager = new FlowRunnerManager(props, executionLoader, projectLoader, getClass().getClassLoader());
JmxJobMBeanManager.getInstance().initialize(props);
// make sure JmxJobMBeanManager is initialized before the configuration calls below
configureJobCallback(props);
configureMBeanServer();
configureMetricReports();
SystemMemoryInfo.init(props.getInt("executor.memCheck.interval", 30));
loadCustomJMXAttributeProcessor(props);
try {
server.start();
} catch (Exception e) {
logger.error(e);
Utils.croak(e.getMessage(), 1);
}
insertExecutorEntryIntoDB();
dumpPortToFile();
logger.info("Started Executor Server on " + getExecutorHostPort());
if (props.getBoolean(ServerProperties.IS_METRICS_ENABLED, false)) {
startExecMetrics();
}
}
private Server createJettyServer(Props props) {
int maxThreads = props.getInt("executor.maxThreads", DEFAULT_THREAD_NUMBER);
/*
* Default to a port number 0 (zero)
* The Jetty server automatically finds an unused port when the port number is set to zero
* TODO: This is using a highly outdated version of jetty [year 2010]; it needs to be updated.
*/
Server server = new Server(props.getInt("executor.port", 0));
QueuedThreadPool httpThreadPool = new QueuedThreadPool(maxThreads);
server.setThreadPool(httpThreadPool);
boolean isStatsOn = props.getBoolean("executor.connector.stats", true);
logger.info("Setting up connector with stats on: " + isStatsOn);
for (Connector connector : server.getConnectors()) {
connector.setStatsOn(isStatsOn);
logger.info(String.format(
"Jetty connector name: %s, default header buffer size: %d",
connector.getName(), connector.getHeaderBufferSize()));
connector.setHeaderBufferSize(props.getInt("jetty.headerBufferSize",
DEFAULT_HEADER_BUFFER_SIZE));
logger.info(String.format(
"Jetty connector name: %s, (if) new header buffer size: %d",
connector.getName(), connector.getHeaderBufferSize()));
}
Context root = new Context(server, "/", Context.SESSIONS);
root.setMaxFormContentSize(MAX_FORM_CONTENT_SIZE);
root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor");
root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx");
root.addServlet(new ServletHolder(new StatsServlet()), "/stats");
root.addServlet(new ServletHolder(new ServerStatisticsServlet()), "/serverStatistics");
root.setAttribute(ServerInternals.AZKABAN_SERVLET_CONTEXT_KEY, this);
return server;
}
private void startExecMetrics() throws Exception {
ExecMetrics.INSTANCE.addFlowRunnerManagerMetrics(getFlowRunnerManager());
logger.info("starting reporting Executor Metrics");
MetricsManager.INSTANCE.startReporting("AZ-EXEC", props);
}
private void insertExecutorEntryIntoDB() {
try {
final String host = requireNonNull(getHost());
final int port = getPort();
checkState(port != -1);
final Executor executor = executionLoader.fetchExecutor(host, port);
if (executor == null) {
executionLoader.addExecutor(host, port);
}
// If executor already exists, ignore it
} catch (ExecutorManagerException e) {
logger.error("Error inserting executor entry into DB", e);
Throwables.propagate(e);
}
}
private void dumpPortToFile() {
// By default this should write to the working directory
try (BufferedWriter writer = new BufferedWriter(new FileWriter(AZKABAN_EXECUTOR_PORT_FILENAME))) {
writer.write(String.valueOf(getPort()));
writer.write("\n");
} catch (IOException e) {
logger.error(e);
Throwables.propagate(e);
}
}
private void configureJobCallback(Props props) {
boolean jobCallbackEnabled =
props.getBoolean("azkaban.executor.jobcallback.enabled", true);
logger.info("Job callback enabled? " + jobCallbackEnabled);
if (jobCallbackEnabled) {
JobCallbackManager.initialize(props);
}
}
/**
* Configure Metric Reporting as per azkaban.properties settings
*
* @throws MetricException
*/
private void configureMetricReports() throws MetricException {
Props props = getAzkabanProps();
if (props != null && props.getBoolean("executor.metric.reports", false)) {
logger.info("Starting to configure Metric Reports");
MetricReportManager metricManager = MetricReportManager.getInstance();
IMetricEmitter metricEmitter = new InMemoryMetricEmitter(props);
metricManager.addMetricEmitter(metricEmitter);
logger.info("Adding number of failed flow metric");
metricManager.addMetric(new NumFailedFlowMetric(metricManager, props
.getInt(METRIC_INTERVAL
+ NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of failed jobs metric");
metricManager.addMetric(new NumFailedJobMetric(metricManager, props
.getInt(METRIC_INTERVAL
+ NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of running Jobs metric");
metricManager.addMetric(new NumRunningJobMetric(metricManager, props
.getInt(METRIC_INTERVAL
+ NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of running flows metric");
metricManager.addMetric(new NumRunningFlowMetric(runnerManager,
metricManager, props.getInt(METRIC_INTERVAL
+ NumRunningFlowMetric.NUM_RUNNING_FLOW_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of queued flows metric");
metricManager.addMetric(new NumQueuedFlowMetric(runnerManager,
metricManager, props.getInt(METRIC_INTERVAL
+ NumQueuedFlowMetric.NUM_QUEUED_FLOW_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Completed configuring Metric Reports");
}
}
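// Illustrative azkaban.properties snippet (values hypothetical; each metric's
// key suffix comes from its *_METRIC_NAME constant):
//   executor.metric.reports=true
//   executor.metric.milisecinterval.default=30000
//   executor.metric.milisecinterval.<metricName>=60000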
/**
* Load a custom class, which is provided by a configuration
* CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY.
*
* This method will try to instantiate an instance of this custom class and
* with given properties as the argument in the constructor.
*
* Basically the custom class must have a constructor that takes an argument
* with type Properties.
*
* @param props
*/
private void loadCustomJMXAttributeProcessor(Props props) {
String jmxAttributeEmitter =
props.get(CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY);
if (jmxAttributeEmitter != null) {
try {
logger.info("jmxAttributeEmitter: " + jmxAttributeEmitter);
Constructor<Props>[] constructors =
(Constructor<Props>[]) Class.forName(jmxAttributeEmitter)
.getConstructors();
constructors[0].newInstance(props.toProperties());
} catch (Exception e) {
logger.error("Encountered error while loading and instantiating "
+ jmxAttributeEmitter, e);
throw new IllegalStateException(
"Encountered error while loading and instantiating "
+ jmxAttributeEmitter, e);
}
} else {
logger.info("No value for property: "
+ CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY + " was found");
}
}
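// Illustrative sketch for loadCustomJMXAttributeProcessor above (class and
// package names are hypothetical): a compatible processor configured via
// jmx.attribute.processor.class=com.example.MyJmxProcessor only needs a
// Properties constructor, since instantiation alone wires it in:
//   public class MyJmxProcessor {
//     public MyJmxProcessor(java.util.Properties props) { /* read config */ }
//   }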
public ProjectLoader getProjectLoader() {
return projectLoader;
}
public ExecutorLoader getExecutorLoader() {
return executionLoader;
}
/**
* Returns the global azkaban properties
*
* @return
*/
public Props getAzkabanProps() {
return props;
}
/**
* Returns the currently executing executor server, if one exists.
*
* @return
*/
public static AzkabanExecutorServer getApp() {
return app;
}
/**
* Azkaban using Jetty
*
* @param args
* @throws IOException
*/
public static void main(String[] args) throws Exception {
// Redirect all std out and err messages into log4j
StdOutErrRedirect.redirectOutAndErrToLog();
logger.info("Starting Jetty Azkaban Executor...");
Props azkabanSettings = AzkabanServer.loadProps(args);
if (azkabanSettings == null) {
logger.error("Azkaban Properties not loaded.");
logger.error("Exiting Azkaban Executor Server...");
return;
}
// Setup time zone
if (azkabanSettings.containsKey(DEFAULT_TIMEZONE_ID)) {
String timezone = azkabanSettings.getString(DEFAULT_TIMEZONE_ID);
System.setProperty("user.timezone", timezone);
TimeZone.setDefault(TimeZone.getTimeZone(timezone));
DateTimeZone.setDefault(DateTimeZone.forID(timezone));
logger.info("Setting timezone to " + timezone);
}
app = new AzkabanExecutorServer(azkabanSettings);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
logTopMemoryConsumers();
} catch (Exception e) {
logger.info("Exception when logging top memory consumers", e);
}
logger.info("Shutting down...");
try {
app.shutdownNow();
} catch (Exception e) {
logger.error("Error while shutting down http server.", e);
}
}
public void logTopMemoryConsumers() throws Exception {
if (new File("/bin/bash").exists() && new File("/bin/ps").exists()
&& new File("/usr/bin/head").exists()) {
logger.info("logging top memeory consumer");
java.lang.ProcessBuilder processBuilder =
new java.lang.ProcessBuilder("/bin/bash", "-c",
"/bin/ps aux --sort -rss | /usr/bin/head");
Process p = processBuilder.start();
p.waitFor();
InputStream is = p.getInputStream();
java.io.BufferedReader reader =
new java.io.BufferedReader(new InputStreamReader(is));
String line = null;
while ((line = reader.readLine()) != null) {
logger.info(line);
}
is.close();
}
}
});
}
/**
* Loads the Azkaban property file from the AZKABAN_HOME conf directory
*
* @return
*/
/* package */static Props loadConfigurationFromAzkabanHome() {
String azkabanHome = System.getenv("AZKABAN_HOME");
if (azkabanHome == null) {
logger.error("AZKABAN_HOME not set. Will try default.");
return null;
}
if (!new File(azkabanHome).isDirectory()
|| !new File(azkabanHome).canRead()) {
logger.error(azkabanHome + " is not a readable directory.");
return null;
}
File confPath = new File(azkabanHome, ServerInternals.DEFAULT_CONF_PATH);
if (!confPath.exists() || !confPath.isDirectory() || !confPath.canRead()) {
logger
.error(azkabanHome + " does not contain a readable conf directory.");
return null;
}
return loadAzkabanConfigurationFromDirectory(confPath);
}
public FlowRunnerManager getFlowRunnerManager() {
return runnerManager;
}
/**
* Loads the Azkaban conf file into a Props object
*
* @return
*/
private static Props loadAzkabanConfigurationFromDirectory(File dir) {
File azkabanPrivatePropsFile =
new File(dir, ServerInternals.AZKABAN_PRIVATE_PROPERTIES_FILE);
File azkabanPropsFile = new File(dir, ServerInternals.AZKABAN_PROPERTIES_FILE);
Props props = null;
try {
// This is purely optional
if (azkabanPrivatePropsFile.exists() && azkabanPrivatePropsFile.isFile()) {
logger.info("Loading azkaban private properties file");
props = new Props(null, azkabanPrivatePropsFile);
}
if (azkabanPropsFile.exists() && azkabanPropsFile.isFile()) {
logger.info("Loading azkaban properties file");
props = new Props(props, azkabanPropsFile);
}
} catch (FileNotFoundException e) {
logger.error("File not found. Could not load azkaban config file", e);
} catch (IOException e) {
logger.error(
"File found, but error reading. Could not load azkaban config file",
e);
}
return props;
}
private void configureMBeanServer() {
logger.info("Registering MBeans...");
mbeanServer = ManagementFactory.getPlatformMBeanServer();
registerMbean("executorJetty", new JmxJettyServer(server));
registerMbean("flowRunnerManager", new JmxFlowRunnerManager(runnerManager));
registerMbean("jobJMXMBean", JmxJobMBeanManager.getInstance());
if (JobCallbackManager.isInitialized()) {
JobCallbackManager jobCallbackMgr = JobCallbackManager.getInstance();
registerMbean("jobCallbackJMXMBean",
jobCallbackMgr.getJmxJobCallbackMBean());
}
}
public void close() {
try {
for (ObjectName name : registeredMBeans) {
mbeanServer.unregisterMBean(name);
logger.info("Jmx MBean " + name.getCanonicalName() + " unregistered.");
}
} catch (Exception e) {
logger.error("Failed to cleanup MBeanServer", e);
}
}
private void registerMbean(String name, Object mbean) {
Class<?> mbeanClass = mbean.getClass();
ObjectName mbeanName;
try {
mbeanName = new ObjectName(mbeanClass.getName() + ":name=" + name);
mbeanServer.registerMBean(mbean, mbeanName);
logger.info("Bean " + mbeanClass.getCanonicalName() + " registered.");
registeredMBeans.add(mbeanName);
} catch (Exception e) {
logger.error("Error registering mbean " + mbeanClass.getCanonicalName(),
e);
}
}
public List<ObjectName> getMbeanNames() {
return registeredMBeans;
}
public MBeanInfo getMBeanInfo(ObjectName name) {
try {
return mbeanServer.getMBeanInfo(name);
} catch (Exception e) {
logger.error(e);
return null;
}
}
public Object getMBeanAttribute(ObjectName name, String attribute) {
try {
return mbeanServer.getAttribute(name, attribute);
} catch (Exception e) {
logger.error(e);
return null;
}
}
/**
* Get the hostname
*
* @return hostname
*/
public String getHost() {
if (props.containsKey(ServerProperties.AZKABAN_SERVER_HOST_NAME)) {
String hostName = props.getString(ServerProperties.AZKABAN_SERVER_HOST_NAME);
if (!StringUtils.isEmpty(hostName)) {
return hostName;
}
}
String host = "unkownHost";
try {
host = InetAddress.getLocalHost().getCanonicalHostName();
} catch (Exception e) {
logger.error("Failed to fetch LocalHostName");
}
return host;
}
/**
* Get the current server port
* @return the port at which the executor server is running
*/
public int getPort() {
final Connector[] connectors = server.getConnectors();
checkState(connectors.length >= 1, "Server must have at least 1 connector");
// The first connector is created upon initializing the server. That's the one that has the port.
return connectors[0].getLocalPort();
}
/**
* Returns host:port combination for currently running executor
* @return
*/
public String getExecutorHostPort() {
return getHost() + ":" + getPort();
}
/**
* Shutdown the server.
* - performs a safe shutdown. Waits for completion of current tasks
* - spawns a shutdown thread and returns immediately.
*/
public void shutdown() {
logger.warn("Shutting down AzkabanExecutorServer...");
new Thread(() -> {
try {
// Hack: Sleep for a little time to allow API calls to complete
Thread.sleep(2000);
} catch (InterruptedException e) {
logger.error(e);
}
shutdownInternal();
}, "shutdown").start();
}
/**
* (internal API)
* Note: This should be run in a separate thread.
*
* Shutdown the server. (blocking call)
* - waits for jobs to finish
* - doesn't accept any new jobs
*/
private void shutdownInternal() {
getFlowRunnerManager().shutdown();
try {
shutdownNow();
logger.warn("Shutdown AzkabanExecutorServer complete");
} catch (Exception e) {
logger.error(e);
}
}
/**
* Shutdown the server now! (unsafe)
* @throws Exception
*/
public void shutdownNow() throws Exception {
server.stop();
server.destroy();
SystemMemoryInfo.shutdown();
getFlowRunnerManager().shutdownNow();
}
}
| 1 | 12,437 | Can we escalate this to `warn`. It is a major event. | azkaban-azkaban | java |
@@ -86,6 +86,8 @@ var (
_ = stateOffDstPort
stateOffPostNATDstPort int16 = 24
stateOffIPProto int16 = 26
+ stateOffICMPType int16 = 22
+ stateOffICMPCode int16 = 23
// Compile-time check that IPSetEntrySize hasn't changed; if it changes, the code will need to change.
_ = [1]struct{}{{}}[20-ipsets.IPSetEntrySize] | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package polprog
import (
"fmt"
"math"
"math/bits"
"strings"
"github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf"
log "github.com/sirupsen/logrus"
. "github.com/projectcalico/felix/bpf/asm"
"github.com/projectcalico/felix/ip"
"github.com/projectcalico/felix/proto"
)
type Builder struct {
b *Block
ruleID int
rulePartID int
ipSetIDProvider ipSetIDProvider
ipSetMapFD bpf.MapFD
stateMapFD bpf.MapFD
jumpMapFD bpf.MapFD
}
type ipSetIDProvider interface {
GetNoAlloc(ipSetID string) uint64
}
func NewBuilder(ipSetIDProvider ipSetIDProvider, ipsetMapFD, stateMapFD, jumpMapFD bpf.MapFD) *Builder {
b := &Builder{
ipSetIDProvider: ipSetIDProvider,
ipSetMapFD: ipsetMapFD,
stateMapFD: stateMapFD,
jumpMapFD: jumpMapFD,
}
return b
}
var offset int = 0
func nextOffset(size int, align int) int16 {
offset -= size
remainder := offset % align
if remainder != 0 {
// For negative numbers, the remainder is negative (e.g. -9 % 8 == -1)
offset = offset - remainder - align
}
return int16(offset)
}
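// Worked example (illustrative, matching the initializers below): starting at
// offset 0, nextOffset(4, 4) moves to -4, which is already 4-aligned, so -4 is
// returned; nextOffset(20, 8) then moves to -24 (remainder 0) and returns it;
// a second nextOffset(20, 8) moves to -44, whose remainder mod 8 is -4, so the
// offset is rounded down to -44 - (-4) - 8 = -48.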
var (
// Stack offsets. These are defined locally.
offStateKey = nextOffset(4, 4)
offSrcIPSetKey = nextOffset(ipsets.IPSetEntrySize, 8)
offDstIPSetKey = nextOffset(ipsets.IPSetEntrySize, 8)
// Offsets within the cal_tc_state struct.
// WARNING: must be kept in sync with the definitions in bpf/include/jump.h.
stateOffIPSrc int16 = 0
stateOffIPDst int16 = 4
_ = stateOffIPDst
stateOffPostNATIPDst int16 = 8
stateOffPolResult int16 = 16
stateOffSrcPort int16 = 20
stateOffDstPort int16 = 22
_ = stateOffDstPort
stateOffPostNATDstPort int16 = 24
stateOffIPProto int16 = 26
// Compile-time check that IPSetEntrySize hasn't changed; if it changes, the code will need to change.
_ = [1]struct{}{{}}[20-ipsets.IPSetEntrySize]
// Offsets within struct ip4_set_key.
// WARNING: must be kept in sync with the definitions in bpf/ipsets/map.go.
// WARNING: must be kept in sync with the definitions in bpf/include/policy.h.
ipsKeyPrefix int16 = 0
ipsKeyID int16 = 4
ipsKeyAddr int16 = 12
ipsKeyPort int16 = 16
ipsKeyProto int16 = 18
ipsKeyPad int16 = 19
)
func (p *Builder) Instructions(rules [][][]*proto.Rule) (Insns, error) {
p.b = NewBlock()
p.writeProgramHeader()
p.writeRules(rules)
p.writeProgramFooter()
return p.b.Assemble()
}
// writeProgramHeader emits instructions to load the state from the state map, leaving
// R6 = program context
// R9 = pointer to state map
func (p *Builder) writeProgramHeader() {
// Pre-amble to the policy program.
p.b.LabelNextInsn("start")
p.b.Mov64(R6, R1) // Save R1 (context) in R6.
// Zero-out the map key
p.b.MovImm64(R1, 0) // R1 = 0
p.b.StoreStack32(R1, offStateKey)
// Get pointer to map key in R2.
p.b.Mov64(R2, R10) // R2 = R10
p.b.AddImm64(R2, int32(offStateKey))
// Load map file descriptor into R1.
// clang uses a 64-bit load so copy that for now.
p.b.LoadMapFD(R1, uint32(p.stateMapFD)) // R1 = 0 (64-bit immediate)
p.b.Call(HelperMapLookupElem) // Call helper
// Check return value for NULL.
p.b.JumpEqImm64(R0, 0, "deny")
// Save state pointer in R9.
p.b.Mov64(R9, R0)
p.b.LabelNextInsn("policy")
}
const (
jumpIdxPolicy = iota
jumpIdxEpilogue
_ = jumpIdxPolicy
)
const (
PolRCNoMatch = 0
PolRCAllow = 1
PolRCDeny = 2
PolRCEpilogueTailCallFailed = 10
)
// writeProgramFooter emits the program exit jump targets.
func (p *Builder) writeProgramFooter() {
// Fall through here if there's no match. Also used when we hit an error or if policy rejects packet.
p.b.LabelNextInsn("deny")
p.b.MovImm64(R0, 2 /* TC_ACT_SHOT */)
p.b.Exit()
if p.b.TargetIsUsed("allow") {
p.b.LabelNextInsn("allow")
// Store the policy result in the state for the next program to see.
p.b.MovImm32(R1, 1)
p.b.Store32(R9, R1, stateOffPolResult)
// Execute the tail call.
p.b.Mov64(R1, R6) // First arg is the context.
p.b.LoadMapFD(R2, uint32(p.jumpMapFD)) // Second arg is the map.
p.b.MovImm32(R3, jumpIdxEpilogue) // Third arg is the index (rather than a pointer to the index).
p.b.Call(HelperTailCall)
// Fall through if tail call fails.
p.b.MovImm32(R1, PolRCEpilogueTailCallFailed)
p.b.Store32(R9, R1, stateOffPolResult)
p.b.MovImm64(R0, 2 /* TC_ACT_SHOT */)
p.b.Exit()
}
}
func (p *Builder) setUpSrcIPSetKey(ipsetID uint64) {
p.setUpIPSetKey(ipsetID, offSrcIPSetKey, stateOffIPSrc, stateOffSrcPort)
}
func (p *Builder) setUpDstIPSetKey(ipsetID uint64) {
p.setUpIPSetKey(ipsetID, offDstIPSetKey, stateOffPostNATIPDst, stateOffPostNATDstPort)
}
func (p *Builder) setUpIPSetKey(ipsetID uint64, keyOffset, ipOffset, portOffset int16) {
// TODO track whether we've already done an initialisation and skip the parts that don't change.
// Zero the padding.
p.b.MovImm64(R1, 0) // R1 = 0
p.b.StoreStack8(R1, keyOffset+ipsKeyPad)
p.b.MovImm64(R1, 128) // R1 = 128
p.b.StoreStack32(R1, keyOffset+ipsKeyPrefix)
// Store the IP address, port and protocol.
p.b.Load32(R1, R9, ipOffset)
p.b.StoreStack32(R1, keyOffset+ipsKeyAddr)
p.b.Load16(R1, R9, portOffset)
p.b.StoreStack16(R1, keyOffset+ipsKeyPort)
p.b.Load8(R1, R9, stateOffIPProto)
p.b.StoreStack8(R1, keyOffset+ipsKeyProto)
// Store the IP set ID. It is 64-bit but, since it's a packed struct, we have to write it in two
// 32-bit chunks.
beIPSetID := bits.ReverseBytes64(ipsetID)
p.b.MovImm32(R1, int32(beIPSetID))
p.b.StoreStack32(R1, keyOffset+ipsKeyID)
p.b.MovImm32(R1, int32(beIPSetID>>32))
p.b.StoreStack32(R1, keyOffset+ipsKeyID+4)
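// Layout sanity check (derivable from the ipsKey* offsets): prefix (4B at 0) +
// id (8B at 4) + addr (4B at 12) + port (2B at 16) + proto (1B at 18) +
// pad (1B at 19) = 20 bytes, exactly the size the compile-time assertion
// against ipsets.IPSetEntrySize guards.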
}
func (p *Builder) writeRules(rules [][][]*proto.Rule) {
for polOrProfIdx, polsOrProfs := range rules {
endOfTierLabel := fmt.Sprint("end_of_tier_", polOrProfIdx)
log.Debugf("Start of policies or profiles %d", polOrProfIdx)
for polIdx, pol := range polsOrProfs {
log.Debugf("Start of policy/profile %d", polIdx)
for ruleIdx, rule := range pol {
log.Debugf("Start of rule %d", ruleIdx)
p.writeRule(rule, endOfTierLabel)
log.Debugf("End of rule %d", ruleIdx)
}
log.Debugf("End of policy/profile %d", polIdx)
}
// End of polsOrProfs drop rule.
log.Debugf("End of policies/profiles drop")
p.writeRule(&proto.Rule{Action: "deny"}, endOfTierLabel)
p.b.LabelNextInsn(endOfTierLabel)
}
}
type matchLeg string
const (
legSource matchLeg = "source"
legDest matchLeg = "dest"
)
func (p *Builder) writeRule(rule *proto.Rule, passLabel string) {
// TODO IP version
p.writeStartOfRule()
if rule.Protocol != nil {
log.WithField("proto", rule.Protocol).Debugf("Protocol match")
p.writeProtoMatch(false, rule.Protocol)
}
if rule.NotProtocol != nil {
log.WithField("proto", rule.NotProtocol).Debugf("NotProtocol match")
p.writeProtoMatch(true, rule.NotProtocol)
}
if len(rule.SrcNet) != 0 {
log.WithField("cidrs", rule.SrcNet).Debugf("SrcNet match")
p.writeCIDRSMatch(false, legSource, rule.SrcNet)
}
if len(rule.NotSrcNet) != 0 {
log.WithField("cidrs", rule.NotSrcNet).Debugf("NotSrcNet match")
p.writeCIDRSMatch(true, legSource, rule.NotSrcNet)
}
if len(rule.DstNet) != 0 {
log.WithField("cidrs", rule.DstNet).Debugf("DstNet match")
p.writeCIDRSMatch(false, legDest, rule.DstNet)
}
if len(rule.NotDstNet) != 0 {
log.WithField("cidrs", rule.NotDstNet).Debugf("NotDstNet match")
p.writeCIDRSMatch(true, legDest, rule.NotDstNet)
}
if len(rule.SrcIpSetIds) > 0 {
log.WithField("ipSetIDs", rule.SrcIpSetIds).Debugf("SrcIpSetIds match")
p.writeIPSetMatch(false, legSource, rule.SrcIpSetIds)
}
if len(rule.NotSrcIpSetIds) > 0 {
log.WithField("ipSetIDs", rule.NotSrcIpSetIds).Debugf("NotSrcIpSetIds match")
p.writeIPSetMatch(true, legSource, rule.NotSrcIpSetIds)
}
if len(rule.DstIpSetIds) > 0 {
log.WithField("ipSetIDs", rule.DstIpSetIds).Debugf("DstIpSetIds match")
p.writeIPSetMatch(false, legDest, rule.DstIpSetIds)
}
if len(rule.NotDstIpSetIds) > 0 {
log.WithField("ipSetIDs", rule.NotDstIpSetIds).Debugf("NotDstIpSetIds match")
p.writeIPSetMatch(true, legDest, rule.NotDstIpSetIds)
}
if len(rule.SrcPorts) > 0 || len(rule.SrcNamedPortIpSetIds) > 0 {
log.WithField("ports", rule.SrcPorts).Debugf("SrcPorts match")
p.writePortsMatch(false, legSource, rule.SrcPorts, rule.SrcNamedPortIpSetIds)
}
if len(rule.NotSrcPorts) > 0 || len(rule.NotSrcNamedPortIpSetIds) > 0 {
log.WithField("ports", rule.NotSrcPorts).Debugf("NotSrcPorts match")
p.writePortsMatch(true, legSource, rule.NotSrcPorts, rule.NotSrcNamedPortIpSetIds)
}
if len(rule.DstPorts) > 0 || len(rule.DstNamedPortIpSetIds) > 0 {
log.WithField("ports", rule.DstPorts).Debugf("DstPorts match")
p.writePortsMatch(false, legDest, rule.DstPorts, rule.DstNamedPortIpSetIds)
}
if len(rule.NotDstPorts) > 0 || len(rule.NotDstNamedPortIpSetIds) > 0 {
log.WithField("ports", rule.NotDstPorts).Debugf("NotDstPorts match")
p.writePortsMatch(true, legDest, rule.NotDstPorts, rule.NotDstNamedPortIpSetIds)
}
// TODO ICMP
p.writeEndOfRule(rule, passLabel)
p.ruleID++
p.rulePartID = 0
}
func (p *Builder) writeStartOfRule() {
}
func (p *Builder) writeEndOfRule(rule *proto.Rule, passLabel string) {
// If all the match criteria are met, we fall through to the end of the rule
// so all that's left to do is to jump to the relevant action.
// TODO log and log-and-xxx actions
action := strings.ToLower(rule.Action)
if action == "pass" {
action = passLabel
}
p.b.Jump(action)
p.b.LabelNextInsn(p.endOfRuleLabel())
}
func (p *Builder) writeProtoMatch(negate bool, protocol *proto.Protocol) {
p.b.Load8(R1, R9, stateOffIPProto)
protoNum := protocolToNumber(protocol)
if negate {
p.b.JumpEqImm64(R1, int32(protoNum), p.endOfRuleLabel())
} else {
p.b.JumpNEImm64(R1, int32(protoNum), p.endOfRuleLabel())
}
}
func (p *Builder) writeCIDRSMatch(negate bool, leg matchLeg, cidrs []string) {
var offset int16
if leg == legSource {
offset = stateOffIPSrc
} else {
offset = stateOffPostNATIPDst
}
p.b.Load32(R1, R9, offset)
var onMatchLabel string
if negate {
// Match negated, if we match any CIDR then we jump to the next rule.
onMatchLabel = p.endOfRuleLabel()
} else {
// Match is non-negated, if we match, go to the next match criteria.
onMatchLabel = p.freshPerRuleLabel()
}
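// Worked example (illustrative): for "10.0.0.0/8", maskU32 is
// bits.ReverseBytes32(0xFF000000) = 0x000000FF and addrU32 is
// bits.ReverseBytes32(0x0A000000) = 0x0000000A, so the loop below effectively
// emits: if (ip & 0x000000FF) == 0x0000000A { goto onMatchLabel }.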
for _, cidrStr := range cidrs {
cidr := ip.MustParseCIDROrIP(cidrStr)
addrU32 := bits.ReverseBytes32(cidr.Addr().(ip.V4Addr).AsUint32()) // TODO IPv6
maskU32 := bits.ReverseBytes32(math.MaxUint32 << (32 - cidr.Prefix()) & math.MaxUint32)
p.b.MovImm32(R2, int32(maskU32))
p.b.And32(R2, R1)
p.b.JumpEqImm32(R2, int32(addrU32), onMatchLabel)
}
if !negate {
// If we fall through then none of the CIDRs matched so the rule doesn't match.
p.b.Jump(p.endOfRuleLabel())
// Label the next match so we can skip to it on success.
p.b.LabelNextInsn(onMatchLabel)
}
}
func (p *Builder) writeIPSetMatch(negate bool, leg matchLeg, ipSets []string) {
// IP sets are different to CIDRs, if we have multiple IP sets then they all have to match
// so we treat them as independent match criteria.
for _, ipSetID := range ipSets {
id := p.ipSetIDProvider.GetNoAlloc(ipSetID)
if id == 0 {
log.WithField("setID", ipSetID).Panic("Failed to look up IP set ID.")
}
var keyOffset int16
if leg == legSource {
p.setUpSrcIPSetKey(id)
keyOffset = offSrcIPSetKey
} else {
p.setUpDstIPSetKey(id)
keyOffset = offDstIPSetKey
}
p.b.LoadMapFD(R1, uint32(p.ipSetMapFD))
p.b.Mov64(R2, R10)
p.b.AddImm64(R2, int32(keyOffset))
p.b.Call(HelperMapLookupElem)
if negate {
// Negated; if we got a hit (non-0) then the rule doesn't match.
// (Otherwise we fall through to the next match criteria.)
p.b.JumpNEImm64(R0, 0, p.endOfRuleLabel())
} else {
// Non-negated; if we got a miss (0, i.e. a NULL lookup) then the rule can't match.
// (Otherwise we fall through to the next match criteria.)
p.b.JumpEqImm64(R0, 0, p.endOfRuleLabel())
}
}
}
func (p *Builder) writePortsMatch(negate bool, leg matchLeg, ports []*proto.PortRange, namedPorts []string) {
// For a ports match, numeric ports and named ports are ORed together. Check any
// numeric ports first and then any named ports.
var portOffset int16
if leg == legSource {
portOffset = stateOffSrcPort
} else {
portOffset = stateOffPostNATDstPort
}
var onMatchLabel string
if negate {
// Match negated, if we match any port then we jump to the next rule.
onMatchLabel = p.endOfRuleLabel()
} else {
// Match is non-negated, if we match, go to the next match criteria.
onMatchLabel = p.freshPerRuleLabel()
}
// R1 = port to test against.
p.b.Load16(R1, R9, portOffset)
for _, portRange := range ports {
if portRange.First == portRange.Last {
// Optimisation, single port, just do a comparison.
p.b.JumpEqImm64(R1, portRange.First, onMatchLabel)
} else {
// Port range:
var skipToNextPortLabel string
if portRange.First > 0 {
// If port is too low, skip to next port.
skipToNextPortLabel = p.freshPerRuleLabel()
p.b.JumpLTImm64(R1, portRange.First, skipToNextPortLabel)
}
// If port is in range, got a match, otherwise fall through to next port.
p.b.JumpLEImm64(R1, portRange.Last, onMatchLabel)
if portRange.First > 0 {
p.b.LabelNextInsn(skipToNextPortLabel)
}
}
}
for _, ipSetID := range namedPorts {
id := p.ipSetIDProvider.GetNoAlloc(ipSetID)
if id == 0 {
log.WithField("setID", ipSetID).Panic("Failed to look up IP set ID.")
}
var keyOffset int16
if leg == legSource {
p.setUpSrcIPSetKey(id)
keyOffset = offSrcIPSetKey
} else {
p.setUpDstIPSetKey(id)
keyOffset = offDstIPSetKey
}
p.b.LoadMapFD(R1, uint32(p.ipSetMapFD))
p.b.Mov64(R2, R10)
p.b.AddImm64(R2, int32(keyOffset))
p.b.Call(HelperMapLookupElem)
p.b.JumpNEImm64(R0, 0, onMatchLabel)
}
if !negate {
// If we fall through then none of the ports matched so the rule doesn't match.
p.b.Jump(p.endOfRuleLabel())
// Label the next match so we can skip to it on success.
p.b.LabelNextInsn(onMatchLabel)
}
}
func (p *Builder) freshPerRuleLabel() string {
part := p.rulePartID
p.rulePartID++
return fmt.Sprintf("rule_%d_part_%d", p.ruleID, part)
}
func (p *Builder) endOfRuleLabel() string {
return fmt.Sprintf("rule_%d_no_match", p.ruleID)
}
func protocolToNumber(protocol *proto.Protocol) uint8 {
var pcol uint8
switch p := protocol.NumberOrName.(type) {
case *proto.Protocol_Name:
switch strings.ToLower(p.Name) {
case "tcp":
pcol = 6
case "udp":
pcol = 17
case "icmp":
pcol = 1
case "sctp":
pcol = 132
}
case *proto.Protocol_Number:
pcol = uint8(p.Number)
}
return pcol
}
| 1 | 17,643 | Please move these up to line 86 so the numbers are in order. | projectcalico-felix | go |
@@ -0,0 +1,16 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
[email protected]_module()
+class NASFCOS(SingleStageDetector):
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained) | 1 | 1 | 19,567 | Add a docstring to contain the paper link. | open-mmlab-mmdetection | py |
|
@@ -170,7 +170,11 @@ func Build(pkgName, outpath string, config *compileopts.Config, action func(stri
for i, path := range config.ExtraFiles() {
abspath := filepath.Join(root, path)
outpath := filepath.Join(dir, "extra-"+strconv.Itoa(i)+"-"+filepath.Base(path)+".o")
- err := runCCompiler(config.Target.Compiler, append(config.CFlags(), "-c", "-o", outpath, abspath)...)
+ targ:=config.Target.Triple
+ if targ!="" {
+ targ="--target="+config.Target.Triple
+ }
+ err := runCCompiler(config.Target.Compiler, append(config.CFlags(), "-c", targ,"-o", outpath, abspath)...)
if err != nil {
return &commandError{"failed to build", path, err}
} | 1 | // Package builder is the compiler driver of TinyGo. It takes in a package name
// and an output path, and outputs an executable. It manages the entire
// compilation pipeline in between.
package builder
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/tinygo-org/tinygo/compileopts"
"github.com/tinygo-org/tinygo/compiler"
"github.com/tinygo-org/tinygo/goenv"
"github.com/tinygo-org/tinygo/interp"
"github.com/tinygo-org/tinygo/transform"
"tinygo.org/x/go-llvm"
)
// Build performs a single package-to-executable Go build. It takes in a
// package name, an output path, and a set of compile options, and from those
// it manages the whole compilation process.
//
// The error value may be of type *MultiError. Callers will likely want to check
// for this case and print such errors individually.
func Build(pkgName, outpath string, config *compileopts.Config, action func(string) error) error {
// Compile Go code to IR.
machine, err := compiler.NewTargetMachine(config)
if err != nil {
return err
}
mod, extraFiles, errs := compiler.Compile(pkgName, machine, config)
if errs != nil {
return newMultiError(errs)
}
if config.Options.PrintIR {
fmt.Println("; Generated LLVM IR:")
fmt.Println(mod.String())
}
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification error after IR construction")
}
err = interp.Run(mod, config.DumpSSA())
if err != nil {
return err
}
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification error after interpreting runtime.initAll")
}
if config.GOOS() != "darwin" {
transform.ApplyFunctionSections(mod) // -ffunction-sections
}
// Browsers cannot handle external functions that have type i64 because it
// cannot be represented exactly in JavaScript (JS only has doubles). To
// keep functions interoperable, pass int64 types as pointers to
// stack-allocated values.
// Use -wasm-abi=generic to disable this behaviour.
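// Sketch of the effect (illustrative signatures, not the transform's exact
// output): an external func jsCall(x int64) is rewritten to take a pointer,
// func jsCall(x *int64), with each call site spilling the value to a stack
// slot and passing its address.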
if config.Options.WasmAbi == "js" && strings.HasPrefix(config.Triple(), "wasm") {
err := transform.ExternalInt64AsPtr(mod)
if err != nil {
return err
}
}
// Optimization levels here are roughly the same as Clang, but probably not
// exactly.
errs = nil
switch config.Options.Opt {
case "none", "0":
errs = transform.Optimize(mod, config, 0, 0, 0) // -O0
case "1":
errs = transform.Optimize(mod, config, 1, 0, 0) // -O1
case "2":
errs = transform.Optimize(mod, config, 2, 0, 225) // -O2
case "s":
errs = transform.Optimize(mod, config, 2, 1, 225) // -Os
case "z":
errs = transform.Optimize(mod, config, 2, 2, 5) // -Oz, default
default:
errs = []error{errors.New("unknown optimization level: -opt=" + config.Options.Opt)}
}
if len(errs) > 0 {
return newMultiError(errs)
}
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification failure after LLVM optimization passes")
}
// On the AVR, pointers can point either to flash or to RAM, but we don't
// know. As a temporary fix, load all global variables in RAM.
// In the future, there should be a compiler pass that determines which
// pointers are flash and which are in RAM so that pointers can have a
// correct address space parameter (address space 1 is for flash).
if strings.HasPrefix(config.Triple(), "avr") {
transform.NonConstGlobals(mod)
if err := llvm.VerifyModule(mod, llvm.PrintMessageAction); err != nil {
return errors.New("verification error after making all globals non-constant on AVR")
}
}
// Generate output.
outext := filepath.Ext(outpath)
switch outext {
case ".o":
llvmBuf, err := machine.EmitToMemoryBuffer(mod, llvm.ObjectFile)
if err != nil {
return err
}
return ioutil.WriteFile(outpath, llvmBuf.Bytes(), 0666)
case ".bc":
data := llvm.WriteBitcodeToMemoryBuffer(mod).Bytes()
return ioutil.WriteFile(outpath, data, 0666)
case ".ll":
data := []byte(mod.String())
return ioutil.WriteFile(outpath, data, 0666)
default:
// Act as a compiler driver.
// Create a temporary directory for intermediary files.
dir, err := ioutil.TempDir("", "tinygo")
if err != nil {
return err
}
defer os.RemoveAll(dir)
// Write the object file.
objfile := filepath.Join(dir, "main.o")
llvmBuf, err := machine.EmitToMemoryBuffer(mod, llvm.ObjectFile)
if err != nil {
return err
}
err = ioutil.WriteFile(objfile, llvmBuf.Bytes(), 0666)
if err != nil {
return err
}
// Prepare link command.
executable := filepath.Join(dir, "main")
tmppath := executable // final file
ldflags := append(config.LDFlags(), "-o", executable, objfile)
// Load builtins library from the cache, possibly compiling it on the
// fly.
if config.Target.RTLib == "compiler-rt" {
librt, err := CompilerRT.Load(config.Triple())
if err != nil {
return err
}
ldflags = append(ldflags, librt)
}
// Add libc.
if config.Target.Libc == "picolibc" {
libc, err := Picolibc.Load(config.Triple())
if err != nil {
return err
}
ldflags = append(ldflags, libc)
}
// Compile extra files.
root := goenv.Get("TINYGOROOT")
for i, path := range config.ExtraFiles() {
abspath := filepath.Join(root, path)
outpath := filepath.Join(dir, "extra-"+strconv.Itoa(i)+"-"+filepath.Base(path)+".o")
err := runCCompiler(config.Target.Compiler, append(config.CFlags(), "-c", "-o", outpath, abspath)...)
if err != nil {
return &commandError{"failed to build", path, err}
}
ldflags = append(ldflags, outpath)
}
// Compile C files in packages.
for i, file := range extraFiles {
outpath := filepath.Join(dir, "pkg"+strconv.Itoa(i)+"-"+filepath.Base(file)+".o")
err := runCCompiler(config.Target.Compiler, append(config.CFlags(), "-c", "-o", outpath, file)...)
if err != nil {
return &commandError{"failed to build", file, err}
}
ldflags = append(ldflags, outpath)
}
// Link the object files together.
err = link(config.Target.Linker, ldflags...)
if err != nil {
return &commandError{"failed to link", executable, err}
}
if config.Options.PrintSizes == "short" || config.Options.PrintSizes == "full" {
sizes, err := loadProgramSize(executable)
if err != nil {
return err
}
if config.Options.PrintSizes == "short" {
fmt.Printf(" code data bss | flash ram\n")
fmt.Printf("%7d %7d %7d | %7d %7d\n", sizes.Code, sizes.Data, sizes.BSS, sizes.Code+sizes.Data, sizes.Data+sizes.BSS)
} else {
fmt.Printf(" code rodata data bss | flash ram | package\n")
for _, name := range sizes.sortedPackageNames() {
pkgSize := sizes.Packages[name]
fmt.Printf("%7d %7d %7d %7d | %7d %7d | %s\n", pkgSize.Code, pkgSize.ROData, pkgSize.Data, pkgSize.BSS, pkgSize.Flash(), pkgSize.RAM(), name)
}
fmt.Printf("%7d %7d %7d %7d | %7d %7d | (sum)\n", sizes.Sum.Code, sizes.Sum.ROData, sizes.Sum.Data, sizes.Sum.BSS, sizes.Sum.Flash(), sizes.Sum.RAM())
fmt.Printf("%7d - %7d %7d | %7d %7d | (all)\n", sizes.Code, sizes.Data, sizes.BSS, sizes.Code+sizes.Data, sizes.Data+sizes.BSS)
}
}
// Get an Intel .hex file or .bin file from the .elf file.
if outext == ".hex" || outext == ".bin" || outext == ".gba" {
tmppath = filepath.Join(dir, "main"+outext)
err := objcopy(executable, tmppath)
if err != nil {
return err
}
} else if outext == ".uf2" {
// Get UF2 from the .elf file.
tmppath = filepath.Join(dir, "main"+outext)
err := convertELFFileToUF2File(executable, tmppath, config.Target.UF2FamilyID)
if err != nil {
return err
}
}
return action(tmppath)
}
}
| 1 | 9,421 | Instead of adding the `--target` flag here, the `Target` struct should be set up correctly. Assuming this is for the Raspberry Pi 3, adding it to the `cflags` key of the JSON file should be enough (if not, you can print `config.CFlags()` here to check whether `--target` is already included). | tinygo-org-tinygo | go |
@@ -280,7 +280,9 @@ func (s *Service) BatchCreateFederatedBundle(ctx context.Context, req *bundlev1.
r := s.createFederatedBundle(ctx, b, req.OutputMask)
results = append(results, r)
- rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, fieldsFromBundleProto(b, nil))
+ if _, ok := rpccontext.AuditLog(ctx); ok {
+ rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, fieldsFromBundleProto(b, nil))
+ }
}
return &bundlev1.BatchCreateFederatedBundleResponse{ | 1 | package bundle
import (
"context"
"fmt"
"github.com/sirupsen/logrus"
"github.com/spiffe/go-spiffe/v2/spiffeid"
bundlev1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1"
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/server/api"
"github.com/spiffe/spire/pkg/server/api/rpccontext"
"github.com/spiffe/spire/pkg/server/cache/dscache"
"github.com/spiffe/spire/pkg/server/datastore"
"github.com/spiffe/spire/proto/spire/common"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// UpstreamPublisher defines the publisher interface.
type UpstreamPublisher interface {
PublishJWTKey(ctx context.Context, jwtKey *common.PublicKey) ([]*common.PublicKey, error)
}
// UpstreamPublisherFunc is a function type that implements the UpstreamPublisher interface.
type UpstreamPublisherFunc func(ctx context.Context, jwtKey *common.PublicKey) ([]*common.PublicKey, error)
// PublishJWTKey publishes the JWT key by invoking the wrapped function.
func (fn UpstreamPublisherFunc) PublishJWTKey(ctx context.Context, jwtKey *common.PublicKey) ([]*common.PublicKey, error) {
return fn(ctx, jwtKey)
}
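// Illustrative usage (hypothetical no-op publisher): the adapter lets a plain
// function satisfy UpstreamPublisher, e.g.
//   up := UpstreamPublisherFunc(func(ctx context.Context, k *common.PublicKey) ([]*common.PublicKey, error) {
//       return []*common.PublicKey{k}, nil
//   })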
// Config defines the bundle service configuration.
type Config struct {
DataStore datastore.DataStore
TrustDomain spiffeid.TrustDomain
UpstreamPublisher UpstreamPublisher
}
// Service defines the v1 bundle service properties.
type Service struct {
bundlev1.UnsafeBundleServer
ds datastore.DataStore
td spiffeid.TrustDomain
up UpstreamPublisher
}
// New creates a new bundle service.
func New(config Config) *Service {
return &Service{
ds: config.DataStore,
td: config.TrustDomain,
up: config.UpstreamPublisher,
}
}
// RegisterService registers the bundle service on the gRPC server.
func RegisterService(s *grpc.Server, service *Service) {
bundlev1.RegisterBundleServer(s, service)
}
// CountBundles returns the total number of bundles.
func (s *Service) CountBundles(ctx context.Context, req *bundlev1.CountBundlesRequest) (*bundlev1.CountBundlesResponse, error) {
count, err := s.ds.CountBundles(ctx)
if err != nil {
log := rpccontext.Logger(ctx)
return nil, api.MakeErr(log, codes.Internal, "failed to count bundles", err)
}
rpccontext.AuditRPC(ctx)
return &bundlev1.CountBundlesResponse{Count: count}, nil
}
// GetBundle returns the bundle associated with the given trust domain.
func (s *Service) GetBundle(ctx context.Context, req *bundlev1.GetBundleRequest) (*types.Bundle, error) {
rpccontext.AddRPCAuditFields(ctx, logrus.Fields{telemetry.TrustDomainID: s.td.String()})
log := rpccontext.Logger(ctx)
commonBundle, err := s.ds.FetchBundle(dscache.WithCache(ctx), s.td.IDString())
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to fetch bundle", err)
}
if commonBundle == nil {
return nil, api.MakeErr(log, codes.NotFound, "bundle not found", nil)
}
bundle, err := api.BundleToProto(commonBundle)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to convert bundle", err)
}
applyBundleMask(bundle, req.OutputMask)
rpccontext.AuditRPC(ctx)
return bundle, nil
}
// AppendBundle appends the given authorities to the server's trust domain bundle.
func (s *Service) AppendBundle(ctx context.Context, req *bundlev1.AppendBundleRequest) (*types.Bundle, error) {
parseRequest := func() logrus.Fields {
fields := logrus.Fields{}
for k, v := range fieldsFromJwtAuthoritiesProto(req.JwtAuthorities) {
fields[k] = v
}
for k, v := range fieldsFromX509AuthoritiesProto(req.X509Authorities) {
fields[k] = v
}
return fields
}
rpccontext.AddRPCAuditFields(ctx, parseRequest())
log := rpccontext.Logger(ctx)
if len(req.JwtAuthorities) == 0 && len(req.X509Authorities) == 0 {
return nil, api.MakeErr(log, codes.InvalidArgument, "no authorities to append", nil)
}
log = log.WithField(telemetry.TrustDomainID, s.td.String())
jwtAuth, err := api.ParseJWTAuthorities(req.JwtAuthorities)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "failed to convert JWT authority", err)
}
x509Auth, err := api.ParseX509Authorities(req.X509Authorities)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "failed to convert X.509 authority", err)
}
dsBundle, err := s.ds.AppendBundle(ctx, &common.Bundle{
TrustDomainId: s.td.IDString(),
JwtSigningKeys: jwtAuth,
RootCas: x509Auth,
})
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to append bundle", err)
}
bundle, err := api.BundleToProto(dsBundle)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to convert bundle", err)
}
applyBundleMask(bundle, req.OutputMask)
rpccontext.AuditRPC(ctx)
return bundle, nil
}
// PublishJWTAuthority publishes the JWT key on the server.
func (s *Service) PublishJWTAuthority(ctx context.Context, req *bundlev1.PublishJWTAuthorityRequest) (*bundlev1.PublishJWTAuthorityResponse, error) {
parseRequest := func() logrus.Fields {
fields := logrus.Fields{}
if req.JwtAuthority != nil {
fields[telemetry.JWTAuthorityExpiresAt] = req.JwtAuthority.ExpiresAt
fields[telemetry.JWTAuthorityKeyID] = req.JwtAuthority.KeyId
fields[telemetry.JWTAuthorityPublicKeySHA256] = api.HashByte(req.JwtAuthority.PublicKey)
}
return fields
}
rpccontext.AddRPCAuditFields(ctx, parseRequest())
log := rpccontext.Logger(ctx)
if err := rpccontext.RateLimit(ctx, 1); err != nil {
return nil, api.MakeErr(log, status.Code(err), "rejecting request due to key publishing rate limiting", err)
}
if req.JwtAuthority == nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "missing JWT authority", nil)
}
keys, err := api.ParseJWTAuthorities([]*types.JWTKey{req.JwtAuthority})
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "invalid JWT authority", err)
}
resp, err := s.up.PublishJWTKey(ctx, keys[0])
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to publish JWT key", err)
}
rpccontext.AuditRPC(ctx)
return &bundlev1.PublishJWTAuthorityResponse{
JwtAuthorities: api.PublicKeysToProto(resp),
}, nil
}
// ListFederatedBundles returns an optionally paginated list of federated bundles.
func (s *Service) ListFederatedBundles(ctx context.Context, req *bundlev1.ListFederatedBundlesRequest) (*bundlev1.ListFederatedBundlesResponse, error) {
log := rpccontext.Logger(ctx)
listReq := &datastore.ListBundlesRequest{}
// Set pagination parameters
if req.PageSize > 0 {
listReq.Pagination = &datastore.Pagination{
PageSize: req.PageSize,
Token: req.PageToken,
}
}
dsResp, err := s.ds.ListBundles(ctx, listReq)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to list bundles", err)
}
resp := &bundlev1.ListFederatedBundlesResponse{}
if dsResp.Pagination != nil {
resp.NextPageToken = dsResp.Pagination.Token
}
for _, commonBundle := range dsResp.Bundles {
log = log.WithField(telemetry.TrustDomainID, commonBundle.TrustDomainId)
td, err := spiffeid.TrustDomainFromString(commonBundle.TrustDomainId)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "bundle has an invalid trust domain ID", err)
}
// Filter server bundle
if s.td.Compare(td) == 0 {
continue
}
b, err := api.BundleToProto(commonBundle)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to convert bundle", err)
}
applyBundleMask(b, req.OutputMask)
resp.Bundles = append(resp.Bundles, b)
}
rpccontext.AuditRPC(ctx)
return resp, nil
}
// GetFederatedBundle returns the bundle associated with the given trust domain.
func (s *Service) GetFederatedBundle(ctx context.Context, req *bundlev1.GetFederatedBundleRequest) (*types.Bundle, error) {
rpccontext.AddRPCAuditFields(ctx, logrus.Fields{telemetry.TrustDomainID: req.TrustDomain})
log := rpccontext.Logger(ctx).WithField(telemetry.TrustDomainID, req.TrustDomain)
td, err := spiffeid.TrustDomainFromString(req.TrustDomain)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "trust domain argument is not valid", err)
}
if s.td.Compare(td) == 0 {
return nil, api.MakeErr(log, codes.InvalidArgument, "getting a federated bundle for the server's own trust domain is not allowed", nil)
}
commonBundle, err := s.ds.FetchBundle(ctx, td.IDString())
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to fetch bundle", err)
}
if commonBundle == nil {
return nil, api.MakeErr(log, codes.NotFound, "bundle not found", nil)
}
bundle, err := api.BundleToProto(commonBundle)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to convert bundle", err)
}
applyBundleMask(bundle, req.OutputMask)
rpccontext.AuditRPC(ctx)
return bundle, nil
}
// BatchCreateFederatedBundle adds one or more bundles to the server.
func (s *Service) BatchCreateFederatedBundle(ctx context.Context, req *bundlev1.BatchCreateFederatedBundleRequest) (*bundlev1.BatchCreateFederatedBundleResponse, error) {
var results []*bundlev1.BatchCreateFederatedBundleResponse_Result
for _, b := range req.Bundle {
r := s.createFederatedBundle(ctx, b, req.OutputMask)
results = append(results, r)
rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, fieldsFromBundleProto(b, nil))
}
return &bundlev1.BatchCreateFederatedBundleResponse{
Results: results,
}, nil
}
func (s *Service) createFederatedBundle(ctx context.Context, b *types.Bundle, outputMask *types.BundleMask) *bundlev1.BatchCreateFederatedBundleResponse_Result {
log := rpccontext.Logger(ctx).WithField(telemetry.TrustDomainID, b.TrustDomain)
td, err := idutil.TrustDomainFromString(b.TrustDomain)
if err != nil {
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "trust domain argument is not valid", err),
}
}
if s.td.Compare(td) == 0 {
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "creating a federated bundle for the server's own trust domain is not allowed", nil),
}
}
commonBundle, err := api.ProtoToBundle(b)
if err != nil {
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "failed to convert bundle", err),
}
}
cb, err := s.ds.CreateBundle(ctx, commonBundle)
switch status.Code(err) {
case codes.OK:
case codes.AlreadyExists:
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.AlreadyExists, "bundle already exists", nil),
}
default:
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.Internal, "unable to create bundle", err),
}
}
protoBundle, err := api.BundleToProto(cb)
if err != nil {
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.Internal, "failed to convert bundle", err),
}
}
applyBundleMask(protoBundle, outputMask)
log.Debug("Federated bundle created")
return &bundlev1.BatchCreateFederatedBundleResponse_Result{
Status: api.OK(),
Bundle: protoBundle,
}
}
func (s *Service) setFederatedBundle(ctx context.Context, b *types.Bundle, outputMask *types.BundleMask) *bundlev1.BatchSetFederatedBundleResponse_Result {
log := rpccontext.Logger(ctx).WithField(telemetry.TrustDomainID, b.TrustDomain)
td, err := idutil.TrustDomainFromString(b.TrustDomain)
if err != nil {
return &bundlev1.BatchSetFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "trust domain argument is not valid", err),
}
}
if s.td.Compare(td) == 0 {
return &bundlev1.BatchSetFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "setting a federated bundle for the server's own trust domain is not allowed", nil),
}
}
commonBundle, err := api.ProtoToBundle(b)
if err != nil {
return &bundlev1.BatchSetFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "failed to convert bundle", err),
}
}
dsBundle, err := s.ds.SetBundle(ctx, commonBundle)
if err != nil {
return &bundlev1.BatchSetFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.Internal, "failed to set bundle", err),
}
}
protoBundle, err := api.BundleToProto(dsBundle)
if err != nil {
return &bundlev1.BatchSetFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.Internal, "failed to convert bundle", err),
}
}
applyBundleMask(protoBundle, outputMask)
log.Info("Bundle set successfully")
return &bundlev1.BatchSetFederatedBundleResponse_Result{
Status: api.OK(),
Bundle: protoBundle,
}
}
// BatchUpdateFederatedBundle updates one or more bundles in the server.
func (s *Service) BatchUpdateFederatedBundle(ctx context.Context, req *bundlev1.BatchUpdateFederatedBundleRequest) (*bundlev1.BatchUpdateFederatedBundleResponse, error) {
var results []*bundlev1.BatchUpdateFederatedBundleResponse_Result
for _, b := range req.Bundle {
r := s.updateFederatedBundle(ctx, b, req.InputMask, req.OutputMask)
results = append(results, r)
rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, fieldsFromBundleProto(b, req.InputMask))
}
return &bundlev1.BatchUpdateFederatedBundleResponse{
Results: results,
}, nil
}
func (s *Service) updateFederatedBundle(ctx context.Context, b *types.Bundle, inputMask, outputMask *types.BundleMask) *bundlev1.BatchUpdateFederatedBundleResponse_Result {
log := rpccontext.Logger(ctx).WithField(telemetry.TrustDomainID, b.TrustDomain)
td, err := idutil.TrustDomainFromString(b.TrustDomain)
if err != nil {
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "trust domain argument is not valid", err),
}
}
if s.td.Compare(td) == 0 {
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "updating a federated bundle for the server's own trust domain is not allowed", nil),
}
}
commonBundle, err := api.ProtoToBundle(b)
if err != nil {
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "failed to convert bundle", err),
}
}
dsBundle, err := s.ds.UpdateBundle(ctx, commonBundle, api.ProtoToBundleMask(inputMask))
switch status.Code(err) {
case codes.OK:
case codes.NotFound:
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.NotFound, "bundle not found", err),
}
default:
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.Internal, "failed to update bundle", err),
}
}
protoBundle, err := api.BundleToProto(dsBundle)
if err != nil {
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.Internal, "failed to convert bundle", err),
}
}
applyBundleMask(protoBundle, outputMask)
log.Debug("Federated bundle updated")
return &bundlev1.BatchUpdateFederatedBundleResponse_Result{
Status: api.OK(),
Bundle: protoBundle,
}
}
// BatchSetFederatedBundle upserts one or more bundles in the server.
func (s *Service) BatchSetFederatedBundle(ctx context.Context, req *bundlev1.BatchSetFederatedBundleRequest) (*bundlev1.BatchSetFederatedBundleResponse, error) {
var results []*bundlev1.BatchSetFederatedBundleResponse_Result
for _, b := range req.Bundle {
r := s.setFederatedBundle(ctx, b, req.OutputMask)
results = append(results, r)
rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, fieldsFromBundleProto(b, nil))
}
return &bundlev1.BatchSetFederatedBundleResponse{
Results: results,
}, nil
}
// BatchDeleteFederatedBundle removes one or more bundles from the server.
func (s *Service) BatchDeleteFederatedBundle(ctx context.Context, req *bundlev1.BatchDeleteFederatedBundleRequest) (*bundlev1.BatchDeleteFederatedBundleResponse, error) {
log := rpccontext.Logger(ctx)
mode, err := parseDeleteMode(req.Mode)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "failed to parse deletion mode", err)
}
log = log.WithField(telemetry.DeleteFederatedBundleMode, mode.String())
var results []*bundlev1.BatchDeleteFederatedBundleResponse_Result
for _, trustDomain := range req.TrustDomains {
r := s.deleteFederatedBundle(ctx, log, trustDomain, mode)
results = append(results, r)
rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, logrus.Fields{
telemetry.TrustDomainID: trustDomain,
telemetry.Mode: mode,
})
}
return &bundlev1.BatchDeleteFederatedBundleResponse{
Results: results,
}, nil
}
func (s *Service) deleteFederatedBundle(ctx context.Context, log logrus.FieldLogger, trustDomain string, mode datastore.DeleteMode) *bundlev1.BatchDeleteFederatedBundleResponse_Result {
log = log.WithField(telemetry.TrustDomainID, trustDomain)
td, err := spiffeid.TrustDomainFromString(trustDomain)
if err != nil {
return &bundlev1.BatchDeleteFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.InvalidArgument, "trust domain argument is not valid", err),
TrustDomain: trustDomain,
}
}
if s.td.Compare(td) == 0 {
return &bundlev1.BatchDeleteFederatedBundleResponse_Result{
TrustDomain: trustDomain,
Status: api.MakeStatus(log, codes.InvalidArgument, "removing the bundle for the server trust domain is not allowed", nil),
}
}
err = s.ds.DeleteBundle(ctx, td.IDString(), mode)
code := status.Code(err)
switch code {
case codes.OK:
return &bundlev1.BatchDeleteFederatedBundleResponse_Result{
Status: api.OK(),
TrustDomain: trustDomain,
}
case codes.NotFound:
return &bundlev1.BatchDeleteFederatedBundleResponse_Result{
Status: api.MakeStatus(log, codes.NotFound, "bundle not found", err),
TrustDomain: trustDomain,
}
default:
return &bundlev1.BatchDeleteFederatedBundleResponse_Result{
TrustDomain: trustDomain,
Status: api.MakeStatus(log, code, "failed to delete federated bundle", err),
}
}
}
func parseDeleteMode(mode bundlev1.BatchDeleteFederatedBundleRequest_Mode) (datastore.DeleteMode, error) {
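// As the mode names suggest (exact behavior lives in the datastore
// implementation): Restrict fails the delete while registration entries still
// federate with the bundle, Dissociate removes those federation relationships
// first, and Delete also removes the affected entries.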
switch mode {
case bundlev1.BatchDeleteFederatedBundleRequest_RESTRICT:
return datastore.Restrict, nil
case bundlev1.BatchDeleteFederatedBundleRequest_DISSOCIATE:
return datastore.Dissociate, nil
case bundlev1.BatchDeleteFederatedBundleRequest_DELETE:
return datastore.Delete, nil
default:
return datastore.Restrict, fmt.Errorf("unhandled delete mode %q", mode)
}
}
func applyBundleMask(b *types.Bundle, mask *types.BundleMask) {
if mask == nil {
return
}
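// Illustrative semantics: with mask = &types.BundleMask{JwtAuthorities: true},
// only the JWT authorities survive; RefreshHint and SequenceNumber are zeroed
// and X509Authorities is nil'd before the bundle is returned.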
if !mask.RefreshHint {
b.RefreshHint = 0
}
if !mask.SequenceNumber {
b.SequenceNumber = 0
}
if !mask.X509Authorities {
b.X509Authorities = nil
}
if !mask.JwtAuthorities {
b.JwtAuthorities = nil
}
}
func fieldsFromBundleProto(proto *types.Bundle, inputMask *types.BundleMask) logrus.Fields {
fields := logrus.Fields{
telemetry.TrustDomainID: proto.TrustDomain,
}
if inputMask == nil || inputMask.RefreshHint {
fields[telemetry.RefreshHint] = proto.RefreshHint
}
if inputMask == nil || inputMask.SequenceNumber {
fields[telemetry.SequenceNumber] = proto.SequenceNumber
}
if inputMask == nil || inputMask.JwtAuthorities {
for k, v := range fieldsFromJwtAuthoritiesProto(proto.JwtAuthorities) {
fields[k] = v
}
}
if inputMask == nil || inputMask.X509Authorities {
for k, v := range fieldsFromX509AuthoritiesProto(proto.X509Authorities) {
fields[k] = v
}
}
return fields
}
func fieldsFromJwtAuthoritiesProto(jwtAuthorities []*types.JWTKey) logrus.Fields {
fields := make(logrus.Fields, 3*len(jwtAuthorities))
for i, jwtAuthority := range jwtAuthorities {
fields[fmt.Sprintf("%s.%d", telemetry.JWTAuthorityExpiresAt, i)] = jwtAuthority.ExpiresAt
fields[fmt.Sprintf("%s.%d", telemetry.JWTAuthorityKeyID, i)] = jwtAuthority.KeyId
fields[fmt.Sprintf("%s.%d", telemetry.JWTAuthorityPublicKeySHA256, i)] = api.HashByte(jwtAuthority.PublicKey)
}
return fields
}
func fieldsFromX509AuthoritiesProto(x509Authorities []*types.X509Certificate) logrus.Fields {
fields := make(logrus.Fields, len(x509Authorities))
for i, x509Authority := range x509Authorities {
fields[fmt.Sprintf("%s.%d", telemetry.X509AuthoritiesASN1SHA256, i)] = api.HashByte(x509Authority.Asn1)
}
return fields
}
| 1 | 17,083 | The changes in this file look unnecessary since `rpccontext.AuditRPCWithTypesStatus()` already does this check. Was there a reason you needed to add them? | spiffe-spire | go |
@@ -0,0 +1,9 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package batchstore
+
+const StateKey = stateKey
+
+var BatchKey = batchKey | 1 | 1 | 13,300 | you could probably use `const` for both? | ethersphere-bee | go |
|
@@ -0,0 +1,9 @@
+import { isDataTable } from '../commons/table';
+import { isFocusable } from '../commons/dom';
+
+// TODO: es-modules add tests. No way to access this on the `axe` object
+function dataTableMatches(node) {
+ return !isDataTable(node) && !isFocusable(node);
+}
+
+export default dataTableMatches; | 1 | 1 | 15,756 | This looks like an "ES Module" to me. I do not understand this TODO. | dequelabs-axe-core | js |
|
@@ -103,8 +103,8 @@ public class RowDataRewriter implements Serializable {
OutputFileFactory fileFactory = new OutputFileFactory(
spec, format, locations, io.value(), encryptionManager.value(), partitionId, taskId);
- TaskWriter<InternalRow> writer;
- if (spec.fields().isEmpty()) {
+ final TaskWriter<InternalRow> writer;
+ if (spec.isUnpartitioned()) {
writer = new UnpartitionedWriter<>(spec, format, appenderFactory, fileFactory, io.value(),
Long.MAX_VALUE);
} else if (PropertyUtil.propertyAsBoolean(properties, | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
import org.apache.iceberg.io.OutputFileFactory;
import org.apache.iceberg.io.TaskWriter;
import org.apache.iceberg.io.UnpartitionedWriter;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.spark.SparkSchemaUtil;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.spark.TaskContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING;
public class RowDataRewriter implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(RowDataRewriter.class);
private final Schema schema;
private final PartitionSpec spec;
private final Map<String, String> properties;
private final FileFormat format;
private final Broadcast<FileIO> io;
private final Broadcast<EncryptionManager> encryptionManager;
private final LocationProvider locations;
private final String nameMapping;
private final boolean caseSensitive;
public RowDataRewriter(Table table, PartitionSpec spec, boolean caseSensitive,
Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager) {
this.schema = table.schema();
this.spec = spec;
this.locations = table.locationProvider();
this.properties = table.properties();
this.io = io;
this.encryptionManager = encryptionManager;
this.caseSensitive = caseSensitive;
this.nameMapping = table.properties().get(DEFAULT_NAME_MAPPING);
String formatString = table.properties().getOrDefault(
TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT);
this.format = FileFormat.valueOf(formatString.toUpperCase(Locale.ENGLISH));
}
public List<DataFile> rewriteDataForTasks(JavaRDD<CombinedScanTask> taskRDD) {
JavaRDD<List<DataFile>> dataFilesRDD = taskRDD.map(this::rewriteDataForTask);
return dataFilesRDD.collect().stream()
.flatMap(Collection::stream)
.collect(Collectors.toList());
}
private List<DataFile> rewriteDataForTask(CombinedScanTask task) throws Exception {
TaskContext context = TaskContext.get();
int partitionId = context.partitionId();
long taskId = context.taskAttemptId();
RowDataReader dataReader = new RowDataReader(
task, schema, schema, nameMapping, io.value(), encryptionManager.value(), caseSensitive);
StructType structType = SparkSchemaUtil.convert(schema);
SparkAppenderFactory appenderFactory = new SparkAppenderFactory(properties, schema, structType, spec);
OutputFileFactory fileFactory = new OutputFileFactory(
spec, format, locations, io.value(), encryptionManager.value(), partitionId, taskId);
TaskWriter<InternalRow> writer;
if (spec.fields().isEmpty()) {
writer = new UnpartitionedWriter<>(spec, format, appenderFactory, fileFactory, io.value(),
Long.MAX_VALUE);
} else if (PropertyUtil.propertyAsBoolean(properties,
TableProperties.SPARK_WRITE_PARTITIONED_FANOUT_ENABLED,
TableProperties.SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT)) {
writer = new SparkPartitionedFanoutWriter(
spec, format, appenderFactory, fileFactory, io.value(), Long.MAX_VALUE, schema,
structType);
} else {
writer = new SparkPartitionedWriter(
spec, format, appenderFactory, fileFactory, io.value(), Long.MAX_VALUE, schema,
structType);
}
try {
while (dataReader.next()) {
InternalRow row = dataReader.get();
writer.write(row);
}
dataReader.close();
dataReader = null;
writer.close();
return Lists.newArrayList(writer.dataFiles());
} catch (Throwable originalThrowable) {
try {
LOG.error("Aborting task", originalThrowable);
context.markTaskFailed(originalThrowable);
LOG.error("Aborting commit for partition {} (task {}, attempt {}, stage {}.{})",
partitionId, taskId, context.attemptNumber(), context.stageId(), context.stageAttemptNumber());
if (dataReader != null) {
dataReader.close();
}
writer.abort();
LOG.error("Aborted commit for partition {} (task {}, attempt {}, stage {}.{})",
partitionId, taskId, context.taskAttemptId(), context.stageId(), context.stageAttemptNumber());
} catch (Throwable inner) {
if (originalThrowable != inner) {
originalThrowable.addSuppressed(inner);
LOG.warn("Suppressing exception in catch: {}", inner.getMessage(), inner);
}
}
if (originalThrowable instanceof Exception) {
throw originalThrowable;
} else {
throw new RuntimeException(originalThrowable);
}
}
}
}
| 1 | 32,296 | Iceberg doesn't use `final` because it is unlikely that this actually helps. In Java 8, final detection is quite good, which is why non-final variables can be used in closures and lambdas. And final doesn't produce different byte code so it can't do much to help at runtime. | apache-iceberg | java |
@@ -7391,7 +7391,7 @@ int LuaScriptInterface::luaPlayerGetIp(lua_State* L)
// player:getIp()
Player* player = getUserdata<Player>(L, 1);
if (player) {
- lua_pushnumber(L, player->getIP());
+ lua_pushstring(L, player->getIP().to_string().c_str());
} else {
lua_pushnil(L);
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include "luascript.h"
#include "chat.h"
#include "player.h"
#include "game.h"
#include "protocolstatus.h"
#include "spells.h"
#include "iologindata.h"
#include "configmanager.h"
#include "teleport.h"
#include "databasemanager.h"
#include "bed.h"
#include "monster.h"
#include "scheduler.h"
#include "databasetasks.h"
extern Chat* g_chat;
extern Game g_game;
extern Monsters g_monsters;
extern ConfigManager g_config;
extern Vocations g_vocations;
extern Spells* g_spells;
ScriptEnvironment::DBResultMap ScriptEnvironment::tempResults;
uint32_t ScriptEnvironment::lastResultId = 0;
std::multimap<ScriptEnvironment*, Item*> ScriptEnvironment::tempItems;
LuaEnvironment g_luaEnvironment;
ScriptEnvironment::ScriptEnvironment()
{
resetEnv();
}
ScriptEnvironment::~ScriptEnvironment()
{
resetEnv();
}
void ScriptEnvironment::resetEnv()
{
scriptId = 0;
callbackId = 0;
timerEvent = false;
interface = nullptr;
localMap.clear();
tempResults.clear();
auto pair = tempItems.equal_range(this);
auto it = pair.first;
while (it != pair.second) {
Item* item = it->second;
if (item->getParent() == VirtualCylinder::virtualCylinder) {
g_game.ReleaseItem(item);
}
it = tempItems.erase(it);
}
}
bool ScriptEnvironment::setCallbackId(int32_t callbackId, LuaScriptInterface* scriptInterface)
{
if (this->callbackId != 0) {
//nested callbacks are not allowed
if (interface) {
interface->reportErrorFunc("Nested callbacks!");
}
return false;
}
this->callbackId = callbackId;
interface = scriptInterface;
return true;
}
void ScriptEnvironment::getEventInfo(int32_t& scriptId, LuaScriptInterface*& scriptInterface, int32_t& callbackId, bool& timerEvent) const
{
scriptId = this->scriptId;
scriptInterface = interface;
callbackId = this->callbackId;
timerEvent = this->timerEvent;
}
uint32_t ScriptEnvironment::addThing(Thing* thing)
{
if (!thing || thing->isRemoved()) {
return 0;
}
Creature* creature = thing->getCreature();
if (creature) {
return creature->getID();
}
Item* item = thing->getItem();
if (item && item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
return item->getUniqueId();
}
for (const auto& it : localMap) {
if (it.second == item) {
return it.first;
}
}
localMap[++lastUID] = item;
return lastUID;
}
void ScriptEnvironment::insertItem(uint32_t uid, Item* item)
{
auto result = localMap.emplace(uid, item);
if (!result.second) {
std::cout << std::endl << "Lua Script Error: Thing uid already taken.";
}
}
Thing* ScriptEnvironment::getThingByUID(uint32_t uid)
{
if (uid >= 0x10000000) {
return g_game.getCreatureByID(uid);
}
if (uid <= std::numeric_limits<uint16_t>::max()) {
Item* item = g_game.getUniqueItem(uid);
if (item && !item->isRemoved()) {
return item;
}
return nullptr;
}
auto it = localMap.find(uid);
if (it != localMap.end()) {
Item* item = it->second;
if (!item->isRemoved()) {
return item;
}
}
return nullptr;
}
Item* ScriptEnvironment::getItemByUID(uint32_t uid)
{
Thing* thing = getThingByUID(uid);
if (!thing) {
return nullptr;
}
return thing->getItem();
}
Container* ScriptEnvironment::getContainerByUID(uint32_t uid)
{
Item* item = getItemByUID(uid);
if (!item) {
return nullptr;
}
return item->getContainer();
}
void ScriptEnvironment::removeItemByUID(uint32_t uid)
{
if (uid <= std::numeric_limits<uint16_t>::max()) {
g_game.removeUniqueItem(uid);
return;
}
auto it = localMap.find(uid);
if (it != localMap.end()) {
localMap.erase(it);
}
}
void ScriptEnvironment::addTempItem(Item* item)
{
tempItems.emplace(this, item);
}
void ScriptEnvironment::removeTempItem(Item* item)
{
for (auto it = tempItems.begin(), end = tempItems.end(); it != end; ++it) {
if (it->second == item) {
tempItems.erase(it);
break;
}
}
}
uint32_t ScriptEnvironment::addResult(DBResult_ptr res)
{
tempResults[++lastResultId] = res;
return lastResultId;
}
bool ScriptEnvironment::removeResult(uint32_t id)
{
auto it = tempResults.find(id);
if (it == tempResults.end()) {
return false;
}
tempResults.erase(it);
return true;
}
DBResult_ptr ScriptEnvironment::getResultByID(uint32_t id)
{
auto it = tempResults.find(id);
if (it == tempResults.end()) {
return nullptr;
}
return it->second;
}
std::string LuaScriptInterface::getErrorDesc(ErrorCode_t code)
{
switch (code) {
case LUA_ERROR_PLAYER_NOT_FOUND: return "Player not found";
case LUA_ERROR_CREATURE_NOT_FOUND: return "Creature not found";
case LUA_ERROR_ITEM_NOT_FOUND: return "Item not found";
case LUA_ERROR_THING_NOT_FOUND: return "Thing not found";
case LUA_ERROR_TILE_NOT_FOUND: return "Tile not found";
case LUA_ERROR_HOUSE_NOT_FOUND: return "House not found";
case LUA_ERROR_COMBAT_NOT_FOUND: return "Combat not found";
case LUA_ERROR_CONDITION_NOT_FOUND: return "Condition not found";
case LUA_ERROR_AREA_NOT_FOUND: return "Area not found";
case LUA_ERROR_CONTAINER_NOT_FOUND: return "Container not found";
case LUA_ERROR_VARIANT_NOT_FOUND: return "Variant not found";
case LUA_ERROR_VARIANT_UNKNOWN: return "Unknown variant type";
case LUA_ERROR_SPELL_NOT_FOUND: return "Spell not found";
default: return "Bad error code";
}
}
ScriptEnvironment LuaScriptInterface::scriptEnv[16];
int32_t LuaScriptInterface::scriptEnvIndex = -1;
LuaScriptInterface::LuaScriptInterface(std::string interfaceName) : interfaceName(std::move(interfaceName))
{
if (!g_luaEnvironment.getLuaState()) {
g_luaEnvironment.initState();
}
}
LuaScriptInterface::~LuaScriptInterface()
{
closeState();
}
bool LuaScriptInterface::reInitState()
{
g_luaEnvironment.clearCombatObjects(this);
g_luaEnvironment.clearAreaObjects(this);
closeState();
return initState();
}
/// Same as lua_pcall, but adds stack trace to error strings in called function.
int LuaScriptInterface::protectedCall(lua_State* L, int nargs, int nresults)
{
int error_index = lua_gettop(L) - nargs;
lua_pushcfunction(L, luaErrorHandler);
lua_insert(L, error_index);
int ret = lua_pcall(L, nargs, nresults, error_index);
lua_remove(L, error_index);
return ret;
}
int32_t LuaScriptInterface::loadFile(const std::string& file, Npc* npc /* = nullptr*/)
{
//loads file as a chunk at stack top
int ret = luaL_loadfile(luaState, file.c_str());
if (ret != 0) {
lastLuaError = popString(luaState);
return -1;
}
//check that it is loaded as a function
if (!isFunction(luaState, -1)) {
return -1;
}
loadingFile = file;
if (!reserveScriptEnv()) {
return -1;
}
ScriptEnvironment* env = getScriptEnv();
env->setScriptId(EVENT_ID_LOADING, this);
env->setNpc(npc);
//execute it
ret = protectedCall(luaState, 0, 0);
if (ret != 0) {
reportError(nullptr, popString(luaState));
resetScriptEnv();
return -1;
}
resetScriptEnv();
return 0;
}
int32_t LuaScriptInterface::getEvent(const std::string& eventName)
{
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//get current event function pointer
lua_getglobal(luaState, eventName.c_str());
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 2);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -1);
lua_rawseti(luaState, -3, runningEventId);
lua_pop(luaState, 2);
//reset global value of this event
lua_pushnil(luaState);
lua_setglobal(luaState, eventName.c_str());
cacheFiles[runningEventId] = loadingFile + ":" + eventName;
return runningEventId++;
}
int32_t LuaScriptInterface::getMetaEvent(const std::string& globalName, const std::string& eventName)
{
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//get current event function pointer
lua_getglobal(luaState, globalName.c_str());
lua_getfield(luaState, -1, eventName.c_str());
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 3);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -1);
lua_rawseti(luaState, -4, runningEventId);
lua_pop(luaState, 1);
//reset global value of this event
lua_pushnil(luaState);
lua_setfield(luaState, -2, eventName.c_str());
lua_pop(luaState, 2);
cacheFiles[runningEventId] = loadingFile + ":" + globalName + "@" + eventName;
return runningEventId++;
}
const std::string& LuaScriptInterface::getFileById(int32_t scriptId)
{
if (scriptId == EVENT_ID_LOADING) {
return loadingFile;
}
auto it = cacheFiles.find(scriptId);
if (it == cacheFiles.end()) {
static const std::string& unk = "(Unknown scriptfile)";
return unk;
}
return it->second;
}
std::string LuaScriptInterface::getStackTrace(const std::string& error_desc)
{
lua_getglobal(luaState, "debug");
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return error_desc;
}
lua_getfield(luaState, -1, "traceback");
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 2);
return error_desc;
}
lua_replace(luaState, -2);
pushString(luaState, error_desc);
lua_call(luaState, 1, 1);
return popString(luaState);
}
void LuaScriptInterface::reportError(const char* function, const std::string& error_desc, bool stack_trace/* = false*/)
{
int32_t scriptId;
int32_t callbackId;
bool timerEvent;
LuaScriptInterface* scriptInterface;
getScriptEnv()->getEventInfo(scriptId, scriptInterface, callbackId, timerEvent);
std::cout << std::endl << "Lua Script Error: ";
if (scriptInterface) {
std::cout << '[' << scriptInterface->getInterfaceName() << "] " << std::endl;
if (timerEvent) {
std::cout << "in a timer event called from: " << std::endl;
}
if (callbackId) {
std::cout << "in callback: " << scriptInterface->getFileById(callbackId) << std::endl;
}
std::cout << scriptInterface->getFileById(scriptId) << std::endl;
}
if (function) {
std::cout << function << "(). ";
}
if (stack_trace && scriptInterface) {
std::cout << scriptInterface->getStackTrace(error_desc) << std::endl;
} else {
std::cout << error_desc << std::endl;
}
}
bool LuaScriptInterface::pushFunction(int32_t functionId)
{
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
return false;
}
lua_rawgeti(luaState, -1, functionId);
lua_replace(luaState, -2);
return isFunction(luaState, -1);
}
bool LuaScriptInterface::initState()
{
luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return false;
}
lua_newtable(luaState);
eventTableRef = luaL_ref(luaState, LUA_REGISTRYINDEX);
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaScriptInterface::closeState()
{
if (!g_luaEnvironment.getLuaState() || !luaState) {
return false;
}
cacheFiles.clear();
if (eventTableRef != -1) {
luaL_unref(luaState, LUA_REGISTRYINDEX, eventTableRef);
eventTableRef = -1;
}
luaState = nullptr;
return true;
}
int LuaScriptInterface::luaErrorHandler(lua_State* L)
{
const std::string& errorMessage = popString(L);
auto interface = getScriptEnv()->getScriptInterface();
assert(interface); //This fires if the ScriptEnvironment hasn't been setup
pushString(L, interface->getStackTrace(errorMessage));
return 1;
}
bool LuaScriptInterface::callFunction(int params)
{
bool result = false;
int size = lua_gettop(luaState);
if (protectedCall(luaState, params, 1) != 0) {
LuaScriptInterface::reportError(nullptr, LuaScriptInterface::getString(luaState, -1));
} else {
result = LuaScriptInterface::getBoolean(luaState, -1);
}
lua_pop(luaState, 1);
if ((lua_gettop(luaState) + params + 1) != size) {
LuaScriptInterface::reportError(nullptr, "Stack size changed!");
}
resetScriptEnv();
return result;
}
void LuaScriptInterface::callVoidFunction(int params)
{
int size = lua_gettop(luaState);
if (protectedCall(luaState, params, 0) != 0) {
LuaScriptInterface::reportError(nullptr, LuaScriptInterface::popString(luaState));
}
if ((lua_gettop(luaState) + params + 1) != size) {
LuaScriptInterface::reportError(nullptr, "Stack size changed!");
}
resetScriptEnv();
}
void LuaScriptInterface::pushVariant(lua_State* L, const LuaVariant& var)
{
lua_createtable(L, 0, 2);
setField(L, "type", var.type);
switch (var.type) {
case VARIANT_NUMBER:
setField(L, "number", var.number);
break;
case VARIANT_STRING:
setField(L, "string", var.text);
break;
case VARIANT_TARGETPOSITION:
case VARIANT_POSITION: {
pushPosition(L, var.pos);
lua_setfield(L, -2, "pos");
break;
}
default:
break;
}
setMetatable(L, -1, "Variant");
}
void LuaScriptInterface::pushThing(lua_State* L, Thing* thing)
{
if (!thing) {
lua_createtable(L, 0, 4);
setField(L, "uid", 0);
setField(L, "itemid", 0);
setField(L, "actionid", 0);
setField(L, "type", 0);
return;
}
if (Item* item = thing->getItem()) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else if (Creature* creature = thing->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else {
lua_pushnil(L);
}
}
void LuaScriptInterface::pushCylinder(lua_State* L, Cylinder* cylinder)
{
if (Creature* creature = cylinder->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else if (Item* parentItem = cylinder->getItem()) {
pushUserdata<Item>(L, parentItem);
setItemMetatable(L, -1, parentItem);
} else if (Tile* tile = cylinder->getTile()) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else if (cylinder == VirtualCylinder::virtualCylinder) {
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
}
void LuaScriptInterface::pushString(lua_State* L, const std::string& value)
{
lua_pushlstring(L, value.c_str(), value.length());
}
void LuaScriptInterface::pushCallback(lua_State* L, int32_t callback)
{
lua_rawgeti(L, LUA_REGISTRYINDEX, callback);
}
std::string LuaScriptInterface::popString(lua_State* L)
{
if (lua_gettop(L) == 0) {
return std::string();
}
std::string str(getString(L, -1));
lua_pop(L, 1);
return str;
}
int32_t LuaScriptInterface::popCallback(lua_State* L)
{
return luaL_ref(L, LUA_REGISTRYINDEX);
}
// Metatables
void LuaScriptInterface::setMetatable(lua_State* L, int32_t index, const std::string& name)
{
luaL_getmetatable(L, name.c_str());
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setWeakMetatable(lua_State* L, int32_t index, const std::string& name)
{
static std::set<std::string> weakObjectTypes;
const std::string& weakName = name + "_weak";
auto result = weakObjectTypes.emplace(name);
if (result.second) {
luaL_getmetatable(L, name.c_str());
int childMetatable = lua_gettop(L);
luaL_newmetatable(L, weakName.c_str());
int metatable = lua_gettop(L);
static const std::vector<std::string> methodKeys = {"__index", "__metatable", "__eq"};
for (const std::string& metaKey : methodKeys) {
lua_getfield(L, childMetatable, metaKey.c_str());
lua_setfield(L, metatable, metaKey.c_str());
}
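		// also copy the integer-keyed metadata entries; 't' holds the LuaDataType
		// tag read back by getUserdataType() ('h' and 'p' are presumably the class
		// and parent hashes set up during class registration)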
static const std::vector<int> methodIndexes = {'h', 'p', 't'};
for (int metaIndex : methodIndexes) {
lua_rawgeti(L, childMetatable, metaIndex);
lua_rawseti(L, metatable, metaIndex);
}
lua_pushnil(L);
lua_setfield(L, metatable, "__gc");
lua_remove(L, childMetatable);
} else {
luaL_getmetatable(L, weakName.c_str());
}
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setItemMetatable(lua_State* L, int32_t index, const Item* item)
{
if (item->getContainer()) {
luaL_getmetatable(L, "Container");
} else if (item->getTeleport()) {
luaL_getmetatable(L, "Teleport");
} else {
luaL_getmetatable(L, "Item");
}
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setCreatureMetatable(lua_State* L, int32_t index, const Creature* creature)
{
if (creature->getPlayer()) {
luaL_getmetatable(L, "Player");
} else if (creature->getMonster()) {
luaL_getmetatable(L, "Monster");
} else {
luaL_getmetatable(L, "Npc");
}
lua_setmetatable(L, index - 1);
}
// Get
std::string LuaScriptInterface::getString(lua_State* L, int32_t arg)
{
size_t len;
const char* c_str = lua_tolstring(L, arg, &len);
if (!c_str || len == 0) {
return std::string();
}
return std::string(c_str, len);
}
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg, int32_t& stackpos)
{
Position position;
position.x = getField<uint16_t>(L, arg, "x");
position.y = getField<uint16_t>(L, arg, "y");
position.z = getField<uint8_t>(L, arg, "z");
lua_getfield(L, arg, "stackpos");
if (lua_isnil(L, -1) == 1) {
stackpos = 0;
} else {
stackpos = getNumber<int32_t>(L, -1);
}
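	// getField() leaves each fetched value on the stack, so together with the
	// explicit lua_getfield() above there are four values to pop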
lua_pop(L, 4);
return position;
}
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg)
{
Position position;
position.x = getField<uint16_t>(L, arg, "x");
position.y = getField<uint16_t>(L, arg, "y");
position.z = getField<uint8_t>(L, arg, "z");
lua_pop(L, 3);
return position;
}
Outfit_t LuaScriptInterface::getOutfit(lua_State* L, int32_t arg)
{
Outfit_t outfit;
outfit.lookMount = getField<uint16_t>(L, arg, "lookMount");
outfit.lookAddons = getField<uint8_t>(L, arg, "lookAddons");
outfit.lookFeet = getField<uint8_t>(L, arg, "lookFeet");
outfit.lookLegs = getField<uint8_t>(L, arg, "lookLegs");
outfit.lookBody = getField<uint8_t>(L, arg, "lookBody");
outfit.lookHead = getField<uint8_t>(L, arg, "lookHead");
outfit.lookTypeEx = getField<uint16_t>(L, arg, "lookTypeEx");
outfit.lookType = getField<uint16_t>(L, arg, "lookType");
lua_pop(L, 8);
return outfit;
}
LuaVariant LuaScriptInterface::getVariant(lua_State* L, int32_t arg)
{
LuaVariant var;
switch (var.type = getField<LuaVariantType_t>(L, arg, "type")) {
case VARIANT_NUMBER: {
var.number = getField<uint32_t>(L, arg, "number");
lua_pop(L, 2);
break;
}
case VARIANT_STRING: {
var.text = getFieldString(L, arg, "string");
lua_pop(L, 2);
break;
}
case VARIANT_POSITION:
case VARIANT_TARGETPOSITION: {
lua_getfield(L, arg, "pos");
var.pos = getPosition(L, lua_gettop(L));
lua_pop(L, 2);
break;
}
default: {
var.type = VARIANT_NONE;
lua_pop(L, 1);
break;
}
}
return var;
}
Thing* LuaScriptInterface::getThing(lua_State* L, int32_t arg)
{
Thing* thing;
if (lua_getmetatable(L, arg) != 0) {
lua_rawgeti(L, -1, 't');
switch(getNumber<uint32_t>(L, -1)) {
case LuaData_Item:
thing = getUserdata<Item>(L, arg);
break;
case LuaData_Container:
thing = getUserdata<Container>(L, arg);
break;
case LuaData_Teleport:
thing = getUserdata<Teleport>(L, arg);
break;
case LuaData_Player:
thing = getUserdata<Player>(L, arg);
break;
case LuaData_Monster:
thing = getUserdata<Monster>(L, arg);
break;
case LuaData_Npc:
thing = getUserdata<Npc>(L, arg);
break;
default:
thing = nullptr;
break;
}
lua_pop(L, 2);
} else {
thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, arg));
}
return thing;
}
Creature* LuaScriptInterface::getCreature(lua_State* L, int32_t arg)
{
if (isUserdata(L, arg)) {
return getUserdata<Creature>(L, arg);
}
return g_game.getCreatureByID(getNumber<uint32_t>(L, arg));
}
Player* LuaScriptInterface::getPlayer(lua_State* L, int32_t arg)
{
if (isUserdata(L, arg)) {
return getUserdata<Player>(L, arg);
}
return g_game.getPlayerByID(getNumber<uint32_t>(L, arg));
}
std::string LuaScriptInterface::getFieldString(lua_State* L, int32_t arg, const std::string& key)
{
lua_getfield(L, arg, key.c_str());
return getString(L, -1);
}
LuaDataType LuaScriptInterface::getUserdataType(lua_State* L, int32_t arg)
{
if (lua_getmetatable(L, arg) == 0) {
return LuaData_Unknown;
}
lua_rawgeti(L, -1, 't');
LuaDataType type = getNumber<LuaDataType>(L, -1);
lua_pop(L, 2);
return type;
}
// Push
void LuaScriptInterface::pushBoolean(lua_State* L, bool value)
{
lua_pushboolean(L, value ? 1 : 0);
}
void LuaScriptInterface::pushInstantSpell(lua_State* L, const InstantSpell& spell)
{
lua_createtable(L, 0, 6);
setField(L, "name", spell.getName());
setField(L, "words", spell.getWords());
setField(L, "level", spell.getLevel());
setField(L, "mlevel", spell.getMagicLevel());
setField(L, "mana", spell.getMana());
setField(L, "manapercent", spell.getManaPercent());
setMetatable(L, -1, "Spell");
}
void LuaScriptInterface::pushPosition(lua_State* L, const Position& position, int32_t stackpos/* = 0*/)
{
lua_createtable(L, 0, 4);
setField(L, "x", position.x);
setField(L, "y", position.y);
setField(L, "z", position.z);
setField(L, "stackpos", stackpos);
setMetatable(L, -1, "Position");
}
void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit_t& outfit)
{
lua_createtable(L, 0, 8);
setField(L, "lookType", outfit.lookType);
setField(L, "lookTypeEx", outfit.lookTypeEx);
setField(L, "lookHead", outfit.lookHead);
setField(L, "lookBody", outfit.lookBody);
setField(L, "lookLegs", outfit.lookLegs);
setField(L, "lookFeet", outfit.lookFeet);
setField(L, "lookAddons", outfit.lookAddons);
setField(L, "lookMount", outfit.lookMount);
}
#define registerEnum(value) { std::string enumName = #value; registerGlobalVariable(enumName.substr(enumName.find_last_of(':') + 1), value); }
#define registerEnumIn(tableName, value) { std::string enumName = #value; registerVariable(tableName, enumName.substr(enumName.find_last_of(':') + 1), value); }
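// For illustration: registerEnum(ACCOUNT_TYPE_NORMAL) stringifies the token and
// registers a same-named Lua global, while registerEnumIn("configKeys", ConfigManager::MAP_NAME)
// strips everything up to the last ':' and registers configKeys.MAP_NAME.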
void LuaScriptInterface::registerFunctions()
{
//getPlayerFlagValue(cid, flag)
lua_register(luaState, "getPlayerFlagValue", LuaScriptInterface::luaGetPlayerFlagValue);
//doPlayerAddItem(uid, itemid, <optional: default: 1> count/subtype)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype)
//Returns uid of the created item
lua_register(luaState, "doPlayerAddItem", LuaScriptInterface::luaDoPlayerAddItem);
//doTileAddItemEx(pos, uid)
lua_register(luaState, "doTileAddItemEx", LuaScriptInterface::luaDoTileAddItemEx);
//doSetCreatureLight(cid, lightLevel, lightColor, time)
lua_register(luaState, "doSetCreatureLight", LuaScriptInterface::luaDoSetCreatureLight);
//getCreatureCondition(cid, condition[, subId])
lua_register(luaState, "getCreatureCondition", LuaScriptInterface::luaGetCreatureCondition);
//isValidUID(uid)
lua_register(luaState, "isValidUID", LuaScriptInterface::luaIsValidUID);
//isDepot(uid)
lua_register(luaState, "isDepot", LuaScriptInterface::luaIsDepot);
//isMovable(uid)
lua_register(luaState, "isMovable", LuaScriptInterface::luaIsMoveable);
//doAddContainerItem(uid, itemid, <optional> count/subtype)
lua_register(luaState, "doAddContainerItem", LuaScriptInterface::luaDoAddContainerItem);
//getDepotId(uid)
lua_register(luaState, "getDepotId", LuaScriptInterface::luaGetDepotId);
//getWorldTime()
lua_register(luaState, "getWorldTime", LuaScriptInterface::luaGetWorldTime);
//getWorldLight()
lua_register(luaState, "getWorldLight", LuaScriptInterface::luaGetWorldLight);
//getWorldUpTime()
lua_register(luaState, "getWorldUpTime", LuaScriptInterface::luaGetWorldUpTime);
//createCombatArea( {area}, <optional> {extArea} )
lua_register(luaState, "createCombatArea", LuaScriptInterface::luaCreateCombatArea);
//doAreaCombatHealth(cid, type, pos, area, min, max, effect)
lua_register(luaState, "doAreaCombatHealth", LuaScriptInterface::luaDoAreaCombatHealth);
//doTargetCombatHealth(cid, target, type, min, max, effect)
lua_register(luaState, "doTargetCombatHealth", LuaScriptInterface::luaDoTargetCombatHealth);
//doAreaCombatMana(cid, pos, area, min, max, effect)
lua_register(luaState, "doAreaCombatMana", LuaScriptInterface::luaDoAreaCombatMana);
//doTargetCombatMana(cid, target, min, max, effect)
lua_register(luaState, "doTargetCombatMana", LuaScriptInterface::luaDoTargetCombatMana);
//doAreaCombatCondition(cid, pos, area, condition, effect)
lua_register(luaState, "doAreaCombatCondition", LuaScriptInterface::luaDoAreaCombatCondition);
//doTargetCombatCondition(cid, target, condition, effect)
lua_register(luaState, "doTargetCombatCondition", LuaScriptInterface::luaDoTargetCombatCondition);
//doAreaCombatDispel(cid, pos, area, type, effect)
lua_register(luaState, "doAreaCombatDispel", LuaScriptInterface::luaDoAreaCombatDispel);
//doTargetCombatDispel(cid, target, type, effect)
lua_register(luaState, "doTargetCombatDispel", LuaScriptInterface::luaDoTargetCombatDispel);
//doChallengeCreature(cid, target)
lua_register(luaState, "doChallengeCreature", LuaScriptInterface::luaDoChallengeCreature);
//addEvent(callback, delay, ...)
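	//e.g. from a script: addEvent(function(text) print(text) end, 1000, "delayed") -- extra arguments are forwarded to the callback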
lua_register(luaState, "addEvent", LuaScriptInterface::luaAddEvent);
//stopEvent(eventid)
lua_register(luaState, "stopEvent", LuaScriptInterface::luaStopEvent);
//saveServer()
lua_register(luaState, "saveServer", LuaScriptInterface::luaSaveServer);
//cleanMap()
lua_register(luaState, "cleanMap", LuaScriptInterface::luaCleanMap);
//debugPrint(text)
lua_register(luaState, "debugPrint", LuaScriptInterface::luaDebugPrint);
//isInWar(cid, target)
lua_register(luaState, "isInWar", LuaScriptInterface::luaIsInWar);
	//getWaypointPositionByName(name)
lua_register(luaState, "getWaypointPositionByName", LuaScriptInterface::luaGetWaypointPositionByName);
//sendChannelMessage(channelId, type, message)
lua_register(luaState, "sendChannelMessage", LuaScriptInterface::luaSendChannelMessage);
//sendGuildChannelMessage(guildId, type, message)
lua_register(luaState, "sendGuildChannelMessage", LuaScriptInterface::luaSendGuildChannelMessage);
#ifndef LUAJIT_VERSION
//bit operations for Lua, based on bitlib project release 24
//bit.bnot, bit.band, bit.bor, bit.bxor, bit.lshift, bit.rshift
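	//e.g. bit.band(0xFF00, 0x0FF0) == 0x0F00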
luaL_register(luaState, "bit", LuaScriptInterface::luaBitReg);
#endif
//configManager table
luaL_register(luaState, "configManager", LuaScriptInterface::luaConfigManagerTable);
//db table
luaL_register(luaState, "db", LuaScriptInterface::luaDatabaseTable);
//result table
luaL_register(luaState, "result", LuaScriptInterface::luaResultTable);
/* New functions */
//registerClass(className, baseClass, newFunction)
//registerTable(tableName)
//registerMethod(className, functionName, function)
//registerMetaMethod(className, functionName, function)
//registerGlobalMethod(functionName, function)
//registerVariable(tableName, name, value)
//registerGlobalVariable(name, value)
//registerEnum(value)
//registerEnumIn(tableName, value)
// Enums
registerEnum(ACCOUNT_TYPE_NORMAL)
registerEnum(ACCOUNT_TYPE_TUTOR)
registerEnum(ACCOUNT_TYPE_SENIORTUTOR)
registerEnum(ACCOUNT_TYPE_GAMEMASTER)
registerEnum(ACCOUNT_TYPE_GOD)
registerEnum(BUG_CATEGORY_MAP)
registerEnum(BUG_CATEGORY_TYPO)
registerEnum(BUG_CATEGORY_TECHNICAL)
registerEnum(BUG_CATEGORY_OTHER)
registerEnum(CALLBACK_PARAM_LEVELMAGICVALUE)
registerEnum(CALLBACK_PARAM_SKILLVALUE)
registerEnum(CALLBACK_PARAM_TARGETTILE)
registerEnum(CALLBACK_PARAM_TARGETCREATURE)
registerEnum(COMBAT_FORMULA_UNDEFINED)
registerEnum(COMBAT_FORMULA_LEVELMAGIC)
registerEnum(COMBAT_FORMULA_SKILL)
registerEnum(COMBAT_FORMULA_DAMAGE)
registerEnum(DIRECTION_NORTH)
registerEnum(DIRECTION_EAST)
registerEnum(DIRECTION_SOUTH)
registerEnum(DIRECTION_WEST)
registerEnum(DIRECTION_SOUTHWEST)
registerEnum(DIRECTION_SOUTHEAST)
registerEnum(DIRECTION_NORTHWEST)
registerEnum(DIRECTION_NORTHEAST)
registerEnum(COMBAT_NONE)
registerEnum(COMBAT_PHYSICALDAMAGE)
registerEnum(COMBAT_ENERGYDAMAGE)
registerEnum(COMBAT_EARTHDAMAGE)
registerEnum(COMBAT_FIREDAMAGE)
registerEnum(COMBAT_UNDEFINEDDAMAGE)
registerEnum(COMBAT_LIFEDRAIN)
registerEnum(COMBAT_MANADRAIN)
registerEnum(COMBAT_HEALING)
registerEnum(COMBAT_DROWNDAMAGE)
registerEnum(COMBAT_ICEDAMAGE)
registerEnum(COMBAT_HOLYDAMAGE)
registerEnum(COMBAT_DEATHDAMAGE)
registerEnum(COMBAT_PARAM_TYPE)
registerEnum(COMBAT_PARAM_EFFECT)
registerEnum(COMBAT_PARAM_DISTANCEEFFECT)
registerEnum(COMBAT_PARAM_BLOCKSHIELD)
registerEnum(COMBAT_PARAM_BLOCKARMOR)
registerEnum(COMBAT_PARAM_TARGETCASTERORTOPMOST)
registerEnum(COMBAT_PARAM_CREATEITEM)
registerEnum(COMBAT_PARAM_AGGRESSIVE)
registerEnum(COMBAT_PARAM_DISPEL)
registerEnum(COMBAT_PARAM_USECHARGES)
registerEnum(CONDITION_NONE)
registerEnum(CONDITION_POISON)
registerEnum(CONDITION_FIRE)
registerEnum(CONDITION_ENERGY)
registerEnum(CONDITION_BLEEDING)
registerEnum(CONDITION_HASTE)
registerEnum(CONDITION_PARALYZE)
registerEnum(CONDITION_OUTFIT)
registerEnum(CONDITION_INVISIBLE)
registerEnum(CONDITION_LIGHT)
registerEnum(CONDITION_MANASHIELD)
registerEnum(CONDITION_INFIGHT)
registerEnum(CONDITION_DRUNK)
registerEnum(CONDITION_EXHAUST_WEAPON)
registerEnum(CONDITION_REGENERATION)
registerEnum(CONDITION_SOUL)
registerEnum(CONDITION_DROWN)
registerEnum(CONDITION_MUTED)
registerEnum(CONDITION_CHANNELMUTEDTICKS)
registerEnum(CONDITION_YELLTICKS)
registerEnum(CONDITION_ATTRIBUTES)
registerEnum(CONDITION_FREEZING)
registerEnum(CONDITION_DAZZLED)
registerEnum(CONDITION_CURSED)
registerEnum(CONDITION_EXHAUST_COMBAT)
registerEnum(CONDITION_EXHAUST_HEAL)
registerEnum(CONDITION_PACIFIED)
registerEnum(CONDITION_SPELLCOOLDOWN)
registerEnum(CONDITION_SPELLGROUPCOOLDOWN)
registerEnum(CONDITIONID_DEFAULT)
registerEnum(CONDITIONID_COMBAT)
registerEnum(CONDITIONID_HEAD)
registerEnum(CONDITIONID_NECKLACE)
registerEnum(CONDITIONID_BACKPACK)
registerEnum(CONDITIONID_ARMOR)
registerEnum(CONDITIONID_RIGHT)
registerEnum(CONDITIONID_LEFT)
registerEnum(CONDITIONID_LEGS)
registerEnum(CONDITIONID_FEET)
registerEnum(CONDITIONID_RING)
registerEnum(CONDITIONID_AMMO)
registerEnum(CONDITION_PARAM_OWNER)
registerEnum(CONDITION_PARAM_TICKS)
registerEnum(CONDITION_PARAM_HEALTHGAIN)
registerEnum(CONDITION_PARAM_HEALTHTICKS)
registerEnum(CONDITION_PARAM_MANAGAIN)
registerEnum(CONDITION_PARAM_MANATICKS)
registerEnum(CONDITION_PARAM_DELAYED)
registerEnum(CONDITION_PARAM_SPEED)
registerEnum(CONDITION_PARAM_LIGHT_LEVEL)
registerEnum(CONDITION_PARAM_LIGHT_COLOR)
registerEnum(CONDITION_PARAM_SOULGAIN)
registerEnum(CONDITION_PARAM_SOULTICKS)
registerEnum(CONDITION_PARAM_MINVALUE)
registerEnum(CONDITION_PARAM_MAXVALUE)
registerEnum(CONDITION_PARAM_STARTVALUE)
registerEnum(CONDITION_PARAM_TICKINTERVAL)
registerEnum(CONDITION_PARAM_FORCEUPDATE)
registerEnum(CONDITION_PARAM_SKILL_MELEE)
registerEnum(CONDITION_PARAM_SKILL_FIST)
registerEnum(CONDITION_PARAM_SKILL_CLUB)
registerEnum(CONDITION_PARAM_SKILL_SWORD)
registerEnum(CONDITION_PARAM_SKILL_AXE)
registerEnum(CONDITION_PARAM_SKILL_DISTANCE)
registerEnum(CONDITION_PARAM_SKILL_SHIELD)
registerEnum(CONDITION_PARAM_SKILL_FISHING)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTSPERCENT)
registerEnum(CONDITION_PARAM_PERIODICDAMAGE)
registerEnum(CONDITION_PARAM_SKILL_MELEEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISTPERCENT)
registerEnum(CONDITION_PARAM_SKILL_CLUBPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SWORDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_AXEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_DISTANCEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SHIELDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISHINGPERCENT)
registerEnum(CONDITION_PARAM_BUFF_SPELL)
registerEnum(CONDITION_PARAM_SUBID)
registerEnum(CONDITION_PARAM_FIELD)
registerEnum(CONST_ME_NONE)
registerEnum(CONST_ME_DRAWBLOOD)
registerEnum(CONST_ME_LOSEENERGY)
registerEnum(CONST_ME_POFF)
registerEnum(CONST_ME_BLOCKHIT)
registerEnum(CONST_ME_EXPLOSIONAREA)
registerEnum(CONST_ME_EXPLOSIONHIT)
registerEnum(CONST_ME_FIREAREA)
registerEnum(CONST_ME_YELLOW_RINGS)
registerEnum(CONST_ME_GREEN_RINGS)
registerEnum(CONST_ME_HITAREA)
registerEnum(CONST_ME_TELEPORT)
registerEnum(CONST_ME_ENERGYHIT)
registerEnum(CONST_ME_MAGIC_BLUE)
registerEnum(CONST_ME_MAGIC_RED)
registerEnum(CONST_ME_MAGIC_GREEN)
registerEnum(CONST_ME_HITBYFIRE)
registerEnum(CONST_ME_HITBYPOISON)
registerEnum(CONST_ME_MORTAREA)
registerEnum(CONST_ME_SOUND_GREEN)
registerEnum(CONST_ME_SOUND_RED)
registerEnum(CONST_ME_POISONAREA)
registerEnum(CONST_ME_SOUND_YELLOW)
registerEnum(CONST_ME_SOUND_PURPLE)
registerEnum(CONST_ME_SOUND_BLUE)
registerEnum(CONST_ME_SOUND_WHITE)
registerEnum(CONST_ME_BUBBLES)
registerEnum(CONST_ME_CRAPS)
registerEnum(CONST_ME_GIFT_WRAPS)
registerEnum(CONST_ME_FIREWORK_YELLOW)
registerEnum(CONST_ME_FIREWORK_RED)
registerEnum(CONST_ME_FIREWORK_BLUE)
registerEnum(CONST_ME_STUN)
registerEnum(CONST_ME_SLEEP)
registerEnum(CONST_ME_WATERCREATURE)
registerEnum(CONST_ME_GROUNDSHAKER)
registerEnum(CONST_ME_HEARTS)
registerEnum(CONST_ME_FIREATTACK)
registerEnum(CONST_ME_ENERGYAREA)
registerEnum(CONST_ME_SMALLCLOUDS)
registerEnum(CONST_ME_HOLYDAMAGE)
registerEnum(CONST_ME_BIGCLOUDS)
registerEnum(CONST_ME_ICEAREA)
registerEnum(CONST_ME_ICETORNADO)
registerEnum(CONST_ME_ICEATTACK)
registerEnum(CONST_ME_STONES)
registerEnum(CONST_ME_SMALLPLANTS)
registerEnum(CONST_ME_CARNIPHILA)
registerEnum(CONST_ME_PURPLEENERGY)
registerEnum(CONST_ME_YELLOWENERGY)
registerEnum(CONST_ME_HOLYAREA)
registerEnum(CONST_ME_BIGPLANTS)
registerEnum(CONST_ME_CAKE)
registerEnum(CONST_ME_GIANTICE)
registerEnum(CONST_ME_WATERSPLASH)
registerEnum(CONST_ME_PLANTATTACK)
registerEnum(CONST_ME_TUTORIALARROW)
registerEnum(CONST_ME_TUTORIALSQUARE)
registerEnum(CONST_ME_MIRRORHORIZONTAL)
registerEnum(CONST_ME_MIRRORVERTICAL)
registerEnum(CONST_ME_SKULLHORIZONTAL)
registerEnum(CONST_ME_SKULLVERTICAL)
registerEnum(CONST_ME_ASSASSIN)
registerEnum(CONST_ME_STEPSHORIZONTAL)
registerEnum(CONST_ME_BLOODYSTEPS)
registerEnum(CONST_ME_STEPSVERTICAL)
registerEnum(CONST_ME_YALAHARIGHOST)
registerEnum(CONST_ME_BATS)
registerEnum(CONST_ME_SMOKE)
registerEnum(CONST_ME_INSECTS)
registerEnum(CONST_ME_DRAGONHEAD)
registerEnum(CONST_ME_ORCSHAMAN)
registerEnum(CONST_ME_ORCSHAMAN_FIRE)
registerEnum(CONST_ME_THUNDER)
registerEnum(CONST_ME_FERUMBRAS)
registerEnum(CONST_ME_CONFETTI_HORIZONTAL)
registerEnum(CONST_ME_CONFETTI_VERTICAL)
registerEnum(CONST_ME_BLACKSMOKE)
registerEnum(CONST_ME_REDSMOKE)
registerEnum(CONST_ME_YELLOWSMOKE)
registerEnum(CONST_ME_GREENSMOKE)
registerEnum(CONST_ME_PURPLESMOKE)
registerEnum(CONST_ANI_NONE)
registerEnum(CONST_ANI_SPEAR)
registerEnum(CONST_ANI_BOLT)
registerEnum(CONST_ANI_ARROW)
registerEnum(CONST_ANI_FIRE)
registerEnum(CONST_ANI_ENERGY)
registerEnum(CONST_ANI_POISONARROW)
registerEnum(CONST_ANI_BURSTARROW)
registerEnum(CONST_ANI_THROWINGSTAR)
registerEnum(CONST_ANI_THROWINGKNIFE)
registerEnum(CONST_ANI_SMALLSTONE)
registerEnum(CONST_ANI_DEATH)
registerEnum(CONST_ANI_LARGEROCK)
registerEnum(CONST_ANI_SNOWBALL)
registerEnum(CONST_ANI_POWERBOLT)
registerEnum(CONST_ANI_POISON)
registerEnum(CONST_ANI_INFERNALBOLT)
registerEnum(CONST_ANI_HUNTINGSPEAR)
registerEnum(CONST_ANI_ENCHANTEDSPEAR)
registerEnum(CONST_ANI_REDSTAR)
registerEnum(CONST_ANI_GREENSTAR)
registerEnum(CONST_ANI_ROYALSPEAR)
registerEnum(CONST_ANI_SNIPERARROW)
registerEnum(CONST_ANI_ONYXARROW)
registerEnum(CONST_ANI_PIERCINGBOLT)
registerEnum(CONST_ANI_WHIRLWINDSWORD)
registerEnum(CONST_ANI_WHIRLWINDAXE)
registerEnum(CONST_ANI_WHIRLWINDCLUB)
registerEnum(CONST_ANI_ETHEREALSPEAR)
registerEnum(CONST_ANI_ICE)
registerEnum(CONST_ANI_EARTH)
registerEnum(CONST_ANI_HOLY)
registerEnum(CONST_ANI_SUDDENDEATH)
registerEnum(CONST_ANI_FLASHARROW)
registerEnum(CONST_ANI_FLAMMINGARROW)
registerEnum(CONST_ANI_SHIVERARROW)
registerEnum(CONST_ANI_ENERGYBALL)
registerEnum(CONST_ANI_SMALLICE)
registerEnum(CONST_ANI_SMALLHOLY)
registerEnum(CONST_ANI_SMALLEARTH)
registerEnum(CONST_ANI_EARTHARROW)
registerEnum(CONST_ANI_EXPLOSION)
registerEnum(CONST_ANI_CAKE)
registerEnum(CONST_ANI_TARSALARROW)
registerEnum(CONST_ANI_VORTEXBOLT)
registerEnum(CONST_ANI_PRISMATICBOLT)
registerEnum(CONST_ANI_CRYSTALLINEARROW)
registerEnum(CONST_ANI_DRILLBOLT)
registerEnum(CONST_ANI_ENVENOMEDARROW)
registerEnum(CONST_ANI_GLOOTHSPEAR)
registerEnum(CONST_ANI_SIMPLEARROW)
registerEnum(CONST_ANI_WEAPONTYPE)
registerEnum(CONST_PROP_BLOCKSOLID)
registerEnum(CONST_PROP_HASHEIGHT)
registerEnum(CONST_PROP_BLOCKPROJECTILE)
registerEnum(CONST_PROP_BLOCKPATH)
registerEnum(CONST_PROP_ISVERTICAL)
registerEnum(CONST_PROP_ISHORIZONTAL)
registerEnum(CONST_PROP_MOVEABLE)
registerEnum(CONST_PROP_IMMOVABLEBLOCKSOLID)
registerEnum(CONST_PROP_IMMOVABLEBLOCKPATH)
registerEnum(CONST_PROP_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(CONST_PROP_NOFIELDBLOCKPATH)
registerEnum(CONST_PROP_SUPPORTHANGABLE)
registerEnum(CONST_SLOT_HEAD)
registerEnum(CONST_SLOT_NECKLACE)
registerEnum(CONST_SLOT_BACKPACK)
registerEnum(CONST_SLOT_ARMOR)
registerEnum(CONST_SLOT_RIGHT)
registerEnum(CONST_SLOT_LEFT)
registerEnum(CONST_SLOT_LEGS)
registerEnum(CONST_SLOT_FEET)
registerEnum(CONST_SLOT_RING)
registerEnum(CONST_SLOT_AMMO)
registerEnum(CREATURE_EVENT_NONE)
registerEnum(CREATURE_EVENT_LOGIN)
registerEnum(CREATURE_EVENT_LOGOUT)
registerEnum(CREATURE_EVENT_THINK)
registerEnum(CREATURE_EVENT_PREPAREDEATH)
registerEnum(CREATURE_EVENT_DEATH)
registerEnum(CREATURE_EVENT_KILL)
registerEnum(CREATURE_EVENT_ADVANCE)
registerEnum(CREATURE_EVENT_MODALWINDOW)
registerEnum(CREATURE_EVENT_TEXTEDIT)
registerEnum(CREATURE_EVENT_HEALTHCHANGE)
registerEnum(CREATURE_EVENT_MANACHANGE)
registerEnum(CREATURE_EVENT_EXTENDED_OPCODE)
registerEnum(GAME_STATE_STARTUP)
registerEnum(GAME_STATE_INIT)
registerEnum(GAME_STATE_NORMAL)
registerEnum(GAME_STATE_CLOSED)
registerEnum(GAME_STATE_SHUTDOWN)
registerEnum(GAME_STATE_CLOSING)
registerEnum(GAME_STATE_MAINTAIN)
registerEnum(MESSAGE_STATUS_CONSOLE_BLUE)
registerEnum(MESSAGE_STATUS_CONSOLE_RED)
registerEnum(MESSAGE_STATUS_DEFAULT)
registerEnum(MESSAGE_STATUS_WARNING)
registerEnum(MESSAGE_EVENT_ADVANCE)
registerEnum(MESSAGE_STATUS_SMALL)
registerEnum(MESSAGE_INFO_DESCR)
registerEnum(MESSAGE_DAMAGE_DEALT)
registerEnum(MESSAGE_DAMAGE_RECEIVED)
registerEnum(MESSAGE_HEALED)
registerEnum(MESSAGE_EXPERIENCE)
registerEnum(MESSAGE_DAMAGE_OTHERS)
registerEnum(MESSAGE_HEALED_OTHERS)
registerEnum(MESSAGE_EXPERIENCE_OTHERS)
registerEnum(MESSAGE_EVENT_DEFAULT)
registerEnum(MESSAGE_EVENT_ORANGE)
registerEnum(MESSAGE_STATUS_CONSOLE_ORANGE)
registerEnum(CREATURETYPE_PLAYER)
registerEnum(CREATURETYPE_MONSTER)
registerEnum(CREATURETYPE_NPC)
registerEnum(CREATURETYPE_SUMMON_OWN)
registerEnum(CREATURETYPE_SUMMON_OTHERS)
registerEnum(CLIENTOS_LINUX)
registerEnum(CLIENTOS_WINDOWS)
registerEnum(CLIENTOS_FLASH)
registerEnum(CLIENTOS_OTCLIENT_LINUX)
registerEnum(CLIENTOS_OTCLIENT_WINDOWS)
registerEnum(CLIENTOS_OTCLIENT_MAC)
registerEnum(FIGHTMODE_ATTACK)
registerEnum(FIGHTMODE_BALANCED)
registerEnum(FIGHTMODE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_NONE)
registerEnum(ITEM_ATTRIBUTE_ACTIONID)
registerEnum(ITEM_ATTRIBUTE_UNIQUEID)
registerEnum(ITEM_ATTRIBUTE_DESCRIPTION)
registerEnum(ITEM_ATTRIBUTE_TEXT)
registerEnum(ITEM_ATTRIBUTE_DATE)
registerEnum(ITEM_ATTRIBUTE_WRITER)
registerEnum(ITEM_ATTRIBUTE_NAME)
registerEnum(ITEM_ATTRIBUTE_ARTICLE)
registerEnum(ITEM_ATTRIBUTE_PLURALNAME)
registerEnum(ITEM_ATTRIBUTE_WEIGHT)
registerEnum(ITEM_ATTRIBUTE_ATTACK)
registerEnum(ITEM_ATTRIBUTE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_EXTRADEFENSE)
registerEnum(ITEM_ATTRIBUTE_ARMOR)
registerEnum(ITEM_ATTRIBUTE_HITCHANCE)
registerEnum(ITEM_ATTRIBUTE_SHOOTRANGE)
registerEnum(ITEM_ATTRIBUTE_OWNER)
registerEnum(ITEM_ATTRIBUTE_DURATION)
registerEnum(ITEM_ATTRIBUTE_DECAYSTATE)
registerEnum(ITEM_ATTRIBUTE_CORPSEOWNER)
registerEnum(ITEM_ATTRIBUTE_CHARGES)
registerEnum(ITEM_ATTRIBUTE_FLUIDTYPE)
registerEnum(ITEM_ATTRIBUTE_DOORID)
registerEnum(ITEM_TYPE_DEPOT)
registerEnum(ITEM_TYPE_MAILBOX)
registerEnum(ITEM_TYPE_TRASHHOLDER)
registerEnum(ITEM_TYPE_CONTAINER)
registerEnum(ITEM_TYPE_DOOR)
registerEnum(ITEM_TYPE_MAGICFIELD)
registerEnum(ITEM_TYPE_TELEPORT)
registerEnum(ITEM_TYPE_BED)
registerEnum(ITEM_TYPE_KEY)
registerEnum(ITEM_TYPE_RUNE)
registerEnum(ITEM_BAG)
registerEnum(ITEM_GOLD_COIN)
registerEnum(ITEM_PLATINUM_COIN)
registerEnum(ITEM_CRYSTAL_COIN)
registerEnum(ITEM_AMULETOFLOSS)
registerEnum(ITEM_PARCEL)
registerEnum(ITEM_LABEL)
registerEnum(ITEM_FIREFIELD_PVP_FULL)
registerEnum(ITEM_FIREFIELD_PVP_MEDIUM)
registerEnum(ITEM_FIREFIELD_PVP_SMALL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_FULL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_MEDIUM)
registerEnum(ITEM_FIREFIELD_PERSISTENT_SMALL)
registerEnum(ITEM_FIREFIELD_NOPVP)
registerEnum(ITEM_POISONFIELD_PVP)
registerEnum(ITEM_POISONFIELD_PERSISTENT)
registerEnum(ITEM_POISONFIELD_NOPVP)
registerEnum(ITEM_ENERGYFIELD_PVP)
registerEnum(ITEM_ENERGYFIELD_PERSISTENT)
registerEnum(ITEM_ENERGYFIELD_NOPVP)
registerEnum(ITEM_MAGICWALL)
registerEnum(ITEM_MAGICWALL_PERSISTENT)
registerEnum(ITEM_MAGICWALL_SAFE)
registerEnum(ITEM_WILDGROWTH)
registerEnum(ITEM_WILDGROWTH_PERSISTENT)
registerEnum(ITEM_WILDGROWTH_SAFE)
registerEnum(PlayerFlag_CannotUseCombat)
registerEnum(PlayerFlag_CannotAttackPlayer)
registerEnum(PlayerFlag_CannotAttackMonster)
registerEnum(PlayerFlag_CannotBeAttacked)
registerEnum(PlayerFlag_CanConvinceAll)
registerEnum(PlayerFlag_CanSummonAll)
registerEnum(PlayerFlag_CanIllusionAll)
registerEnum(PlayerFlag_CanSenseInvisibility)
registerEnum(PlayerFlag_IgnoredByMonsters)
registerEnum(PlayerFlag_NotGainInFight)
registerEnum(PlayerFlag_HasInfiniteMana)
registerEnum(PlayerFlag_HasInfiniteSoul)
registerEnum(PlayerFlag_HasNoExhaustion)
registerEnum(PlayerFlag_CannotUseSpells)
registerEnum(PlayerFlag_CannotPickupItem)
registerEnum(PlayerFlag_CanAlwaysLogin)
registerEnum(PlayerFlag_CanBroadcast)
registerEnum(PlayerFlag_CanEditHouses)
registerEnum(PlayerFlag_CannotBeBanned)
registerEnum(PlayerFlag_CannotBePushed)
registerEnum(PlayerFlag_HasInfiniteCapacity)
registerEnum(PlayerFlag_CanPushAllCreatures)
registerEnum(PlayerFlag_CanTalkRedPrivate)
registerEnum(PlayerFlag_CanTalkRedChannel)
registerEnum(PlayerFlag_TalkOrangeHelpChannel)
registerEnum(PlayerFlag_NotGainExperience)
registerEnum(PlayerFlag_NotGainMana)
registerEnum(PlayerFlag_NotGainHealth)
registerEnum(PlayerFlag_NotGainSkill)
registerEnum(PlayerFlag_SetMaxSpeed)
registerEnum(PlayerFlag_SpecialVIP)
registerEnum(PlayerFlag_NotGenerateLoot)
registerEnum(PlayerFlag_CanTalkRedChannelAnonymous)
registerEnum(PlayerFlag_IgnoreProtectionZone)
registerEnum(PlayerFlag_IgnoreSpellCheck)
registerEnum(PlayerFlag_IgnoreWeaponCheck)
registerEnum(PlayerFlag_CannotBeMuted)
registerEnum(PlayerFlag_IsAlwaysPremium)
registerEnum(PLAYERSEX_FEMALE)
registerEnum(PLAYERSEX_MALE)
registerEnum(VOCATION_NONE)
registerEnum(SKILL_FIST)
registerEnum(SKILL_CLUB)
registerEnum(SKILL_SWORD)
registerEnum(SKILL_AXE)
registerEnum(SKILL_DISTANCE)
registerEnum(SKILL_SHIELD)
registerEnum(SKILL_FISHING)
registerEnum(SKILL_MAGLEVEL)
registerEnum(SKILL_LEVEL)
registerEnum(SKULL_NONE)
registerEnum(SKULL_YELLOW)
registerEnum(SKULL_GREEN)
registerEnum(SKULL_WHITE)
registerEnum(SKULL_RED)
registerEnum(SKULL_BLACK)
registerEnum(SKULL_ORANGE)
registerEnum(TALKTYPE_SAY)
registerEnum(TALKTYPE_WHISPER)
registerEnum(TALKTYPE_YELL)
registerEnum(TALKTYPE_PRIVATE_FROM)
registerEnum(TALKTYPE_PRIVATE_TO)
registerEnum(TALKTYPE_CHANNEL_Y)
registerEnum(TALKTYPE_CHANNEL_O)
registerEnum(TALKTYPE_PRIVATE_NP)
registerEnum(TALKTYPE_PRIVATE_PN)
registerEnum(TALKTYPE_BROADCAST)
registerEnum(TALKTYPE_CHANNEL_R1)
registerEnum(TALKTYPE_PRIVATE_RED_FROM)
registerEnum(TALKTYPE_PRIVATE_RED_TO)
registerEnum(TALKTYPE_MONSTER_SAY)
registerEnum(TALKTYPE_MONSTER_YELL)
registerEnum(TALKTYPE_CHANNEL_R2)
registerEnum(TEXTCOLOR_BLUE)
registerEnum(TEXTCOLOR_LIGHTGREEN)
registerEnum(TEXTCOLOR_LIGHTBLUE)
registerEnum(TEXTCOLOR_MAYABLUE)
registerEnum(TEXTCOLOR_DARKRED)
registerEnum(TEXTCOLOR_LIGHTGREY)
registerEnum(TEXTCOLOR_SKYBLUE)
registerEnum(TEXTCOLOR_PURPLE)
registerEnum(TEXTCOLOR_RED)
registerEnum(TEXTCOLOR_ORANGE)
registerEnum(TEXTCOLOR_YELLOW)
registerEnum(TEXTCOLOR_WHITE_EXP)
registerEnum(TEXTCOLOR_NONE)
registerEnum(TILESTATE_NONE)
registerEnum(TILESTATE_PROTECTIONZONE)
registerEnum(TILESTATE_NOPVPZONE)
registerEnum(TILESTATE_NOLOGOUT)
registerEnum(TILESTATE_PVPZONE)
registerEnum(TILESTATE_FLOORCHANGE)
registerEnum(TILESTATE_FLOORCHANGE_DOWN)
registerEnum(TILESTATE_FLOORCHANGE_NORTH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH)
registerEnum(TILESTATE_FLOORCHANGE_EAST)
registerEnum(TILESTATE_FLOORCHANGE_WEST)
registerEnum(TILESTATE_TELEPORT)
registerEnum(TILESTATE_MAGICFIELD)
registerEnum(TILESTATE_MAILBOX)
registerEnum(TILESTATE_TRASHHOLDER)
registerEnum(TILESTATE_BED)
registerEnum(TILESTATE_DEPOT)
registerEnum(TILESTATE_BLOCKSOLID)
registerEnum(TILESTATE_BLOCKPATH)
registerEnum(TILESTATE_IMMOVABLEBLOCKSOLID)
registerEnum(TILESTATE_IMMOVABLEBLOCKPATH)
registerEnum(TILESTATE_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(TILESTATE_NOFIELDBLOCKPATH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH_ALT)
registerEnum(TILESTATE_FLOORCHANGE_EAST_ALT)
registerEnum(TILESTATE_SUPPORTS_HANGABLE)
registerEnum(WEAPON_NONE)
registerEnum(WEAPON_SWORD)
registerEnum(WEAPON_CLUB)
registerEnum(WEAPON_AXE)
registerEnum(WEAPON_SHIELD)
registerEnum(WEAPON_DISTANCE)
registerEnum(WEAPON_WAND)
registerEnum(WEAPON_AMMO)
registerEnum(WORLD_TYPE_NO_PVP)
registerEnum(WORLD_TYPE_PVP)
registerEnum(WORLD_TYPE_PVP_ENFORCED)
// Use with container:addItem, container:addItemEx and possibly other functions.
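	// e.g. container:addItem(ITEM_CRYSTAL_COIN, 1, INDEX_WHEREEVER, FLAG_NOLIMIT) (signature approximate)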
registerEnum(FLAG_NOLIMIT)
registerEnum(FLAG_IGNOREBLOCKITEM)
registerEnum(FLAG_IGNOREBLOCKCREATURE)
registerEnum(FLAG_CHILDISOWNER)
registerEnum(FLAG_PATHFINDING)
registerEnum(FLAG_IGNOREFIELDDAMAGE)
registerEnum(FLAG_IGNORENOTMOVEABLE)
registerEnum(FLAG_IGNOREAUTOSTACK)
// Use with itemType:getSlotPosition
registerEnum(SLOTP_WHEREEVER)
registerEnum(SLOTP_HEAD)
registerEnum(SLOTP_NECKLACE)
registerEnum(SLOTP_BACKPACK)
registerEnum(SLOTP_ARMOR)
registerEnum(SLOTP_RIGHT)
registerEnum(SLOTP_LEFT)
registerEnum(SLOTP_LEGS)
registerEnum(SLOTP_FEET)
registerEnum(SLOTP_RING)
registerEnum(SLOTP_AMMO)
registerEnum(SLOTP_DEPOT)
registerEnum(SLOTP_TWO_HAND)
// Use with combat functions
registerEnum(ORIGIN_NONE)
registerEnum(ORIGIN_CONDITION)
registerEnum(ORIGIN_SPELL)
registerEnum(ORIGIN_MELEE)
registerEnum(ORIGIN_RANGED)
// Use with house:getAccessList, house:setAccessList
registerEnum(GUEST_LIST)
registerEnum(SUBOWNER_LIST)
// Use with npc:setSpeechBubble
registerEnum(SPEECHBUBBLE_NONE)
registerEnum(SPEECHBUBBLE_NORMAL)
registerEnum(SPEECHBUBBLE_TRADE)
registerEnum(SPEECHBUBBLE_QUEST)
registerEnum(SPEECHBUBBLE_QUESTTRADER)
// Use with player:addMapMark
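	// e.g. player:addMapMark(player:getPosition(), MAPMARK_STAR, "treasure") (signature approximate)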
registerEnum(MAPMARK_TICK)
registerEnum(MAPMARK_QUESTION)
registerEnum(MAPMARK_EXCLAMATION)
registerEnum(MAPMARK_STAR)
registerEnum(MAPMARK_CROSS)
registerEnum(MAPMARK_TEMPLE)
registerEnum(MAPMARK_KISS)
registerEnum(MAPMARK_SHOVEL)
registerEnum(MAPMARK_SWORD)
registerEnum(MAPMARK_FLAG)
registerEnum(MAPMARK_LOCK)
registerEnum(MAPMARK_BAG)
registerEnum(MAPMARK_SKULL)
registerEnum(MAPMARK_DOLLAR)
registerEnum(MAPMARK_REDNORTH)
registerEnum(MAPMARK_REDSOUTH)
registerEnum(MAPMARK_REDEAST)
registerEnum(MAPMARK_REDWEST)
registerEnum(MAPMARK_GREENNORTH)
registerEnum(MAPMARK_GREENSOUTH)
// Use with Game.getReturnMessage
registerEnum(RETURNVALUE_NOERROR)
registerEnum(RETURNVALUE_NOTPOSSIBLE)
registerEnum(RETURNVALUE_NOTENOUGHROOM)
registerEnum(RETURNVALUE_PLAYERISPZLOCKED)
registerEnum(RETURNVALUE_PLAYERISNOTINVITED)
registerEnum(RETURNVALUE_CANNOTTHROW)
registerEnum(RETURNVALUE_THEREISNOWAY)
registerEnum(RETURNVALUE_DESTINATIONOUTOFREACH)
registerEnum(RETURNVALUE_CREATUREBLOCK)
registerEnum(RETURNVALUE_NOTMOVEABLE)
registerEnum(RETURNVALUE_DROPTWOHANDEDITEM)
registerEnum(RETURNVALUE_BOTHHANDSNEEDTOBEFREE)
registerEnum(RETURNVALUE_CANONLYUSEONEWEAPON)
registerEnum(RETURNVALUE_NEEDEXCHANGE)
registerEnum(RETURNVALUE_CANNOTBEDRESSED)
registerEnum(RETURNVALUE_PUTTHISOBJECTINYOURHAND)
registerEnum(RETURNVALUE_PUTTHISOBJECTINBOTHHANDS)
registerEnum(RETURNVALUE_TOOFARAWAY)
registerEnum(RETURNVALUE_FIRSTGODOWNSTAIRS)
registerEnum(RETURNVALUE_FIRSTGOUPSTAIRS)
registerEnum(RETURNVALUE_CONTAINERNOTENOUGHROOM)
registerEnum(RETURNVALUE_NOTENOUGHCAPACITY)
registerEnum(RETURNVALUE_CANNOTPICKUP)
registerEnum(RETURNVALUE_THISISIMPOSSIBLE)
registerEnum(RETURNVALUE_DEPOTISFULL)
registerEnum(RETURNVALUE_CREATUREDOESNOTEXIST)
registerEnum(RETURNVALUE_CANNOTUSETHISOBJECT)
registerEnum(RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE)
registerEnum(RETURNVALUE_NOTREQUIREDLEVELTOUSERUNE)
registerEnum(RETURNVALUE_YOUAREALREADYTRADING)
registerEnum(RETURNVALUE_THISPLAYERISALREADYTRADING)
registerEnum(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT)
registerEnum(RETURNVALUE_DIRECTPLAYERSHOOT)
registerEnum(RETURNVALUE_NOTENOUGHLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMAGICLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMANA)
registerEnum(RETURNVALUE_NOTENOUGHSOUL)
registerEnum(RETURNVALUE_YOUAREEXHAUSTED)
registerEnum(RETURNVALUE_PLAYERISNOTREACHABLE)
registerEnum(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISPLAYER)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONWHILEINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISCREATURE)
registerEnum(RETURNVALUE_YOUCANONLYUSEITONCREATURES)
registerEnum(RETURNVALUE_CREATUREISNOTREACHABLE)
registerEnum(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS)
registerEnum(RETURNVALUE_YOUNEEDPREMIUMACCOUNT)
registerEnum(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL)
registerEnum(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL)
registerEnum(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDLEAVEPVPZONE)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDENTERPVPZONE)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINANOPVPZONE)
registerEnum(RETURNVALUE_YOUCANNOTLOGOUTHERE)
registerEnum(RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL)
registerEnum(RETURNVALUE_CANNOTCONJUREITEMHERE)
registerEnum(RETURNVALUE_YOUNEEDTOSPLITYOURSPEARS)
registerEnum(RETURNVALUE_NAMEISTOOAMBIGUOUS)
registerEnum(RETURNVALUE_CANONLYUSEONESHIELD)
registerEnum(RETURNVALUE_NOPARTYMEMBERSINRANGE)
registerEnum(RETURNVALUE_YOUARENOTTHEOWNER)
registerEnum(RETURNVALUE_TRADEPLAYERFARAWAY)
registerEnum(RETURNVALUE_YOUDONTOWNTHISHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERHIGHESTBIDDER)
registerEnum(RETURNVALUE_YOUCANNOTTRADETHISHOUSE)
registerEnum(RELOAD_TYPE_ALL)
registerEnum(RELOAD_TYPE_ACTIONS)
registerEnum(RELOAD_TYPE_CHAT)
registerEnum(RELOAD_TYPE_COMMANDS)
registerEnum(RELOAD_TYPE_CONFIG)
registerEnum(RELOAD_TYPE_CREATURESCRIPTS)
registerEnum(RELOAD_TYPE_EVENTS)
registerEnum(RELOAD_TYPE_GLOBAL)
registerEnum(RELOAD_TYPE_GLOBALEVENTS)
registerEnum(RELOAD_TYPE_ITEMS)
registerEnum(RELOAD_TYPE_MONSTERS)
registerEnum(RELOAD_TYPE_MOUNTS)
registerEnum(RELOAD_TYPE_MOVEMENTS)
registerEnum(RELOAD_TYPE_NPCS)
registerEnum(RELOAD_TYPE_QUESTS)
registerEnum(RELOAD_TYPE_RAIDS)
registerEnum(RELOAD_TYPE_SPELLS)
registerEnum(RELOAD_TYPE_TALKACTIONS)
registerEnum(RELOAD_TYPE_WEAPONS)
// _G
registerGlobalVariable("INDEX_WHEREEVER", INDEX_WHEREEVER);
registerGlobalBoolean("VIRTUAL_PARENT", true);
registerGlobalMethod("isType", LuaScriptInterface::luaIsType);
registerGlobalMethod("rawgetmetatable", LuaScriptInterface::luaRawGetMetatable);
// configKeys
registerTable("configKeys");
registerEnumIn("configKeys", ConfigManager::ALLOW_CHANGEOUTFIT)
registerEnumIn("configKeys", ConfigManager::ONE_PLAYER_ON_ACCOUNT)
registerEnumIn("configKeys", ConfigManager::AIMBOT_HOTKEY_ENABLED)
registerEnumIn("configKeys", ConfigManager::REMOVE_RUNE_CHARGES)
registerEnumIn("configKeys", ConfigManager::EXPERIENCE_FROM_PLAYERS)
registerEnumIn("configKeys", ConfigManager::FREE_PREMIUM)
registerEnumIn("configKeys", ConfigManager::REPLACE_KICK_ON_LOGIN)
registerEnumIn("configKeys", ConfigManager::ALLOW_CLONES)
registerEnumIn("configKeys", ConfigManager::BIND_ONLY_GLOBAL_ADDRESS)
registerEnumIn("configKeys", ConfigManager::OPTIMIZE_DATABASE)
registerEnumIn("configKeys", ConfigManager::MARKET_PREMIUM)
registerEnumIn("configKeys", ConfigManager::EMOTE_SPELLS)
registerEnumIn("configKeys", ConfigManager::STAMINA_SYSTEM)
registerEnumIn("configKeys", ConfigManager::WARN_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CONVERT_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_EQUIPMENT_SLOTS)
registerEnumIn("configKeys", ConfigManager::MAP_NAME)
registerEnumIn("configKeys", ConfigManager::HOUSE_RENT_PERIOD)
registerEnumIn("configKeys", ConfigManager::SERVER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_EMAIL)
registerEnumIn("configKeys", ConfigManager::URL)
registerEnumIn("configKeys", ConfigManager::LOCATION)
registerEnumIn("configKeys", ConfigManager::IP)
registerEnumIn("configKeys", ConfigManager::MOTD)
registerEnumIn("configKeys", ConfigManager::WORLD_TYPE)
registerEnumIn("configKeys", ConfigManager::MYSQL_HOST)
registerEnumIn("configKeys", ConfigManager::MYSQL_USER)
registerEnumIn("configKeys", ConfigManager::MYSQL_PASS)
registerEnumIn("configKeys", ConfigManager::MYSQL_DB)
registerEnumIn("configKeys", ConfigManager::MYSQL_SOCK)
registerEnumIn("configKeys", ConfigManager::DEFAULT_PRIORITY)
registerEnumIn("configKeys", ConfigManager::MAP_AUTHOR)
registerEnumIn("configKeys", ConfigManager::SQL_PORT)
registerEnumIn("configKeys", ConfigManager::MAX_PLAYERS)
registerEnumIn("configKeys", ConfigManager::PZ_LOCKED)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRANGE)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRADIUS)
registerEnumIn("configKeys", ConfigManager::RATE_EXPERIENCE)
registerEnumIn("configKeys", ConfigManager::RATE_SKILL)
registerEnumIn("configKeys", ConfigManager::RATE_LOOT)
registerEnumIn("configKeys", ConfigManager::RATE_MAGIC)
registerEnumIn("configKeys", ConfigManager::RATE_SPAWN)
registerEnumIn("configKeys", ConfigManager::HOUSE_PRICE)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_RED)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_BLACK)
registerEnumIn("configKeys", ConfigManager::MAX_MESSAGEBUFFER)
registerEnumIn("configKeys", ConfigManager::ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::EX_ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::KICK_AFTER_MINUTES)
registerEnumIn("configKeys", ConfigManager::PROTECTION_LEVEL)
registerEnumIn("configKeys", ConfigManager::DEATH_LOSE_PERCENT)
registerEnumIn("configKeys", ConfigManager::STATUSQUERY_TIMEOUT)
registerEnumIn("configKeys", ConfigManager::FRAG_TIME)
registerEnumIn("configKeys", ConfigManager::WHITE_SKULL_TIME)
registerEnumIn("configKeys", ConfigManager::GAME_PORT)
registerEnumIn("configKeys", ConfigManager::LOGIN_PORT)
registerEnumIn("configKeys", ConfigManager::STATUS_PORT)
registerEnumIn("configKeys", ConfigManager::STAIRHOP_DELAY)
registerEnumIn("configKeys", ConfigManager::MARKET_OFFER_DURATION)
registerEnumIn("configKeys", ConfigManager::CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES)
registerEnumIn("configKeys", ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER)
registerEnumIn("configKeys", ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)
registerEnumIn("configKeys", ConfigManager::MAX_PACKETS_PER_SECOND)
// os
registerMethod("os", "mtime", LuaScriptInterface::luaSystemTime);
// table
registerMethod("table", "create", LuaScriptInterface::luaTableCreate);
// Game
registerTable("Game");
registerMethod("Game", "getSpectators", LuaScriptInterface::luaGameGetSpectators);
registerMethod("Game", "getPlayers", LuaScriptInterface::luaGameGetPlayers);
registerMethod("Game", "loadMap", LuaScriptInterface::luaGameLoadMap);
registerMethod("Game", "getExperienceStage", LuaScriptInterface::luaGameGetExperienceStage);
registerMethod("Game", "getMonsterCount", LuaScriptInterface::luaGameGetMonsterCount);
registerMethod("Game", "getPlayerCount", LuaScriptInterface::luaGameGetPlayerCount);
registerMethod("Game", "getNpcCount", LuaScriptInterface::luaGameGetNpcCount);
registerMethod("Game", "getTowns", LuaScriptInterface::luaGameGetTowns);
registerMethod("Game", "getHouses", LuaScriptInterface::luaGameGetHouses);
registerMethod("Game", "getGameState", LuaScriptInterface::luaGameGetGameState);
registerMethod("Game", "setGameState", LuaScriptInterface::luaGameSetGameState);
registerMethod("Game", "getWorldType", LuaScriptInterface::luaGameGetWorldType);
registerMethod("Game", "setWorldType", LuaScriptInterface::luaGameSetWorldType);
registerMethod("Game", "getReturnMessage", LuaScriptInterface::luaGameGetReturnMessage);
registerMethod("Game", "createItem", LuaScriptInterface::luaGameCreateItem);
registerMethod("Game", "createContainer", LuaScriptInterface::luaGameCreateContainer);
registerMethod("Game", "createMonster", LuaScriptInterface::luaGameCreateMonster);
registerMethod("Game", "createNpc", LuaScriptInterface::luaGameCreateNpc);
registerMethod("Game", "createTile", LuaScriptInterface::luaGameCreateTile);
registerMethod("Game", "startRaid", LuaScriptInterface::luaGameStartRaid);
registerMethod("Game", "getClientVersion", LuaScriptInterface::luaGameGetClientVersion);
registerMethod("Game", "reload", LuaScriptInterface::luaGameReload);
// Variant
registerClass("Variant", "", LuaScriptInterface::luaVariantCreate);
registerMethod("Variant", "getNumber", LuaScriptInterface::luaVariantGetNumber);
registerMethod("Variant", "getString", LuaScriptInterface::luaVariantGetString);
registerMethod("Variant", "getPosition", LuaScriptInterface::luaVariantGetPosition);
// Position
registerClass("Position", "", LuaScriptInterface::luaPositionCreate);
registerMetaMethod("Position", "__add", LuaScriptInterface::luaPositionAdd);
registerMetaMethod("Position", "__sub", LuaScriptInterface::luaPositionSub);
registerMetaMethod("Position", "__eq", LuaScriptInterface::luaPositionCompare);
registerMethod("Position", "getDistance", LuaScriptInterface::luaPositionGetDistance);
registerMethod("Position", "isSightClear", LuaScriptInterface::luaPositionIsSightClear);
registerMethod("Position", "sendMagicEffect", LuaScriptInterface::luaPositionSendMagicEffect);
registerMethod("Position", "sendDistanceEffect", LuaScriptInterface::luaPositionSendDistanceEffect);
// Tile
registerClass("Tile", "", LuaScriptInterface::luaTileCreate);
registerMetaMethod("Tile", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Tile", "getPosition", LuaScriptInterface::luaTileGetPosition);
registerMethod("Tile", "getGround", LuaScriptInterface::luaTileGetGround);
registerMethod("Tile", "getThing", LuaScriptInterface::luaTileGetThing);
registerMethod("Tile", "getThingCount", LuaScriptInterface::luaTileGetThingCount);
registerMethod("Tile", "getTopVisibleThing", LuaScriptInterface::luaTileGetTopVisibleThing);
registerMethod("Tile", "getTopTopItem", LuaScriptInterface::luaTileGetTopTopItem);
registerMethod("Tile", "getTopDownItem", LuaScriptInterface::luaTileGetTopDownItem);
registerMethod("Tile", "getFieldItem", LuaScriptInterface::luaTileGetFieldItem);
registerMethod("Tile", "getItemById", LuaScriptInterface::luaTileGetItemById);
registerMethod("Tile", "getItemByType", LuaScriptInterface::luaTileGetItemByType);
registerMethod("Tile", "getItemByTopOrder", LuaScriptInterface::luaTileGetItemByTopOrder);
registerMethod("Tile", "getItemCountById", LuaScriptInterface::luaTileGetItemCountById);
registerMethod("Tile", "getBottomCreature", LuaScriptInterface::luaTileGetBottomCreature);
registerMethod("Tile", "getTopCreature", LuaScriptInterface::luaTileGetTopCreature);
registerMethod("Tile", "getBottomVisibleCreature", LuaScriptInterface::luaTileGetBottomVisibleCreature);
registerMethod("Tile", "getTopVisibleCreature", LuaScriptInterface::luaTileGetTopVisibleCreature);
registerMethod("Tile", "getItems", LuaScriptInterface::luaTileGetItems);
registerMethod("Tile", "getItemCount", LuaScriptInterface::luaTileGetItemCount);
registerMethod("Tile", "getDownItemCount", LuaScriptInterface::luaTileGetDownItemCount);
registerMethod("Tile", "getTopItemCount", LuaScriptInterface::luaTileGetTopItemCount);
registerMethod("Tile", "getCreatures", LuaScriptInterface::luaTileGetCreatures);
registerMethod("Tile", "getCreatureCount", LuaScriptInterface::luaTileGetCreatureCount);
registerMethod("Tile", "getThingIndex", LuaScriptInterface::luaTileGetThingIndex);
registerMethod("Tile", "hasProperty", LuaScriptInterface::luaTileHasProperty);
registerMethod("Tile", "hasFlag", LuaScriptInterface::luaTileHasFlag);
registerMethod("Tile", "queryAdd", LuaScriptInterface::luaTileQueryAdd);
registerMethod("Tile", "getHouse", LuaScriptInterface::luaTileGetHouse);
// NetworkMessage
registerClass("NetworkMessage", "", LuaScriptInterface::luaNetworkMessageCreate);
registerMetaMethod("NetworkMessage", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("NetworkMessage", "__gc", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "delete", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "getByte", LuaScriptInterface::luaNetworkMessageGetByte);
registerMethod("NetworkMessage", "getU16", LuaScriptInterface::luaNetworkMessageGetU16);
registerMethod("NetworkMessage", "getU32", LuaScriptInterface::luaNetworkMessageGetU32);
registerMethod("NetworkMessage", "getU64", LuaScriptInterface::luaNetworkMessageGetU64);
registerMethod("NetworkMessage", "getString", LuaScriptInterface::luaNetworkMessageGetString);
registerMethod("NetworkMessage", "getPosition", LuaScriptInterface::luaNetworkMessageGetPosition);
registerMethod("NetworkMessage", "addByte", LuaScriptInterface::luaNetworkMessageAddByte);
registerMethod("NetworkMessage", "addU16", LuaScriptInterface::luaNetworkMessageAddU16);
registerMethod("NetworkMessage", "addU32", LuaScriptInterface::luaNetworkMessageAddU32);
registerMethod("NetworkMessage", "addU64", LuaScriptInterface::luaNetworkMessageAddU64);
registerMethod("NetworkMessage", "addString", LuaScriptInterface::luaNetworkMessageAddString);
registerMethod("NetworkMessage", "addPosition", LuaScriptInterface::luaNetworkMessageAddPosition);
registerMethod("NetworkMessage", "addDouble", LuaScriptInterface::luaNetworkMessageAddDouble);
registerMethod("NetworkMessage", "addItem", LuaScriptInterface::luaNetworkMessageAddItem);
registerMethod("NetworkMessage", "addItemId", LuaScriptInterface::luaNetworkMessageAddItemId);
registerMethod("NetworkMessage", "reset", LuaScriptInterface::luaNetworkMessageReset);
registerMethod("NetworkMessage", "skipBytes", LuaScriptInterface::luaNetworkMessageSkipBytes);
registerMethod("NetworkMessage", "sendToPlayer", LuaScriptInterface::luaNetworkMessageSendToPlayer);
// ModalWindow
registerClass("ModalWindow", "", LuaScriptInterface::luaModalWindowCreate);
registerMetaMethod("ModalWindow", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("ModalWindow", "__gc", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "delete", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "getId", LuaScriptInterface::luaModalWindowGetId);
registerMethod("ModalWindow", "getTitle", LuaScriptInterface::luaModalWindowGetTitle);
registerMethod("ModalWindow", "getMessage", LuaScriptInterface::luaModalWindowGetMessage);
registerMethod("ModalWindow", "setTitle", LuaScriptInterface::luaModalWindowSetTitle);
registerMethod("ModalWindow", "setMessage", LuaScriptInterface::luaModalWindowSetMessage);
registerMethod("ModalWindow", "getButtonCount", LuaScriptInterface::luaModalWindowGetButtonCount);
registerMethod("ModalWindow", "getChoiceCount", LuaScriptInterface::luaModalWindowGetChoiceCount);
registerMethod("ModalWindow", "addButton", LuaScriptInterface::luaModalWindowAddButton);
registerMethod("ModalWindow", "addChoice", LuaScriptInterface::luaModalWindowAddChoice);
registerMethod("ModalWindow", "getDefaultEnterButton", LuaScriptInterface::luaModalWindowGetDefaultEnterButton);
registerMethod("ModalWindow", "setDefaultEnterButton", LuaScriptInterface::luaModalWindowSetDefaultEnterButton);
registerMethod("ModalWindow", "getDefaultEscapeButton", LuaScriptInterface::luaModalWindowGetDefaultEscapeButton);
registerMethod("ModalWindow", "setDefaultEscapeButton", LuaScriptInterface::luaModalWindowSetDefaultEscapeButton);
registerMethod("ModalWindow", "hasPriority", LuaScriptInterface::luaModalWindowHasPriority);
registerMethod("ModalWindow", "setPriority", LuaScriptInterface::luaModalWindowSetPriority);
registerMethod("ModalWindow", "sendToPlayer", LuaScriptInterface::luaModalWindowSendToPlayer);
// Item
registerClass("Item", "", LuaScriptInterface::luaItemCreate);
registerMetaMethod("Item", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Item", "isItem", LuaScriptInterface::luaItemIsItem);
registerMethod("Item", "getParent", LuaScriptInterface::luaItemGetParent);
registerMethod("Item", "getTopParent", LuaScriptInterface::luaItemGetTopParent);
registerMethod("Item", "getId", LuaScriptInterface::luaItemGetId);
registerMethod("Item", "clone", LuaScriptInterface::luaItemClone);
registerMethod("Item", "split", LuaScriptInterface::luaItemSplit);
registerMethod("Item", "remove", LuaScriptInterface::luaItemRemove);
registerMethod("Item", "getUniqueId", LuaScriptInterface::luaItemGetUniqueId);
registerMethod("Item", "getActionId", LuaScriptInterface::luaItemGetActionId);
registerMethod("Item", "setActionId", LuaScriptInterface::luaItemSetActionId);
registerMethod("Item", "getCount", LuaScriptInterface::luaItemGetCount);
registerMethod("Item", "getCharges", LuaScriptInterface::luaItemGetCharges);
registerMethod("Item", "getFluidType", LuaScriptInterface::luaItemGetFluidType);
registerMethod("Item", "getWeight", LuaScriptInterface::luaItemGetWeight);
registerMethod("Item", "getSubType", LuaScriptInterface::luaItemGetSubType);
registerMethod("Item", "getName", LuaScriptInterface::luaItemGetName);
registerMethod("Item", "getPluralName", LuaScriptInterface::luaItemGetPluralName);
registerMethod("Item", "getArticle", LuaScriptInterface::luaItemGetArticle);
registerMethod("Item", "getPosition", LuaScriptInterface::luaItemGetPosition);
registerMethod("Item", "getTile", LuaScriptInterface::luaItemGetTile);
registerMethod("Item", "hasAttribute", LuaScriptInterface::luaItemHasAttribute);
registerMethod("Item", "getAttribute", LuaScriptInterface::luaItemGetAttribute);
registerMethod("Item", "setAttribute", LuaScriptInterface::luaItemSetAttribute);
registerMethod("Item", "removeAttribute", LuaScriptInterface::luaItemRemoveAttribute);
registerMethod("Item", "moveTo", LuaScriptInterface::luaItemMoveTo);
registerMethod("Item", "transform", LuaScriptInterface::luaItemTransform);
registerMethod("Item", "decay", LuaScriptInterface::luaItemDecay);
registerMethod("Item", "getDescription", LuaScriptInterface::luaItemGetDescription);
registerMethod("Item", "hasProperty", LuaScriptInterface::luaItemHasProperty);
// Container
registerClass("Container", "Item", LuaScriptInterface::luaContainerCreate);
registerMetaMethod("Container", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Container", "getSize", LuaScriptInterface::luaContainerGetSize);
registerMethod("Container", "getCapacity", LuaScriptInterface::luaContainerGetCapacity);
registerMethod("Container", "getEmptySlots", LuaScriptInterface::luaContainerGetEmptySlots);
registerMethod("Container", "getItemHoldingCount", LuaScriptInterface::luaContainerGetItemHoldingCount);
registerMethod("Container", "getItemCountById", LuaScriptInterface::luaContainerGetItemCountById);
registerMethod("Container", "getItem", LuaScriptInterface::luaContainerGetItem);
registerMethod("Container", "hasItem", LuaScriptInterface::luaContainerHasItem);
registerMethod("Container", "addItem", LuaScriptInterface::luaContainerAddItem);
registerMethod("Container", "addItemEx", LuaScriptInterface::luaContainerAddItemEx);
// Teleport
registerClass("Teleport", "Item", LuaScriptInterface::luaTeleportCreate);
registerMetaMethod("Teleport", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Teleport", "getDestination", LuaScriptInterface::luaTeleportGetDestination);
registerMethod("Teleport", "setDestination", LuaScriptInterface::luaTeleportSetDestination);
// Creature
registerClass("Creature", "", LuaScriptInterface::luaCreatureCreate);
registerMetaMethod("Creature", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Creature", "getEvents", LuaScriptInterface::luaCreatureGetEvents);
registerMethod("Creature", "registerEvent", LuaScriptInterface::luaCreatureRegisterEvent);
registerMethod("Creature", "unregisterEvent", LuaScriptInterface::luaCreatureUnregisterEvent);
registerMethod("Creature", "isRemoved", LuaScriptInterface::luaCreatureIsRemoved);
registerMethod("Creature", "isCreature", LuaScriptInterface::luaCreatureIsCreature);
registerMethod("Creature", "isInGhostMode", LuaScriptInterface::luaCreatureIsInGhostMode);
registerMethod("Creature", "isHealthHidden", LuaScriptInterface::luaCreatureIsHealthHidden);
registerMethod("Creature", "isImmune", LuaScriptInterface::luaCreatureIsImmune);
registerMethod("Creature", "canSee", LuaScriptInterface::luaCreatureCanSee);
registerMethod("Creature", "canSeeCreature", LuaScriptInterface::luaCreatureCanSeeCreature);
registerMethod("Creature", "getParent", LuaScriptInterface::luaCreatureGetParent);
registerMethod("Creature", "getId", LuaScriptInterface::luaCreatureGetId);
registerMethod("Creature", "getName", LuaScriptInterface::luaCreatureGetName);
registerMethod("Creature", "getTarget", LuaScriptInterface::luaCreatureGetTarget);
registerMethod("Creature", "setTarget", LuaScriptInterface::luaCreatureSetTarget);
registerMethod("Creature", "getFollowCreature", LuaScriptInterface::luaCreatureGetFollowCreature);
registerMethod("Creature", "setFollowCreature", LuaScriptInterface::luaCreatureSetFollowCreature);
registerMethod("Creature", "getMaster", LuaScriptInterface::luaCreatureGetMaster);
registerMethod("Creature", "setMaster", LuaScriptInterface::luaCreatureSetMaster);
registerMethod("Creature", "getLight", LuaScriptInterface::luaCreatureGetLight);
registerMethod("Creature", "setLight", LuaScriptInterface::luaCreatureSetLight);
registerMethod("Creature", "getSpeed", LuaScriptInterface::luaCreatureGetSpeed);
registerMethod("Creature", "getBaseSpeed", LuaScriptInterface::luaCreatureGetBaseSpeed);
registerMethod("Creature", "changeSpeed", LuaScriptInterface::luaCreatureChangeSpeed);
registerMethod("Creature", "setDropLoot", LuaScriptInterface::luaCreatureSetDropLoot);
registerMethod("Creature", "getPosition", LuaScriptInterface::luaCreatureGetPosition);
registerMethod("Creature", "getTile", LuaScriptInterface::luaCreatureGetTile);
registerMethod("Creature", "getDirection", LuaScriptInterface::luaCreatureGetDirection);
registerMethod("Creature", "setDirection", LuaScriptInterface::luaCreatureSetDirection);
registerMethod("Creature", "getHealth", LuaScriptInterface::luaCreatureGetHealth);
registerMethod("Creature", "addHealth", LuaScriptInterface::luaCreatureAddHealth);
registerMethod("Creature", "getMaxHealth", LuaScriptInterface::luaCreatureGetMaxHealth);
registerMethod("Creature", "setMaxHealth", LuaScriptInterface::luaCreatureSetMaxHealth);
registerMethod("Creature", "setHiddenHealth", LuaScriptInterface::luaCreatureSetHiddenHealth);
registerMethod("Creature", "getMana", LuaScriptInterface::luaCreatureGetMana);
registerMethod("Creature", "addMana", LuaScriptInterface::luaCreatureAddMana);
registerMethod("Creature", "getMaxMana", LuaScriptInterface::luaCreatureGetMaxMana);
registerMethod("Creature", "getSkull", LuaScriptInterface::luaCreatureGetSkull);
registerMethod("Creature", "setSkull", LuaScriptInterface::luaCreatureSetSkull);
registerMethod("Creature", "getOutfit", LuaScriptInterface::luaCreatureGetOutfit);
registerMethod("Creature", "setOutfit", LuaScriptInterface::luaCreatureSetOutfit);
registerMethod("Creature", "getCondition", LuaScriptInterface::luaCreatureGetCondition);
registerMethod("Creature", "addCondition", LuaScriptInterface::luaCreatureAddCondition);
registerMethod("Creature", "removeCondition", LuaScriptInterface::luaCreatureRemoveCondition);
registerMethod("Creature", "remove", LuaScriptInterface::luaCreatureRemove);
registerMethod("Creature", "teleportTo", LuaScriptInterface::luaCreatureTeleportTo);
registerMethod("Creature", "say", LuaScriptInterface::luaCreatureSay);
registerMethod("Creature", "getDamageMap", LuaScriptInterface::luaCreatureGetDamageMap);
registerMethod("Creature", "getSummons", LuaScriptInterface::luaCreatureGetSummons);
registerMethod("Creature", "getDescription", LuaScriptInterface::luaCreatureGetDescription);
registerMethod("Creature", "getPathTo", LuaScriptInterface::luaCreatureGetPathTo);
registerMethod("Creature", "move", LuaScriptInterface::luaCreatureMove);
// Player
registerClass("Player", "Creature", LuaScriptInterface::luaPlayerCreate);
registerMetaMethod("Player", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Player", "isPlayer", LuaScriptInterface::luaPlayerIsPlayer);
registerMethod("Player", "getGuid", LuaScriptInterface::luaPlayerGetGuid);
registerMethod("Player", "getIp", LuaScriptInterface::luaPlayerGetIp);
registerMethod("Player", "getAccountId", LuaScriptInterface::luaPlayerGetAccountId);
registerMethod("Player", "getLastLoginSaved", LuaScriptInterface::luaPlayerGetLastLoginSaved);
registerMethod("Player", "getLastLogout", LuaScriptInterface::luaPlayerGetLastLogout);
registerMethod("Player", "getAccountType", LuaScriptInterface::luaPlayerGetAccountType);
registerMethod("Player", "setAccountType", LuaScriptInterface::luaPlayerSetAccountType);
registerMethod("Player", "getCapacity", LuaScriptInterface::luaPlayerGetCapacity);
registerMethod("Player", "setCapacity", LuaScriptInterface::luaPlayerSetCapacity);
registerMethod("Player", "getFreeCapacity", LuaScriptInterface::luaPlayerGetFreeCapacity);
registerMethod("Player", "getDepotChest", LuaScriptInterface::luaPlayerGetDepotChest);
registerMethod("Player", "getInbox", LuaScriptInterface::luaPlayerGetInbox);
registerMethod("Player", "getSkullTime", LuaScriptInterface::luaPlayerGetSkullTime);
registerMethod("Player", "setSkullTime", LuaScriptInterface::luaPlayerSetSkullTime);
registerMethod("Player", "getDeathPenalty", LuaScriptInterface::luaPlayerGetDeathPenalty);
registerMethod("Player", "getExperience", LuaScriptInterface::luaPlayerGetExperience);
registerMethod("Player", "addExperience", LuaScriptInterface::luaPlayerAddExperience);
registerMethod("Player", "removeExperience", LuaScriptInterface::luaPlayerRemoveExperience);
registerMethod("Player", "getLevel", LuaScriptInterface::luaPlayerGetLevel);
registerMethod("Player", "getMagicLevel", LuaScriptInterface::luaPlayerGetMagicLevel);
registerMethod("Player", "getBaseMagicLevel", LuaScriptInterface::luaPlayerGetBaseMagicLevel);
registerMethod("Player", "setMaxMana", LuaScriptInterface::luaPlayerSetMaxMana);
registerMethod("Player", "getManaSpent", LuaScriptInterface::luaPlayerGetManaSpent);
registerMethod("Player", "addManaSpent", LuaScriptInterface::luaPlayerAddManaSpent);
registerMethod("Player", "getBaseMaxHealth", LuaScriptInterface::luaPlayerGetBaseMaxHealth);
registerMethod("Player", "getBaseMaxMana", LuaScriptInterface::luaPlayerGetBaseMaxMana);
registerMethod("Player", "getSkillLevel", LuaScriptInterface::luaPlayerGetSkillLevel);
registerMethod("Player", "getEffectiveSkillLevel", LuaScriptInterface::luaPlayerGetEffectiveSkillLevel);
registerMethod("Player", "getSkillPercent", LuaScriptInterface::luaPlayerGetSkillPercent);
registerMethod("Player", "getSkillTries", LuaScriptInterface::luaPlayerGetSkillTries);
registerMethod("Player", "addSkillTries", LuaScriptInterface::luaPlayerAddSkillTries);
registerMethod("Player", "addOfflineTrainingTime", LuaScriptInterface::luaPlayerAddOfflineTrainingTime);
registerMethod("Player", "getOfflineTrainingTime", LuaScriptInterface::luaPlayerGetOfflineTrainingTime);
registerMethod("Player", "removeOfflineTrainingTime", LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime);
registerMethod("Player", "addOfflineTrainingTries", LuaScriptInterface::luaPlayerAddOfflineTrainingTries);
registerMethod("Player", "getOfflineTrainingSkill", LuaScriptInterface::luaPlayerGetOfflineTrainingSkill);
registerMethod("Player", "setOfflineTrainingSkill", LuaScriptInterface::luaPlayerSetOfflineTrainingSkill);
registerMethod("Player", "getItemCount", LuaScriptInterface::luaPlayerGetItemCount);
registerMethod("Player", "getItemById", LuaScriptInterface::luaPlayerGetItemById);
registerMethod("Player", "getVocation", LuaScriptInterface::luaPlayerGetVocation);
registerMethod("Player", "setVocation", LuaScriptInterface::luaPlayerSetVocation);
registerMethod("Player", "getSex", LuaScriptInterface::luaPlayerGetSex);
registerMethod("Player", "setSex", LuaScriptInterface::luaPlayerSetSex);
registerMethod("Player", "getTown", LuaScriptInterface::luaPlayerGetTown);
registerMethod("Player", "setTown", LuaScriptInterface::luaPlayerSetTown);
registerMethod("Player", "getGuild", LuaScriptInterface::luaPlayerGetGuild);
registerMethod("Player", "setGuild", LuaScriptInterface::luaPlayerSetGuild);
registerMethod("Player", "getGuildLevel", LuaScriptInterface::luaPlayerGetGuildLevel);
registerMethod("Player", "setGuildLevel", LuaScriptInterface::luaPlayerSetGuildLevel);
registerMethod("Player", "getGuildNick", LuaScriptInterface::luaPlayerGetGuildNick);
registerMethod("Player", "setGuildNick", LuaScriptInterface::luaPlayerSetGuildNick);
registerMethod("Player", "getGroup", LuaScriptInterface::luaPlayerGetGroup);
registerMethod("Player", "setGroup", LuaScriptInterface::luaPlayerSetGroup);
registerMethod("Player", "getStamina", LuaScriptInterface::luaPlayerGetStamina);
registerMethod("Player", "setStamina", LuaScriptInterface::luaPlayerSetStamina);
registerMethod("Player", "getSoul", LuaScriptInterface::luaPlayerGetSoul);
registerMethod("Player", "addSoul", LuaScriptInterface::luaPlayerAddSoul);
registerMethod("Player", "getMaxSoul", LuaScriptInterface::luaPlayerGetMaxSoul);
registerMethod("Player", "getBankBalance", LuaScriptInterface::luaPlayerGetBankBalance);
registerMethod("Player", "setBankBalance", LuaScriptInterface::luaPlayerSetBankBalance);
registerMethod("Player", "getStorageValue", LuaScriptInterface::luaPlayerGetStorageValue);
registerMethod("Player", "setStorageValue", LuaScriptInterface::luaPlayerSetStorageValue);
registerMethod("Player", "addItem", LuaScriptInterface::luaPlayerAddItem);
registerMethod("Player", "addItemEx", LuaScriptInterface::luaPlayerAddItemEx);
registerMethod("Player", "removeItem", LuaScriptInterface::luaPlayerRemoveItem);
registerMethod("Player", "getMoney", LuaScriptInterface::luaPlayerGetMoney);
registerMethod("Player", "addMoney", LuaScriptInterface::luaPlayerAddMoney);
registerMethod("Player", "removeMoney", LuaScriptInterface::luaPlayerRemoveMoney);
registerMethod("Player", "showTextDialog", LuaScriptInterface::luaPlayerShowTextDialog);
registerMethod("Player", "sendTextMessage", LuaScriptInterface::luaPlayerSendTextMessage);
registerMethod("Player", "sendChannelMessage", LuaScriptInterface::luaPlayerSendChannelMessage);
registerMethod("Player", "sendPrivateMessage", LuaScriptInterface::luaPlayerSendPrivateMessage);
registerMethod("Player", "channelSay", LuaScriptInterface::luaPlayerChannelSay);
registerMethod("Player", "openChannel", LuaScriptInterface::luaPlayerOpenChannel);
registerMethod("Player", "getSlotItem", LuaScriptInterface::luaPlayerGetSlotItem);
registerMethod("Player", "getParty", LuaScriptInterface::luaPlayerGetParty);
registerMethod("Player", "addOutfit", LuaScriptInterface::luaPlayerAddOutfit);
registerMethod("Player", "addOutfitAddon", LuaScriptInterface::luaPlayerAddOutfitAddon);
registerMethod("Player", "removeOutfit", LuaScriptInterface::luaPlayerRemoveOutfit);
registerMethod("Player", "removeOutfitAddon", LuaScriptInterface::luaPlayerRemoveOutfitAddon);
registerMethod("Player", "hasOutfit", LuaScriptInterface::luaPlayerHasOutfit);
registerMethod("Player", "sendOutfitWindow", LuaScriptInterface::luaPlayerSendOutfitWindow);
registerMethod("Player", "addMount", LuaScriptInterface::luaPlayerAddMount);
registerMethod("Player", "removeMount", LuaScriptInterface::luaPlayerRemoveMount);
registerMethod("Player", "hasMount", LuaScriptInterface::luaPlayerHasMount);
registerMethod("Player", "getPremiumDays", LuaScriptInterface::luaPlayerGetPremiumDays);
registerMethod("Player", "addPremiumDays", LuaScriptInterface::luaPlayerAddPremiumDays);
registerMethod("Player", "removePremiumDays", LuaScriptInterface::luaPlayerRemovePremiumDays);
registerMethod("Player", "hasBlessing", LuaScriptInterface::luaPlayerHasBlessing);
registerMethod("Player", "addBlessing", LuaScriptInterface::luaPlayerAddBlessing);
registerMethod("Player", "removeBlessing", LuaScriptInterface::luaPlayerRemoveBlessing);
registerMethod("Player", "canLearnSpell", LuaScriptInterface::luaPlayerCanLearnSpell);
registerMethod("Player", "learnSpell", LuaScriptInterface::luaPlayerLearnSpell);
registerMethod("Player", "forgetSpell", LuaScriptInterface::luaPlayerForgetSpell);
registerMethod("Player", "hasLearnedSpell", LuaScriptInterface::luaPlayerHasLearnedSpell);
registerMethod("Player", "sendTutorial", LuaScriptInterface::luaPlayerSendTutorial);
registerMethod("Player", "addMapMark", LuaScriptInterface::luaPlayerAddMapMark);
registerMethod("Player", "save", LuaScriptInterface::luaPlayerSave);
registerMethod("Player", "popupFYI", LuaScriptInterface::luaPlayerPopupFYI);
registerMethod("Player", "isPzLocked", LuaScriptInterface::luaPlayerIsPzLocked);
registerMethod("Player", "getClient", LuaScriptInterface::luaPlayerGetClient);
registerMethod("Player", "getHouse", LuaScriptInterface::luaPlayerGetHouse);
registerMethod("Player", "sendHouseWindow", LuaScriptInterface::luaPlayerSendHouseWindow);
registerMethod("Player", "setEditHouse", LuaScriptInterface::luaPlayerSetEditHouse);
registerMethod("Player", "setGhostMode", LuaScriptInterface::luaPlayerSetGhostMode);
registerMethod("Player", "getContainerId", LuaScriptInterface::luaPlayerGetContainerId);
registerMethod("Player", "getContainerById", LuaScriptInterface::luaPlayerGetContainerById);
registerMethod("Player", "getContainerIndex", LuaScriptInterface::luaPlayerGetContainerIndex);
registerMethod("Player", "getInstantSpells", LuaScriptInterface::luaPlayerGetInstantSpells);
registerMethod("Player", "canCast", LuaScriptInterface::luaPlayerCanCast);
registerMethod("Player", "hasChaseMode", LuaScriptInterface::luaPlayerHasChaseMode);
registerMethod("Player", "hasSecureMode", LuaScriptInterface::luaPlayerHasSecureMode);
registerMethod("Player", "getFightMode", LuaScriptInterface::luaPlayerGetFightMode);
// Monster
registerClass("Monster", "Creature", LuaScriptInterface::luaMonsterCreate);
registerMetaMethod("Monster", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Monster", "isMonster", LuaScriptInterface::luaMonsterIsMonster);
registerMethod("Monster", "getType", LuaScriptInterface::luaMonsterGetType);
registerMethod("Monster", "getSpawnPosition", LuaScriptInterface::luaMonsterGetSpawnPosition);
registerMethod("Monster", "isInSpawnRange", LuaScriptInterface::luaMonsterIsInSpawnRange);
registerMethod("Monster", "isIdle", LuaScriptInterface::luaMonsterIsIdle);
registerMethod("Monster", "setIdle", LuaScriptInterface::luaMonsterSetIdle);
registerMethod("Monster", "isTarget", LuaScriptInterface::luaMonsterIsTarget);
registerMethod("Monster", "isOpponent", LuaScriptInterface::luaMonsterIsOpponent);
registerMethod("Monster", "isFriend", LuaScriptInterface::luaMonsterIsFriend);
registerMethod("Monster", "addFriend", LuaScriptInterface::luaMonsterAddFriend);
registerMethod("Monster", "removeFriend", LuaScriptInterface::luaMonsterRemoveFriend);
registerMethod("Monster", "getFriendList", LuaScriptInterface::luaMonsterGetFriendList);
registerMethod("Monster", "getFriendCount", LuaScriptInterface::luaMonsterGetFriendCount);
registerMethod("Monster", "addTarget", LuaScriptInterface::luaMonsterAddTarget);
registerMethod("Monster", "removeTarget", LuaScriptInterface::luaMonsterRemoveTarget);
registerMethod("Monster", "getTargetList", LuaScriptInterface::luaMonsterGetTargetList);
registerMethod("Monster", "getTargetCount", LuaScriptInterface::luaMonsterGetTargetCount);
registerMethod("Monster", "selectTarget", LuaScriptInterface::luaMonsterSelectTarget);
registerMethod("Monster", "searchTarget", LuaScriptInterface::luaMonsterSearchTarget);
// Npc
registerClass("Npc", "Creature", LuaScriptInterface::luaNpcCreate);
registerMetaMethod("Npc", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Npc", "isNpc", LuaScriptInterface::luaNpcIsNpc);
registerMethod("Npc", "setMasterPos", LuaScriptInterface::luaNpcSetMasterPos);
registerMethod("Npc", "getSpeechBubble", LuaScriptInterface::luaNpcGetSpeechBubble);
registerMethod("Npc", "setSpeechBubble", LuaScriptInterface::luaNpcSetSpeechBubble);
// Guild
registerClass("Guild", "", LuaScriptInterface::luaGuildCreate);
registerMetaMethod("Guild", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Guild", "getId", LuaScriptInterface::luaGuildGetId);
registerMethod("Guild", "getName", LuaScriptInterface::luaGuildGetName);
registerMethod("Guild", "getMembersOnline", LuaScriptInterface::luaGuildGetMembersOnline);
registerMethod("Guild", "addRank", LuaScriptInterface::luaGuildAddRank);
registerMethod("Guild", "getRankById", LuaScriptInterface::luaGuildGetRankById);
registerMethod("Guild", "getRankByLevel", LuaScriptInterface::luaGuildGetRankByLevel);
registerMethod("Guild", "getMotd", LuaScriptInterface::luaGuildGetMotd);
registerMethod("Guild", "setMotd", LuaScriptInterface::luaGuildSetMotd);
// Group
registerClass("Group", "", LuaScriptInterface::luaGroupCreate);
registerMetaMethod("Group", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Group", "getId", LuaScriptInterface::luaGroupGetId);
registerMethod("Group", "getName", LuaScriptInterface::luaGroupGetName);
registerMethod("Group", "getFlags", LuaScriptInterface::luaGroupGetFlags);
registerMethod("Group", "getAccess", LuaScriptInterface::luaGroupGetAccess);
registerMethod("Group", "getMaxDepotItems", LuaScriptInterface::luaGroupGetMaxDepotItems);
registerMethod("Group", "getMaxVipEntries", LuaScriptInterface::luaGroupGetMaxVipEntries);
// Vocation
registerClass("Vocation", "", LuaScriptInterface::luaVocationCreate);
registerMetaMethod("Vocation", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Vocation", "getId", LuaScriptInterface::luaVocationGetId);
registerMethod("Vocation", "getClientId", LuaScriptInterface::luaVocationGetClientId);
registerMethod("Vocation", "getName", LuaScriptInterface::luaVocationGetName);
registerMethod("Vocation", "getDescription", LuaScriptInterface::luaVocationGetDescription);
registerMethod("Vocation", "getRequiredSkillTries", LuaScriptInterface::luaVocationGetRequiredSkillTries);
registerMethod("Vocation", "getRequiredManaSpent", LuaScriptInterface::luaVocationGetRequiredManaSpent);
registerMethod("Vocation", "getCapacityGain", LuaScriptInterface::luaVocationGetCapacityGain);
registerMethod("Vocation", "getHealthGain", LuaScriptInterface::luaVocationGetHealthGain);
registerMethod("Vocation", "getHealthGainTicks", LuaScriptInterface::luaVocationGetHealthGainTicks);
registerMethod("Vocation", "getHealthGainAmount", LuaScriptInterface::luaVocationGetHealthGainAmount);
registerMethod("Vocation", "getManaGain", LuaScriptInterface::luaVocationGetManaGain);
registerMethod("Vocation", "getManaGainTicks", LuaScriptInterface::luaVocationGetManaGainTicks);
registerMethod("Vocation", "getManaGainAmount", LuaScriptInterface::luaVocationGetManaGainAmount);
registerMethod("Vocation", "getMaxSoul", LuaScriptInterface::luaVocationGetMaxSoul);
registerMethod("Vocation", "getSoulGainTicks", LuaScriptInterface::luaVocationGetSoulGainTicks);
registerMethod("Vocation", "getAttackSpeed", LuaScriptInterface::luaVocationGetAttackSpeed);
registerMethod("Vocation", "getBaseSpeed", LuaScriptInterface::luaVocationGetBaseSpeed);
registerMethod("Vocation", "getDemotion", LuaScriptInterface::luaVocationGetDemotion);
registerMethod("Vocation", "getPromotion", LuaScriptInterface::luaVocationGetPromotion);
// Town
registerClass("Town", "", LuaScriptInterface::luaTownCreate);
registerMetaMethod("Town", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Town", "getId", LuaScriptInterface::luaTownGetId);
registerMethod("Town", "getName", LuaScriptInterface::luaTownGetName);
registerMethod("Town", "getTemplePosition", LuaScriptInterface::luaTownGetTemplePosition);
// House
registerClass("House", "", LuaScriptInterface::luaHouseCreate);
registerMetaMethod("House", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("House", "getId", LuaScriptInterface::luaHouseGetId);
registerMethod("House", "getName", LuaScriptInterface::luaHouseGetName);
registerMethod("House", "getTown", LuaScriptInterface::luaHouseGetTown);
registerMethod("House", "getExitPosition", LuaScriptInterface::luaHouseGetExitPosition);
registerMethod("House", "getRent", LuaScriptInterface::luaHouseGetRent);
registerMethod("House", "getOwnerGuid", LuaScriptInterface::luaHouseGetOwnerGuid);
registerMethod("House", "setOwnerGuid", LuaScriptInterface::luaHouseSetOwnerGuid);
registerMethod("House", "startTrade", LuaScriptInterface::luaHouseStartTrade);
registerMethod("House", "getBeds", LuaScriptInterface::luaHouseGetBeds);
registerMethod("House", "getBedCount", LuaScriptInterface::luaHouseGetBedCount);
registerMethod("House", "getDoors", LuaScriptInterface::luaHouseGetDoors);
registerMethod("House", "getDoorCount", LuaScriptInterface::luaHouseGetDoorCount);
registerMethod("House", "getDoorIdByPosition", LuaScriptInterface::luaHouseGetDoorIdByPosition);
registerMethod("House", "getTiles", LuaScriptInterface::luaHouseGetTiles);
registerMethod("House", "getTileCount", LuaScriptInterface::luaHouseGetTileCount);
registerMethod("House", "canEditAccessList", LuaScriptInterface::luaHouseCanEditAccessList);
registerMethod("House", "getAccessList", LuaScriptInterface::luaHouseGetAccessList);
registerMethod("House", "setAccessList", LuaScriptInterface::luaHouseSetAccessList);
registerMethod("House", "kickPlayer", LuaScriptInterface::luaHouseKickPlayer);
// ItemType
registerClass("ItemType", "", LuaScriptInterface::luaItemTypeCreate);
registerMetaMethod("ItemType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("ItemType", "isCorpse", LuaScriptInterface::luaItemTypeIsCorpse);
registerMethod("ItemType", "isDoor", LuaScriptInterface::luaItemTypeIsDoor);
registerMethod("ItemType", "isContainer", LuaScriptInterface::luaItemTypeIsContainer);
registerMethod("ItemType", "isFluidContainer", LuaScriptInterface::luaItemTypeIsFluidContainer);
registerMethod("ItemType", "isMovable", LuaScriptInterface::luaItemTypeIsMovable);
registerMethod("ItemType", "isRune", LuaScriptInterface::luaItemTypeIsRune);
registerMethod("ItemType", "isStackable", LuaScriptInterface::luaItemTypeIsStackable);
registerMethod("ItemType", "isReadable", LuaScriptInterface::luaItemTypeIsReadable);
registerMethod("ItemType", "isWritable", LuaScriptInterface::luaItemTypeIsWritable);
registerMethod("ItemType", "getType", LuaScriptInterface::luaItemTypeGetType);
registerMethod("ItemType", "getId", LuaScriptInterface::luaItemTypeGetId);
registerMethod("ItemType", "getClientId", LuaScriptInterface::luaItemTypeGetClientId);
registerMethod("ItemType", "getName", LuaScriptInterface::luaItemTypeGetName);
registerMethod("ItemType", "getPluralName", LuaScriptInterface::luaItemTypeGetPluralName);
registerMethod("ItemType", "getArticle", LuaScriptInterface::luaItemTypeGetArticle);
registerMethod("ItemType", "getDescription", LuaScriptInterface::luaItemTypeGetDescription);
registerMethod("ItemType", "getSlotPosition", LuaScriptInterface::luaItemTypeGetSlotPosition);
registerMethod("ItemType", "getCharges", LuaScriptInterface::luaItemTypeGetCharges);
registerMethod("ItemType", "getFluidSource", LuaScriptInterface::luaItemTypeGetFluidSource);
registerMethod("ItemType", "getCapacity", LuaScriptInterface::luaItemTypeGetCapacity);
registerMethod("ItemType", "getWeight", LuaScriptInterface::luaItemTypeGetWeight);
registerMethod("ItemType", "getHitChance", LuaScriptInterface::luaItemTypeGetHitChance);
registerMethod("ItemType", "getShootRange", LuaScriptInterface::luaItemTypeGetShootRange);
registerMethod("ItemType", "getAttack", LuaScriptInterface::luaItemTypeGetAttack);
registerMethod("ItemType", "getDefense", LuaScriptInterface::luaItemTypeGetDefense);
registerMethod("ItemType", "getExtraDefense", LuaScriptInterface::luaItemTypeGetExtraDefense);
registerMethod("ItemType", "getArmor", LuaScriptInterface::luaItemTypeGetArmor);
registerMethod("ItemType", "getWeaponType", LuaScriptInterface::luaItemTypeGetWeaponType);
registerMethod("ItemType", "getElementType", LuaScriptInterface::luaItemTypeGetElementType);
registerMethod("ItemType", "getElementDamage", LuaScriptInterface::luaItemTypeGetElementDamage);
registerMethod("ItemType", "getTransformEquipId", LuaScriptInterface::luaItemTypeGetTransformEquipId);
registerMethod("ItemType", "getTransformDeEquipId", LuaScriptInterface::luaItemTypeGetTransformDeEquipId);
registerMethod("ItemType", "getDestroyId", LuaScriptInterface::luaItemTypeGetDestroyId);
registerMethod("ItemType", "getDecayId", LuaScriptInterface::luaItemTypeGetDecayId);
registerMethod("ItemType", "getRequiredLevel", LuaScriptInterface::luaItemTypeGetRequiredLevel);
registerMethod("ItemType", "hasSubType", LuaScriptInterface::luaItemTypeHasSubType);
// Combat
registerClass("Combat", "", LuaScriptInterface::luaCombatCreate);
registerMetaMethod("Combat", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Combat", "setParameter", LuaScriptInterface::luaCombatSetParameter);
registerMethod("Combat", "setFormula", LuaScriptInterface::luaCombatSetFormula);
registerMethod("Combat", "setArea", LuaScriptInterface::luaCombatSetArea);
registerMethod("Combat", "setCondition", LuaScriptInterface::luaCombatSetCondition);
registerMethod("Combat", "setCallback", LuaScriptInterface::luaCombatSetCallback);
registerMethod("Combat", "setOrigin", LuaScriptInterface::luaCombatSetOrigin);
registerMethod("Combat", "execute", LuaScriptInterface::luaCombatExecute);
// Condition
registerClass("Condition", "", LuaScriptInterface::luaConditionCreate);
registerMetaMethod("Condition", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("Condition", "__gc", LuaScriptInterface::luaConditionDelete);
registerMethod("Condition", "delete", LuaScriptInterface::luaConditionDelete);
registerMethod("Condition", "getId", LuaScriptInterface::luaConditionGetId);
registerMethod("Condition", "getSubId", LuaScriptInterface::luaConditionGetSubId);
registerMethod("Condition", "getType", LuaScriptInterface::luaConditionGetType);
registerMethod("Condition", "getIcons", LuaScriptInterface::luaConditionGetIcons);
registerMethod("Condition", "getEndTime", LuaScriptInterface::luaConditionGetEndTime);
registerMethod("Condition", "clone", LuaScriptInterface::luaConditionClone);
registerMethod("Condition", "getTicks", LuaScriptInterface::luaConditionGetTicks);
registerMethod("Condition", "setTicks", LuaScriptInterface::luaConditionSetTicks);
registerMethod("Condition", "setParameter", LuaScriptInterface::luaConditionSetParameter);
registerMethod("Condition", "setFormula", LuaScriptInterface::luaConditionSetFormula);
registerMethod("Condition", "setOutfit", LuaScriptInterface::luaConditionSetOutfit);
registerMethod("Condition", "addDamage", LuaScriptInterface::luaConditionAddDamage);
// MonsterType
registerClass("MonsterType", "", LuaScriptInterface::luaMonsterTypeCreate);
registerMetaMethod("MonsterType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("MonsterType", "isAttackable", LuaScriptInterface::luaMonsterTypeIsAttackable);
registerMethod("MonsterType", "isConvinceable", LuaScriptInterface::luaMonsterTypeIsConvinceable);
registerMethod("MonsterType", "isSummonable", LuaScriptInterface::luaMonsterTypeIsSummonable);
registerMethod("MonsterType", "isIllusionable", LuaScriptInterface::luaMonsterTypeIsIllusionable);
registerMethod("MonsterType", "isHostile", LuaScriptInterface::luaMonsterTypeIsHostile);
registerMethod("MonsterType", "isPushable", LuaScriptInterface::luaMonsterTypeIsPushable);
registerMethod("MonsterType", "isHealthShown", LuaScriptInterface::luaMonsterTypeIsHealthShown);
registerMethod("MonsterType", "canPushItems", LuaScriptInterface::luaMonsterTypeCanPushItems);
registerMethod("MonsterType", "canPushCreatures", LuaScriptInterface::luaMonsterTypeCanPushCreatures);
registerMethod("MonsterType", "getName", LuaScriptInterface::luaMonsterTypeGetName);
registerMethod("MonsterType", "getNameDescription", LuaScriptInterface::luaMonsterTypeGetNameDescription);
registerMethod("MonsterType", "getHealth", LuaScriptInterface::luaMonsterTypeGetHealth);
registerMethod("MonsterType", "getMaxHealth", LuaScriptInterface::luaMonsterTypeGetMaxHealth);
registerMethod("MonsterType", "getRunHealth", LuaScriptInterface::luaMonsterTypeGetRunHealth);
registerMethod("MonsterType", "getExperience", LuaScriptInterface::luaMonsterTypeGetExperience);
registerMethod("MonsterType", "getCombatImmunities", LuaScriptInterface::luaMonsterTypeGetCombatImmunities);
registerMethod("MonsterType", "getConditionImmunities", LuaScriptInterface::luaMonsterTypeGetConditionImmunities);
registerMethod("MonsterType", "getAttackList", LuaScriptInterface::luaMonsterTypeGetAttackList);
registerMethod("MonsterType", "getDefenseList", LuaScriptInterface::luaMonsterTypeGetDefenseList);
registerMethod("MonsterType", "getElementList", LuaScriptInterface::luaMonsterTypeGetElementList);
registerMethod("MonsterType", "getVoices", LuaScriptInterface::luaMonsterTypeGetVoices);
registerMethod("MonsterType", "getLoot", LuaScriptInterface::luaMonsterTypeGetLoot);
registerMethod("MonsterType", "getCreatureEvents", LuaScriptInterface::luaMonsterTypeGetCreatureEvents);
registerMethod("MonsterType", "getSummonList", LuaScriptInterface::luaMonsterTypeGetSummonList);
registerMethod("MonsterType", "getMaxSummons", LuaScriptInterface::luaMonsterTypeGetMaxSummons);
registerMethod("MonsterType", "getArmor", LuaScriptInterface::luaMonsterTypeGetArmor);
registerMethod("MonsterType", "getDefense", LuaScriptInterface::luaMonsterTypeGetDefense);
registerMethod("MonsterType", "getOutfit", LuaScriptInterface::luaMonsterTypeGetOutfit);
registerMethod("MonsterType", "getRace", LuaScriptInterface::luaMonsterTypeGetRace);
registerMethod("MonsterType", "getCorpseId", LuaScriptInterface::luaMonsterTypeGetCorpseId);
registerMethod("MonsterType", "getManaCost", LuaScriptInterface::luaMonsterTypeGetManaCost);
registerMethod("MonsterType", "getBaseSpeed", LuaScriptInterface::luaMonsterTypeGetBaseSpeed);
registerMethod("MonsterType", "getLight", LuaScriptInterface::luaMonsterTypeGetLight);
registerMethod("MonsterType", "getStaticAttackChance", LuaScriptInterface::luaMonsterTypeGetStaticAttackChance);
registerMethod("MonsterType", "getTargetDistance", LuaScriptInterface::luaMonsterTypeGetTargetDistance);
registerMethod("MonsterType", "getYellChance", LuaScriptInterface::luaMonsterTypeGetYellChance);
registerMethod("MonsterType", "getYellSpeedTicks", LuaScriptInterface::luaMonsterTypeGetYellSpeedTicks);
registerMethod("MonsterType", "getChangeTargetChance", LuaScriptInterface::luaMonsterTypeGetChangeTargetChance);
registerMethod("MonsterType", "getChangeTargetSpeed", LuaScriptInterface::luaMonsterTypeGetChangeTargetSpeed);
// Party
registerClass("Party", "", nullptr);
registerMetaMethod("Party", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Party", "disband", LuaScriptInterface::luaPartyDisband);
registerMethod("Party", "getLeader", LuaScriptInterface::luaPartyGetLeader);
registerMethod("Party", "setLeader", LuaScriptInterface::luaPartySetLeader);
registerMethod("Party", "getMembers", LuaScriptInterface::luaPartyGetMembers);
registerMethod("Party", "getMemberCount", LuaScriptInterface::luaPartyGetMemberCount);
registerMethod("Party", "getInvitees", LuaScriptInterface::luaPartyGetInvitees);
registerMethod("Party", "getInviteeCount", LuaScriptInterface::luaPartyGetInviteeCount);
registerMethod("Party", "addInvite", LuaScriptInterface::luaPartyAddInvite);
registerMethod("Party", "removeInvite", LuaScriptInterface::luaPartyRemoveInvite);
registerMethod("Party", "addMember", LuaScriptInterface::luaPartyAddMember);
registerMethod("Party", "removeMember", LuaScriptInterface::luaPartyRemoveMember);
registerMethod("Party", "isSharedExperienceActive", LuaScriptInterface::luaPartyIsSharedExperienceActive);
registerMethod("Party", "isSharedExperienceEnabled", LuaScriptInterface::luaPartyIsSharedExperienceEnabled);
registerMethod("Party", "shareExperience", LuaScriptInterface::luaPartyShareExperience);
registerMethod("Party", "setSharedExperience", LuaScriptInterface::luaPartySetSharedExperience);
// Spells
registerClass("Spell", "", LuaScriptInterface::luaSpellCreate);
registerMetaMethod("Spell", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Spell", "getManaCost", LuaScriptInterface::luaSpellGetManaCost);
registerMethod("Spell", "getSoulCost", LuaScriptInterface::luaSpellGetSoulCost);
registerMethod("Spell", "isPremium", LuaScriptInterface::luaSpellIsPremium);
registerMethod("Spell", "isLearnable", LuaScriptInterface::luaSpellIsLearnable);
}

#undef registerEnum
#undef registerEnumIn

void LuaScriptInterface::registerClass(const std::string& className, const std::string& baseClass, lua_CFunction newFunction/* = nullptr*/)
{
// className = {}
lua_newtable(luaState);
lua_pushvalue(luaState, -1);
lua_setglobal(luaState, className.c_str());
int methods = lua_gettop(luaState);
// methodsTable = {}
lua_newtable(luaState);
int methodsTable = lua_gettop(luaState);
if (newFunction) {
// className.__call = newFunction
lua_pushcfunction(luaState, newFunction);
lua_setfield(luaState, methodsTable, "__call");
}
uint32_t parents = 0;
if (!baseClass.empty()) {
lua_getglobal(luaState, baseClass.c_str());
lua_rawgeti(luaState, -1, 'p');
parents = getNumber<uint32_t>(luaState, -1) + 1;
lua_pop(luaState, 1);
lua_setfield(luaState, methodsTable, "__index");
}
// setmetatable(className, methodsTable)
lua_setmetatable(luaState, methods);
// className.metatable = {}
luaL_newmetatable(luaState, className.c_str());
int metatable = lua_gettop(luaState);
// className.metatable.__metatable = className
lua_pushvalue(luaState, methods);
lua_setfield(luaState, metatable, "__metatable");
// className.metatable.__index = className
lua_pushvalue(luaState, methods);
lua_setfield(luaState, metatable, "__index");
// className.metatable['h'] = hash
lua_pushnumber(luaState, std::hash<std::string>()(className));
lua_rawseti(luaState, metatable, 'h');
// className.metatable['p'] = parents
lua_pushnumber(luaState, parents);
lua_rawseti(luaState, metatable, 'p');
// className.metatable['t'] = type
if (className == "Item") {
lua_pushnumber(luaState, LuaData_Item);
} else if (className == "Container") {
lua_pushnumber(luaState, LuaData_Container);
} else if (className == "Teleport") {
lua_pushnumber(luaState, LuaData_Teleport);
} else if (className == "Player") {
lua_pushnumber(luaState, LuaData_Player);
} else if (className == "Monster") {
lua_pushnumber(luaState, LuaData_Monster);
} else if (className == "Npc") {
lua_pushnumber(luaState, LuaData_Npc);
} else if (className == "Tile") {
lua_pushnumber(luaState, LuaData_Tile);
} else {
lua_pushnumber(luaState, LuaData_Unknown);
}
lua_rawseti(luaState, metatable, 't');
// pop className, className.metatable
lua_pop(luaState, 2);
}
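
/*
 * Rough sketch of the Lua-side layout registerClass() builds (illustrative,
 * using the "Player" class registered above, which derives from "Creature"):
 *
 *   Player = {}  -- global class table; registerMethod() fills it with methods
 *   setmetatable(Player, { __call = luaPlayerCreate, __index = Creature })
 *
 *   -- registry["Player"] is the metatable later applied to Player userdata:
 *   --   __metatable = Player, __index = Player,
 *   --   ['h'] = hash of the class name, ['p'] = depth of the parent chain,
 *   --   ['t'] = LuaData_Player (consulted by the userdata type checks)
 */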
void LuaScriptInterface::registerTable(const std::string& tableName)
{
// _G[tableName] = {}
lua_newtable(luaState);
lua_setglobal(luaState, tableName.c_str());
}

void LuaScriptInterface::registerMethod(const std::string& globalName, const std::string& methodName, lua_CFunction func)
{
// globalName.methodName = func
lua_getglobal(luaState, globalName.c_str());
lua_pushcfunction(luaState, func);
lua_setfield(luaState, -2, methodName.c_str());
// pop globalName
lua_pop(luaState, 1);
}

void LuaScriptInterface::registerMetaMethod(const std::string& className, const std::string& methodName, lua_CFunction func)
{
// className.metatable.methodName = func
luaL_getmetatable(luaState, className.c_str());
lua_pushcfunction(luaState, func);
lua_setfield(luaState, -2, methodName.c_str());
// pop className.metatable
lua_pop(luaState, 1);
}

void LuaScriptInterface::registerGlobalMethod(const std::string& functionName, lua_CFunction func)
{
// _G[functionName] = func
lua_pushcfunction(luaState, func);
lua_setglobal(luaState, functionName.c_str());
}

void LuaScriptInterface::registerVariable(const std::string& tableName, const std::string& name, lua_Number value)
{
// tableName.name = value
lua_getglobal(luaState, tableName.c_str());
setField(luaState, name.c_str(), value);
// pop tableName
lua_pop(luaState, 1);
}

void LuaScriptInterface::registerGlobalVariable(const std::string& name, lua_Number value)
{
// _G[name] = value
lua_pushnumber(luaState, value);
lua_setglobal(luaState, name.c_str());
}

void LuaScriptInterface::registerGlobalBoolean(const std::string& name, bool value)
{
// _G[name] = value
pushBoolean(luaState, value);
lua_setglobal(luaState, name.c_str());
}

int LuaScriptInterface::luaGetPlayerFlagValue(lua_State* L)
{
//getPlayerFlagValue(cid, flag)
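	//e.g. (Lua): getPlayerFlagValue(cid, PlayerFlag_CannotBeAttacked) -- flag constant shown for illustration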
Player* player = getPlayer(L, 1);
if (player) {
PlayerFlags flag = getNumber<PlayerFlags>(L, 2);
pushBoolean(L, player->hasFlag(flag));
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}

int LuaScriptInterface::luaDoPlayerAddItem(lua_State* L)
{
//doPlayerAddItem(cid, itemid, <optional: default: 1> count/subtype, <optional: default: 1> canDropOnMap)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1> subtype)
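	//e.g. (Lua): doPlayerAddItem(cid, 2160, 5) -- five of item id 2160; id/count purely for illustration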
Player* player = getPlayer(L, 1);
if (!player) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t itemId = getNumber<uint16_t>(L, 2);
int32_t count = getNumber<int32_t>(L, 3, 1);
bool canDropOnMap = getBoolean(L, 4, true);
uint16_t subType = getNumber<uint16_t>(L, 5, 1);
const ItemType& it = Item::items[itemId];
int32_t itemCount;
auto parameters = lua_gettop(L);
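	//five or more arguments means the second form above: count and subtype were passed separately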
if (parameters > 4) {
//subtype already supplied; count is the number of items to create
itemCount = std::max<int32_t>(1, count);
} else if (it.hasSubType()) {
if (it.stackable) {
itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
} else {
itemCount = 1;
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
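	//create the requested amount in chunks, since a single stackable item holds at most 100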
while (itemCount > 0) {
uint16_t stackCount = subType;
if (it.stackable && stackCount > 100) {
stackCount = 100;
}
Item* newItem = Item::CreateItem(itemId, stackCount);
if (!newItem) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (it.stackable) {
subType -= stackCount;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, newItem, canDropOnMap);
if (ret != RETURNVALUE_NOERROR) {
delete newItem;
pushBoolean(L, false);
return 1;
}
if (--itemCount == 0) {
if (newItem->getParent()) {
uint32_t uid = getScriptEnv()->addThing(newItem);
lua_pushnumber(L, uid);
return 1;
} else {
//stackable item stacked with existing object, newItem will be released
pushBoolean(L, false);
return 1;
}
}
}
pushBoolean(L, false);
return 1;
}
int LuaScriptInterface::luaDoTileAddItemEx(lua_State* L)
{
//doTileAddItemEx(pos, uid)
const Position& pos = getPosition(L, 1);
Tile* tile = g_game.map.getTile(pos);
if (!tile) {
std::ostringstream ss;
ss << pos << ' ' << getErrorDesc(LUA_ERROR_TILE_NOT_FOUND);
reportErrorFunc(ss.str());
pushBoolean(L, false);
return 1;
}
uint32_t uid = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(uid);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc("Item already has a parent");
pushBoolean(L, false);
return 1;
}
lua_pushnumber(L, g_game.internalAddItem(tile, item));
return 1;
}
int LuaScriptInterface::luaDebugPrint(lua_State* L)
{
//debugPrint(text)
reportErrorFunc(getString(L, -1));
return 0;
}
int LuaScriptInterface::luaGetWorldTime(lua_State* L)
{
//getWorldTime()
uint32_t time = g_game.getLightHour();
lua_pushnumber(L, time);
return 1;
}
int LuaScriptInterface::luaGetWorldLight(lua_State* L)
{
//getWorldLight()
LightInfo lightInfo;
g_game.getWorldLightInfo(lightInfo);
lua_pushnumber(L, lightInfo.level);
lua_pushnumber(L, lightInfo.color);
return 2;
}
int LuaScriptInterface::luaGetWorldUpTime(lua_State* L)
{
//getWorldUpTime()
uint64_t uptime = (OTSYS_TIME() - ProtocolStatus::start) / 1000;
lua_pushnumber(L, uptime);
return 1;
}
bool LuaScriptInterface::getArea(lua_State* L, std::list<uint32_t>& list, uint32_t& rows)
{
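// Flattens a Lua table of number rows (one sub-table per row) into `list`,
// counting the rows; returns false if a row is not a table or a cell is not a number.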
lua_pushnil(L);
for (rows = 0; lua_next(L, -2) != 0; ++rows) {
if (!isTable(L, -1)) {
return false;
}
lua_pushnil(L);
while (lua_next(L, -2) != 0) {
if (!isNumber(L, -1)) {
return false;
}
list.push_back(getNumber<uint32_t>(L, -1));
lua_pop(L, 1);
}
lua_pop(L, 1);
}
lua_pop(L, 1);
return (rows != 0);
}
int LuaScriptInterface::luaCreateCombatArea(lua_State* L)
{
//createCombatArea( {area}, <optional> {extArea} )
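//illustrative usage (must run at load time; 3 conventionally marks the
//caster's tile in the stock spell data):
//  local area = createCombatArea({
//      {0, 1, 0},
//      {1, 3, 1},
//      {0, 1, 0},
//  })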
ScriptEnvironment* env = getScriptEnv();
if (env->getScriptId() != EVENT_ID_LOADING) {
reportErrorFunc("This function can only be used while loading the script.");
pushBoolean(L, false);
return 1;
}
uint32_t areaId = g_luaEnvironment.createAreaObject(env->getScriptInterface());
AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
int parameters = lua_gettop(L);
if (parameters >= 2) {
uint32_t rowsExtArea;
std::list<uint32_t> listExtArea;
if (!isTable(L, 2) || !getArea(L, listExtArea, rowsExtArea)) {
reportErrorFunc("Invalid extended area table.");
pushBoolean(L, false);
return 1;
}
area->setupExtArea(listExtArea, rowsExtArea);
}
uint32_t rowsArea = 0;
std::list<uint32_t> listArea;
if (!isTable(L, 1) || !getArea(L, listArea, rowsArea)) {
reportErrorFunc("Invalid area table.");
pushBoolean(L, false);
return 1;
}
area->setupArea(listArea, rowsArea);
lua_pushnumber(L, areaId);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatHealth(lua_State* L)
{
//doAreaCombatHealth(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL])
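//illustrative usage (constants from the registered enums; negative values deal damage):
//  doAreaCombatHealth(cid, COMBAT_FIREDAMAGE, pos, area, -50, -100, CONST_ME_FIREAREA)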
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 4);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatType_t combatType = getNumber<CombatType_t>(L, 2);
CombatParams params;
params.combatType = combatType;
params.impactEffect = getNumber<uint8_t>(L, 7);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 8, ORIGIN_SPELL);
damage.primary.type = combatType;
damage.primary.value = normal_random(getNumber<int32_t>(L, 6), getNumber<int32_t>(L, 5));
Combat::doCombatHealth(creature, getPosition(L, 3), area, damage, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatHealth(lua_State* L)
{
//doTargetCombatHealth(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatType_t combatType = getNumber<CombatType_t>(L, 3);
CombatParams params;
params.combatType = combatType;
params.impactEffect = getNumber<uint8_t>(L, 6);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
damage.primary.type = combatType;
damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));
Combat::doCombatHealth(creature, target, damage, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatMana(lua_State* L)
{
//doAreaCombatMana(cid, pos, area, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 3);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 6);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
damage.primary.type = COMBAT_MANADRAIN;
damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));
Position pos = getPosition(L, 2);
Combat::doCombatMana(creature, pos, area, damage, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatMana(lua_State* L)
{
//doTargetCombatMana(cid, target, min, max, effect[, origin = ORIGIN_SPELL])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 5);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 6, ORIGIN_SPELL);
damage.primary.type = COMBAT_MANADRAIN;
damage.primary.value = normal_random(getNumber<int32_t>(L, 3), getNumber<int32_t>(L, 4));
Combat::doCombatMana(creature, target, damage, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatCondition(lua_State* L)
{
//doAreaCombatCondition(cid, pos, area, condition, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
const Condition* condition = getUserdata<Condition>(L, 4);
if (!condition) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONDITION_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 3);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 5);
params.conditionList.emplace_front(condition);
Combat::doCombatCondition(creature, getPosition(L, 2), area, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatCondition(lua_State* L)
{
//doTargetCombatCondition(cid, target, condition, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
const Condition* condition = getUserdata<Condition>(L, 3);
if (!condition) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONDITION_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 4);
params.conditionList.emplace_front(condition);
Combat::doCombatCondition(creature, target, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoAreaCombatDispel(lua_State* L)
{
//doAreaCombatDispel(cid, pos, area, type, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 3);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatParams params;
params.impactEffect = getNumber<uint8_t>(L, 5);
params.dispelType = getNumber<ConditionType_t>(L, 4);
Combat::doCombatDispel(creature, getPosition(L, 2), area, params);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDoTargetCombatDispel(lua_State* L)
{
//doTargetCombatDispel(cid, target, type, effect)
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatParams params;
params.dispelType = getNumber<ConditionType_t>(L, 3);
params.impactEffect = getNumber<uint8_t>(L, 4);
Combat::doCombatDispel(creature, target, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoChallengeCreature(lua_State* L)
{
//doChallengeCreature(cid, target)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
target->challengeCreature(creature);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaIsValidUID(lua_State* L)
{
//isValidUID(uid)
pushBoolean(L, getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1)) != nullptr);
return 1;
}
int LuaScriptInterface::luaIsDepot(lua_State* L)
{
//isDepot(uid)
Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, -1));
pushBoolean(L, container && container->getDepotLocker());
return 1;
}
int LuaScriptInterface::luaIsMoveable(lua_State* L)
{
//isMoveable(uid)
//isMovable(uid)
Thing* thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1));
pushBoolean(L, thing && thing->isPushable());
return 1;
}
int LuaScriptInterface::luaDoAddContainerItem(lua_State* L)
{
//doAddContainerItem(uid, itemid, <optional> count/subtype)
uint32_t uid = getNumber<uint32_t>(L, 1);
ScriptEnvironment* env = getScriptEnv();
Container* container = env->getContainerByUID(uid);
if (!container) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t itemId = getNumber<uint16_t>(L, 2);
const ItemType& it = Item::items[itemId];
int32_t itemCount = 1;
int32_t subType = 1;
uint32_t count = getNumber<uint32_t>(L, 3, 1);
if (it.hasSubType()) {
if (it.stackable) {
itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
while (itemCount > 0) {
int32_t stackCount = std::min<int32_t>(100, subType);
Item* newItem = Item::CreateItem(itemId, stackCount);
if (!newItem) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (it.stackable) {
subType -= stackCount;
}
ReturnValue ret = g_game.internalAddItem(container, newItem);
if (ret != RETURNVALUE_NOERROR) {
delete newItem;
pushBoolean(L, false);
return 1;
}
if (--itemCount == 0) {
if (newItem->getParent()) {
lua_pushnumber(L, env->addThing(newItem));
} else {
//stackable item stacked with existing object, newItem will be released
pushBoolean(L, false);
}
return 1;
}
}
pushBoolean(L, false);
return 1;
}
int LuaScriptInterface::luaGetDepotId(lua_State* L)
{
//getDepotId(uid)
uint32_t uid = getNumber<uint32_t>(L, -1);
Container* container = getScriptEnv()->getContainerByUID(uid);
if (!container) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
DepotLocker* depotLocker = container->getDepotLocker();
if (!depotLocker) {
reportErrorFunc("Depot not found");
pushBoolean(L, false);
return 1;
}
lua_pushnumber(L, depotLocker->getDepotId());
return 1;
}
int LuaScriptInterface::luaDoSetCreatureLight(lua_State* L)
{
//doSetCreatureLight(cid, lightLevel, lightColor, time)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t level = getNumber<uint16_t>(L, 2);
uint16_t color = getNumber<uint16_t>(L, 3);
uint32_t time = getNumber<uint32_t>(L, 4);
Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_LIGHT, time, level | (color << 8));
creature->addCondition(condition);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaAddEvent(lua_State* L)
{
//addEvent(callback, delay, ...)
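//illustrative usage; the delay is clamped to a 100ms minimum below, and the
//returned id can be cancelled with stopEvent(id):
//  local id = addEvent(function(text) print(text) end, 2000, "hello")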
lua_State* globalState = g_luaEnvironment.getLuaState();
if (!globalState) {
reportErrorFunc("No valid script interface!");
pushBoolean(L, false);
return 1;
} else if (globalState != L) {
lua_xmove(L, globalState, lua_gettop(L));
}
int parameters = lua_gettop(globalState);
if (!isFunction(globalState, -parameters)) { //index -parameters addresses the first argument from the left (the callback)
reportErrorFunc("callback parameter should be a function.");
pushBoolean(L, false);
return 1;
}
if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS) || g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
std::vector<std::pair<int32_t, LuaDataType>> indexes;
for (int i = 3; i <= parameters; ++i) {
if (lua_getmetatable(globalState, i) == 0) {
continue;
}
lua_rawgeti(globalState, -1, 't');
LuaDataType type = getNumber<LuaDataType>(globalState, -1);
if (type != LuaData_Unknown && type != LuaData_Tile) {
indexes.push_back({i, type});
}
lua_pop(globalState, 2);
}
if (!indexes.empty()) {
if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS)) {
bool plural = indexes.size() > 1;
std::string warningString = "Argument";
if (plural) {
warningString += 's';
}
for (const auto& entry : indexes) {
if (entry == indexes.front()) {
warningString += ' ';
} else if (entry == indexes.back()) {
warningString += " and ";
} else {
warningString += ", ";
}
warningString += '#';
warningString += std::to_string(entry.first);
}
if (plural) {
warningString += " are unsafe";
} else {
warningString += " is unsafe";
}
reportErrorFunc(warningString);
}
if (g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
for (const auto& entry : indexes) {
switch (entry.second) {
case LuaData_Item:
case LuaData_Container:
case LuaData_Teleport: {
lua_getglobal(globalState, "Item");
lua_getfield(globalState, -1, "getUniqueId");
break;
}
case LuaData_Player:
case LuaData_Monster:
case LuaData_Npc: {
lua_getglobal(globalState, "Creature");
lua_getfield(globalState, -1, "getId");
break;
}
default:
break;
}
lua_replace(globalState, -2);
lua_pushvalue(globalState, entry.first);
lua_call(globalState, 1, 1);
lua_replace(globalState, entry.first);
}
}
}
}
LuaTimerEventDesc eventDesc;
for (int i = 0; i < parameters - 2; ++i) { //-2 because addEvent needs at least two parameters
eventDesc.parameters.push_back(luaL_ref(globalState, LUA_REGISTRYINDEX));
}
uint32_t delay = std::max<uint32_t>(100, getNumber<uint32_t>(globalState, 2));
lua_pop(globalState, 1);
eventDesc.function = luaL_ref(globalState, LUA_REGISTRYINDEX);
eventDesc.scriptId = getScriptEnv()->getScriptId();
auto& lastTimerEventId = g_luaEnvironment.lastEventTimerId;
eventDesc.eventId = g_scheduler.addEvent(createSchedulerTask(
delay, std::bind(&LuaEnvironment::executeTimerEvent, &g_luaEnvironment, lastTimerEventId)
));
g_luaEnvironment.timerEvents.emplace(lastTimerEventId, std::move(eventDesc));
lua_pushnumber(L, lastTimerEventId++);
return 1;
}
int LuaScriptInterface::luaStopEvent(lua_State* L)
{
//stopEvent(eventid)
lua_State* globalState = g_luaEnvironment.getLuaState();
if (!globalState) {
reportErrorFunc("No valid script interface!");
pushBoolean(L, false);
return 1;
}
uint32_t eventId = getNumber<uint32_t>(L, 1);
auto& timerEvents = g_luaEnvironment.timerEvents;
auto it = timerEvents.find(eventId);
if (it == timerEvents.end()) {
pushBoolean(L, false);
return 1;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
g_scheduler.stopEvent(timerEventDesc.eventId);
luaL_unref(globalState, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(globalState, LUA_REGISTRYINDEX, parameter);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGetCreatureCondition(lua_State* L)
{
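//getCreatureCondition(cid, condition[, subId = 0])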
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
ConditionType_t condition = getNumber<ConditionType_t>(L, 2);
uint32_t subId = getNumber<uint32_t>(L, 3, 0);
pushBoolean(L, creature->hasCondition(condition, subId));
return 1;
}
int LuaScriptInterface::luaSaveServer(lua_State* L)
{
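//saveServer()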
g_game.saveGameState();
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCleanMap(lua_State* L)
{
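//cleanMap()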
lua_pushnumber(L, g_game.map.clean());
return 1;
}
int LuaScriptInterface::luaIsInWar(lua_State* L)
{
//isInWar(cid, target)
Player* player = getPlayer(L, 1);
if (!player) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Player* targetPlayer = getPlayer(L, 2);
if (!targetPlayer) {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, player->isInWar(targetPlayer));
return 1;
}
int LuaScriptInterface::luaGetWaypointPositionByName(lua_State* L)
{
//getWaypointPositionByName(name)
auto& waypoints = g_game.map.waypoints;
auto it = waypoints.find(getString(L, -1));
if (it != waypoints.end()) {
pushPosition(L, it->second);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaSendChannelMessage(lua_State* L)
{
//sendChannelMessage(channelId, type, message)
uint32_t channelId = getNumber<uint32_t>(L, 1);
ChatChannel* channel = g_chat->getChannelById(channelId);
if (!channel) {
pushBoolean(L, false);
return 1;
}
SpeakClasses type = getNumber<SpeakClasses>(L, 2);
std::string message = getString(L, 3);
channel->sendToAll(message, type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaSendGuildChannelMessage(lua_State* L)
{
//sendGuildChannelMessage(guildId, type, message)
uint32_t guildId = getNumber<uint32_t>(L, 1);
ChatChannel* channel = g_chat->getGuildChannelById(guildId);
if (!channel) {
pushBoolean(L, false);
return 1;
}
SpeakClasses type = getNumber<SpeakClasses>(L, 2);
std::string message = getString(L, 3);
channel->sendToAll(message, type);
pushBoolean(L, true);
return 1;
}
std::string LuaScriptInterface::escapeString(const std::string& string)
{
std::string s = string;
replaceString(s, "\\", "\\\\");
replaceString(s, "\"", "\\\"");
replaceString(s, "'", "\\'");
replaceString(s, "[[", "\\[[");
return s;
}
#ifndef LUAJIT_VERSION
const luaL_Reg LuaScriptInterface::luaBitReg[] = {
//{"tobit", LuaScriptInterface::luaBitToBit},
{"bnot", LuaScriptInterface::luaBitNot},
{"band", LuaScriptInterface::luaBitAnd},
{"bor", LuaScriptInterface::luaBitOr},
{"bxor", LuaScriptInterface::luaBitXor},
{"lshift", LuaScriptInterface::luaBitLeftShift},
{"rshift", LuaScriptInterface::luaBitRightShift},
//{"arshift", LuaScriptInterface::luaBitArithmeticalRightShift},
//{"rol", LuaScriptInterface::luaBitRotateLeft},
//{"ror", LuaScriptInterface::luaBitRotateRight},
//{"bswap", LuaScriptInterface::luaBitSwapEndian},
//{"tohex", LuaScriptInterface::luaBitToHex},
{nullptr, nullptr}
};
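//illustrative usage (this table is only registered when not built against
//LuaJIT, which ships its own bit library):
//  bit.band(0xFF, 0x0F) --> 15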
int LuaScriptInterface::luaBitNot(lua_State* L)
{
lua_pushnumber(L, ~getNumber<uint32_t>(L, -1));
return 1;
}
#define MULTIOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
int n = lua_gettop(L); \
uint32_t w = getNumber<uint32_t>(L, -1); \
for (int i = 1; i < n; ++i) \
w op getNumber<uint32_t>(L, i); \
lua_pushnumber(L, w); \
return 1; \
}
MULTIOP(And, &= )
MULTIOP(Or, |= )
MULTIOP(Xor, ^= )
#define SHIFTOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
uint32_t n1 = getNumber<uint32_t>(L, 1), n2 = getNumber<uint32_t>(L, 2); \
lua_pushnumber(L, (n1 op n2)); \
return 1; \
}
SHIFTOP(LeftShift, << )
SHIFTOP(RightShift, >> )
#endif
const luaL_Reg LuaScriptInterface::luaConfigManagerTable[] = {
{"getString", LuaScriptInterface::luaConfigManagerGetString},
{"getNumber", LuaScriptInterface::luaConfigManagerGetNumber},
{"getBoolean", LuaScriptInterface::luaConfigManagerGetBoolean},
{nullptr, nullptr}
};
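//illustrative usage (the configKeys table is assumed from the registered enums):
//  local serverName = configManager.getString(configKeys.SERVER_NAME)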
int LuaScriptInterface::luaConfigManagerGetString(lua_State* L)
{
pushString(L, g_config.getString(getNumber<ConfigManager::string_config_t>(L, -1)));
return 1;
}
int LuaScriptInterface::luaConfigManagerGetNumber(lua_State* L)
{
lua_pushnumber(L, g_config.getNumber(getNumber<ConfigManager::integer_config_t>(L, -1)));
return 1;
}
int LuaScriptInterface::luaConfigManagerGetBoolean(lua_State* L)
{
pushBoolean(L, g_config.getBoolean(getNumber<ConfigManager::boolean_config_t>(L, -1)));
return 1;
}
const luaL_Reg LuaScriptInterface::luaDatabaseTable[] = {
{"query", LuaScriptInterface::luaDatabaseExecute},
{"asyncQuery", LuaScriptInterface::luaDatabaseAsyncExecute},
{"storeQuery", LuaScriptInterface::luaDatabaseStoreQuery},
{"asyncStoreQuery", LuaScriptInterface::luaDatabaseAsyncStoreQuery},
{"escapeString", LuaScriptInterface::luaDatabaseEscapeString},
{"escapeBlob", LuaScriptInterface::luaDatabaseEscapeBlob},
{"lastInsertId", LuaScriptInterface::luaDatabaseLastInsertId},
{"tableExists", LuaScriptInterface::luaDatabaseTableExists},
{nullptr, nullptr}
};
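//illustrative usage (table/column names are assumptions; the async callback
//receives a result handle, or false on failure):
//  db.asyncStoreQuery("SELECT `name` FROM `players` LIMIT 1", function(resultId)
//      if resultId ~= false then
//          print(result.getString(resultId, "name"))
//          result.free(resultId)
//      end
//  end)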
int LuaScriptInterface::luaDatabaseExecute(lua_State* L)
{
pushBoolean(L, Database::getInstance().executeQuery(getString(L, -1)));
return 1;
}
int LuaScriptInterface::luaDatabaseAsyncExecute(lua_State* L)
{
std::function<void(DBResult_ptr, bool)> callback;
if (lua_gettop(L) > 1) {
int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
auto scriptId = getScriptEnv()->getScriptId();
callback = [ref, scriptId](DBResult_ptr, bool success) {
lua_State* luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return;
}
if (!LuaScriptInterface::reserveScriptEnv()) {
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
return;
}
lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
pushBoolean(luaState, success);
auto env = getScriptEnv();
env->setScriptId(scriptId, &g_luaEnvironment);
g_luaEnvironment.callFunction(1);
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
};
}
g_databaseTasks.addTask(getString(L, -1), callback);
return 0;
}
int LuaScriptInterface::luaDatabaseStoreQuery(lua_State* L)
{
if (DBResult_ptr res = Database::getInstance().storeQuery(getString(L, -1))) {
lua_pushnumber(L, ScriptEnvironment::addResult(res));
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDatabaseAsyncStoreQuery(lua_State* L)
{
std::function<void(DBResult_ptr, bool)> callback;
if (lua_gettop(L) > 1) {
int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
auto scriptId = getScriptEnv()->getScriptId();
callback = [ref, scriptId](DBResult_ptr result, bool) {
lua_State* luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return;
}
if (!LuaScriptInterface::reserveScriptEnv()) {
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
return;
}
lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
if (result) {
lua_pushnumber(luaState, ScriptEnvironment::addResult(result));
} else {
pushBoolean(luaState, false);
}
auto env = getScriptEnv();
env->setScriptId(scriptId, &g_luaEnvironment);
g_luaEnvironment.callFunction(1);
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
};
}
g_databaseTasks.addTask(getString(L, -1), callback, true);
return 0;
}
int LuaScriptInterface::luaDatabaseEscapeString(lua_State* L)
{
pushString(L, Database::getInstance().escapeString(getString(L, -1)));
return 1;
}
int LuaScriptInterface::luaDatabaseEscapeBlob(lua_State* L)
{
uint32_t length = getNumber<uint32_t>(L, 2);
pushString(L, Database::getInstance().escapeBlob(getString(L, 1).c_str(), length));
return 1;
}
int LuaScriptInterface::luaDatabaseLastInsertId(lua_State* L)
{
lua_pushnumber(L, Database::getInstance().getLastInsertId());
return 1;
}
int LuaScriptInterface::luaDatabaseTableExists(lua_State* L)
{
pushBoolean(L, DatabaseManager::tableExists(getString(L, -1)));
return 1;
}
const luaL_Reg LuaScriptInterface::luaResultTable[] = {
{"getNumber", LuaScriptInterface::luaResultGetNumber},
{"getString", LuaScriptInterface::luaResultGetString},
{"getStream", LuaScriptInterface::luaResultGetStream},
{"next", LuaScriptInterface::luaResultNext},
{"free", LuaScriptInterface::luaResultFree},
{nullptr, nullptr}
};
int LuaScriptInterface::luaResultGetNumber(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
const std::string& s = getString(L, 2);
lua_pushnumber(L, res->getNumber<int64_t>(s));
return 1;
}
int LuaScriptInterface::luaResultGetString(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
const std::string& s = getString(L, 2);
pushString(L, res->getString(s));
return 1;
}
int LuaScriptInterface::luaResultGetStream(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
unsigned long length;
const char* stream = res->getStream(getString(L, 2), length);
lua_pushlstring(L, stream, length);
lua_pushnumber(L, length);
return 2;
}
int LuaScriptInterface::luaResultNext(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, -1));
if (!res) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, res->next());
return 1;
}
int LuaScriptInterface::luaResultFree(lua_State* L)
{
pushBoolean(L, ScriptEnvironment::removeResult(getNumber<uint32_t>(L, -1)));
return 1;
}
// Userdata
int LuaScriptInterface::luaUserdataCompare(lua_State* L)
{
// userdataA == userdataB
pushBoolean(L, getUserdata<void>(L, 1) == getUserdata<void>(L, 2));
return 1;
}
// _G
int LuaScriptInterface::luaIsType(lua_State* L)
{
// isType(derived, base)
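// Registered class metatables carry raw integer-keyed fields filled in at class
// registration time: 'p' (parent-chain depth), 'h' (class-name hash) and
// 't' (LuaDataType tag). The loop below climbs __index links until both chains
// are at the same depth, then the class-name hashes are compared.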
lua_getmetatable(L, -2);
lua_getmetatable(L, -2);
lua_rawgeti(L, -2, 'p');
uint_fast8_t parentsB = getNumber<uint_fast8_t>(L, 1);
lua_rawgeti(L, -3, 'h');
size_t hashB = getNumber<size_t>(L, 1);
lua_rawgeti(L, -3, 'p');
uint_fast8_t parentsA = getNumber<uint_fast8_t>(L, 1);
for (uint_fast8_t i = parentsA; i < parentsB; ++i) {
lua_getfield(L, -3, "__index");
lua_replace(L, -4);
}
lua_rawgeti(L, -4, 'h');
size_t hashA = getNumber<size_t>(L, 1);
pushBoolean(L, hashA == hashB);
return 1;
}
int LuaScriptInterface::luaRawGetMetatable(lua_State* L)
{
// rawgetmetatable(metatableName)
luaL_getmetatable(L, getString(L, 1).c_str());
return 1;
}
// os
int LuaScriptInterface::luaSystemTime(lua_State* L)
{
// os.mtime()
lua_pushnumber(L, OTSYS_TIME());
return 1;
}
// table
int LuaScriptInterface::luaTableCreate(lua_State* L)
{
// table.create(arrayLength, keyLength)
lua_createtable(L, getNumber<int32_t>(L, 1), getNumber<int32_t>(L, 2));
return 1;
}
// Game
int LuaScriptInterface::luaGameGetSpectators(lua_State* L)
{
// Game.getSpectators(position[, multifloor = false[, onlyPlayer = false[, minRangeX = 0[, maxRangeX = 0[, minRangeY = 0[, maxRangeY = 0]]]]]])
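// Illustrative Lua usage: players only, same floor, default ranges.
//   local spectators = Game.getSpectators(position, false, true)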
const Position& position = getPosition(L, 1);
bool multifloor = getBoolean(L, 2, false);
bool onlyPlayers = getBoolean(L, 3, false);
int32_t minRangeX = getNumber<int32_t>(L, 4, 0);
int32_t maxRangeX = getNumber<int32_t>(L, 5, 0);
int32_t minRangeY = getNumber<int32_t>(L, 6, 0);
int32_t maxRangeY = getNumber<int32_t>(L, 7, 0);
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, multifloor, onlyPlayers, minRangeX, maxRangeX, minRangeY, maxRangeY);
lua_createtable(L, spectators.size(), 0);
int index = 0;
for (Creature* creature : spectators) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetPlayers(lua_State* L)
{
// Game.getPlayers()
lua_createtable(L, g_game.getPlayersOnline(), 0);
int index = 0;
for (const auto& playerEntry : g_game.getPlayers()) {
pushUserdata<Player>(L, playerEntry.second);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameLoadMap(lua_State* L)
{
// Game.loadMap(path)
const std::string& path = getString(L, 1);
g_dispatcher.addTask(createTask(std::bind(&Game::loadMap, &g_game, path)));
return 0;
}
int LuaScriptInterface::luaGameGetExperienceStage(lua_State* L)
{
// Game.getExperienceStage(level)
uint32_t level = getNumber<uint32_t>(L, 1);
lua_pushnumber(L, g_game.getExperienceStage(level));
return 1;
}
int LuaScriptInterface::luaGameGetMonsterCount(lua_State* L)
{
// Game.getMonsterCount()
lua_pushnumber(L, g_game.getMonstersOnline());
return 1;
}
int LuaScriptInterface::luaGameGetPlayerCount(lua_State* L)
{
// Game.getPlayerCount()
lua_pushnumber(L, g_game.getPlayersOnline());
return 1;
}
int LuaScriptInterface::luaGameGetNpcCount(lua_State* L)
{
// Game.getNpcCount()
lua_pushnumber(L, g_game.getNpcsOnline());
return 1;
}
int LuaScriptInterface::luaGameGetTowns(lua_State* L)
{
// Game.getTowns()
const auto& towns = g_game.map.towns.getTowns();
lua_createtable(L, towns.size(), 0);
int index = 0;
for (auto townEntry : towns) {
pushUserdata<Town>(L, townEntry.second);
setMetatable(L, -1, "Town");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetHouses(lua_State* L)
{
// Game.getHouses()
const auto& houses = g_game.map.houses.getHouses();
lua_createtable(L, houses.size(), 0);
int index = 0;
for (auto houseEntry : houses) {
pushUserdata<House>(L, houseEntry.second);
setMetatable(L, -1, "House");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetGameState(lua_State* L)
{
// Game.getGameState()
lua_pushnumber(L, g_game.getGameState());
return 1;
}
int LuaScriptInterface::luaGameSetGameState(lua_State* L)
{
// Game.setGameState(state)
GameState_t state = getNumber<GameState_t>(L, 1);
g_game.setGameState(state);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameGetWorldType(lua_State* L)
{
// Game.getWorldType()
lua_pushnumber(L, g_game.getWorldType());
return 1;
}
int LuaScriptInterface::luaGameSetWorldType(lua_State* L)
{
// Game.setWorldType(type)
WorldType_t type = getNumber<WorldType_t>(L, 1);
g_game.setWorldType(type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameGetReturnMessage(lua_State* L)
{
// Game.getReturnMessage(value)
ReturnValue value = getNumber<ReturnValue>(L, 1);
pushString(L, getReturnMessage(value));
return 1;
}
int LuaScriptInterface::luaGameCreateItem(lua_State* L)
{
// Game.createItem(itemId[, count[, position]])
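// Illustrative Lua usage (item id assumed from the loaded items.xml); without a
// position the item is parented to the virtual cylinder as a temporary item:
//   local item = Game.createItem(2160, 100, Position(100, 100, 7))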
uint16_t count = getNumber<uint16_t>(L, 2, 1);
uint16_t id;
if (isNumber(L, 1)) {
id = getNumber<uint16_t>(L, 1);
} else {
id = Item::items.getItemIdByName(getString(L, 1));
if (id == 0) {
lua_pushnil(L);
return 1;
}
}
const ItemType& it = Item::items[id];
if (it.stackable) {
count = std::min<uint16_t>(count, 100);
}
Item* item = Item::CreateItem(id, count);
if (!item) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) >= 3) {
const Position& position = getPosition(L, 3);
Tile* tile = g_game.map.getTile(position);
if (!tile) {
delete item;
lua_pushnil(L);
return 1;
}
g_game.internalAddItem(tile, item, INDEX_WHEREEVER, FLAG_NOLIMIT);
} else {
getScriptEnv()->addTempItem(item);
item->setParent(VirtualCylinder::virtualCylinder);
}
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
int LuaScriptInterface::luaGameCreateContainer(lua_State* L)
{
// Game.createContainer(itemId, size[, position])
uint16_t size = getNumber<uint16_t>(L, 2);
uint16_t id;
if (isNumber(L, 1)) {
id = getNumber<uint16_t>(L, 1);
} else {
id = Item::items.getItemIdByName(getString(L, 1));
if (id == 0) {
lua_pushnil(L);
return 1;
}
}
Container* container = Item::CreateItemAsContainer(id, size);
if (!container) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) >= 3) {
const Position& position = getPosition(L, 3);
Tile* tile = g_game.map.getTile(position);
if (!tile) {
delete container;
lua_pushnil(L);
return 1;
}
g_game.internalAddItem(tile, container, INDEX_WHEREEVER, FLAG_NOLIMIT);
} else {
getScriptEnv()->addTempItem(container);
container->setParent(VirtualCylinder::virtualCylinder);
}
pushUserdata<Container>(L, container);
setMetatable(L, -1, "Container");
return 1;
}
int LuaScriptInterface::luaGameCreateMonster(lua_State* L)
{
// Game.createMonster(monsterName, position[, extended = false[, force = false]])
Monster* monster = Monster::createMonster(getString(L, 1));
if (!monster) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
bool extended = getBoolean(L, 3, false);
bool force = getBoolean(L, 4, false);
if (g_game.placeCreature(monster, position, extended, force)) {
pushUserdata<Monster>(L, monster);
setMetatable(L, -1, "Monster");
} else {
delete monster;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGameCreateNpc(lua_State* L)
{
// Game.createNpc(npcName, position[, extended = false[, force = false]])
Npc* npc = Npc::createNpc(getString(L, 1));
if (!npc) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
bool extended = getBoolean(L, 3, false);
bool force = getBoolean(L, 4, false);
if (g_game.placeCreature(npc, position, extended, force)) {
pushUserdata<Npc>(L, npc);
setMetatable(L, -1, "Npc");
} else {
delete npc;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGameCreateTile(lua_State* L)
{
// Game.createTile(x, y, z[, isDynamic = false])
// Game.createTile(position[, isDynamic = false])
Position position;
bool isDynamic;
if (isTable(L, 1)) {
position = getPosition(L, 1);
isDynamic = getBoolean(L, 2, false);
} else {
position.x = getNumber<uint16_t>(L, 1);
position.y = getNumber<uint16_t>(L, 2);
position.z = getNumber<uint16_t>(L, 3);
isDynamic = getBoolean(L, 4, false);
}
Tile* tile = g_game.map.getTile(position);
if (!tile) {
if (isDynamic) {
tile = new DynamicTile(position.x, position.y, position.z);
} else {
tile = new StaticTile(position.x, position.y, position.z);
}
g_game.map.setTile(position, tile);
}
pushUserdata(L, tile);
setMetatable(L, -1, "Tile");
return 1;
}
int LuaScriptInterface::luaGameStartRaid(lua_State* L)
{
// Game.startRaid(raidName)
const std::string& raidName = getString(L, 1);
Raid* raid = g_game.raids.getRaidByName(raidName);
if (!raid || !raid->isLoaded()) {
lua_pushnumber(L, RETURNVALUE_NOSUCHRAIDEXISTS);
return 1;
}
if (g_game.raids.getRunning()) {
lua_pushnumber(L, RETURNVALUE_ANOTHERRAIDISALREADYEXECUTING);
return 1;
}
g_game.raids.setRunning(raid);
raid->startRaid();
lua_pushnumber(L, RETURNVALUE_NOERROR);
return 1;
}
int LuaScriptInterface::luaGameGetClientVersion(lua_State* L)
{
// Game.getClientVersion()
lua_createtable(L, 0, 3);
setField(L, "min", CLIENT_VERSION_MIN);
setField(L, "max", CLIENT_VERSION_MAX);
setField(L, "string", CLIENT_VERSION_STR);
return 1;
}
int LuaScriptInterface::luaGameReload(lua_State* L)
{
// Game.reload(reloadType)
ReloadTypes_t reloadType = getNumber<ReloadTypes_t>(L, 1);
if (!reloadType) {
lua_pushnil(L);
return 1;
}
if (reloadType == RELOAD_TYPE_GLOBAL) {
pushBoolean(L, g_luaEnvironment.loadFile("data/global.lua") == 0);
} else {
pushBoolean(L, g_game.reload(reloadType));
}
lua_gc(g_luaEnvironment.getLuaState(), LUA_GCCOLLECT, 0);
return 1;
}
// Variant
int LuaScriptInterface::luaVariantCreate(lua_State* L)
{
// Variant(number or string or position or thing)
LuaVariant variant;
if (isUserdata(L, 2)) {
if (Thing* thing = getThing(L, 2)) {
variant.type = VARIANT_TARGETPOSITION;
variant.pos = thing->getPosition();
}
} else if (isTable(L, 2)) {
variant.type = VARIANT_POSITION;
variant.pos = getPosition(L, 2);
} else if (isNumber(L, 2)) {
variant.type = VARIANT_NUMBER;
variant.number = getNumber<uint32_t>(L, 2);
} else if (isString(L, 2)) {
variant.type = VARIANT_STRING;
variant.text = getString(L, 2);
}
pushVariant(L, variant);
return 1;
}
int LuaScriptInterface::luaVariantGetNumber(lua_State* L)
{
// Variant:getNumber()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_NUMBER) {
lua_pushnumber(L, variant.number);
} else {
lua_pushnumber(L, 0);
}
return 1;
}
int LuaScriptInterface::luaVariantGetString(lua_State* L)
{
// Variant:getString()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_STRING) {
pushString(L, variant.text);
} else {
pushString(L, std::string());
}
return 1;
}
int LuaScriptInterface::luaVariantGetPosition(lua_State* L)
{
// Variant:getPosition()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_POSITION || variant.type == VARIANT_TARGETPOSITION) {
pushPosition(L, variant.pos);
} else {
pushPosition(L, Position());
}
return 1;
}
// Position
int LuaScriptInterface::luaPositionCreate(lua_State* L)
{
// Position([x = 0[, y = 0[, z = 0[, stackpos = 0]]]])
// Position([position])
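// Illustrative Lua usage; the result is a table exposing x, y, z and stackpos:
//   local pos = Position(100, 100, 7)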
if (lua_gettop(L) <= 1) {
pushPosition(L, Position());
return 1;
}
int32_t stackpos;
if (isTable(L, 2)) {
const Position& position = getPosition(L, 2, stackpos);
pushPosition(L, position, stackpos);
} else {
uint16_t x = getNumber<uint16_t>(L, 2, 0);
uint16_t y = getNumber<uint16_t>(L, 3, 0);
uint8_t z = getNumber<uint8_t>(L, 4, 0);
stackpos = getNumber<int32_t>(L, 5, 0);
pushPosition(L, Position(x, y, z), stackpos);
}
return 1;
}
int LuaScriptInterface::luaPositionAdd(lua_State* L)
{
// positionValue = position + positionEx
int32_t stackpos;
const Position& position = getPosition(L, 1, stackpos);
Position positionEx;
if (stackpos == 0) {
positionEx = getPosition(L, 2, stackpos);
} else {
positionEx = getPosition(L, 2);
}
pushPosition(L, position + positionEx, stackpos);
return 1;
}
int LuaScriptInterface::luaPositionSub(lua_State* L)
{
// positionValue = position - positionEx
int32_t stackpos;
const Position& position = getPosition(L, 1, stackpos);
Position positionEx;
if (stackpos == 0) {
positionEx = getPosition(L, 2, stackpos);
} else {
positionEx = getPosition(L, 2);
}
pushPosition(L, position - positionEx, stackpos);
return 1;
}
int LuaScriptInterface::luaPositionCompare(lua_State* L)
{
// position == positionEx
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
pushBoolean(L, position == positionEx);
return 1;
}
int LuaScriptInterface::luaPositionGetDistance(lua_State* L)
{
// position:getDistance(positionEx)
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
lua_pushnumber(L, std::max<int32_t>(
std::max<int32_t>(
std::abs(Position::getDistanceX(position, positionEx)),
std::abs(Position::getDistanceY(position, positionEx))
),
std::abs(Position::getDistanceZ(position, positionEx))
));
return 1;
}
int LuaScriptInterface::luaPositionIsSightClear(lua_State* L)
{
// position:isSightClear(positionEx[, sameFloor = true])
bool sameFloor = getBoolean(L, 3, true);
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
pushBoolean(L, g_game.isSightClear(position, positionEx, sameFloor));
return 1;
}
int LuaScriptInterface::luaPositionSendMagicEffect(lua_State* L)
{
// position:sendMagicEffect(magicEffect[, player = nullptr])
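// Illustrative Lua usage (effect constant from the registered enums):
//   position:sendMagicEffect(CONST_ME_TELEPORT)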
SpectatorHashSet spectators;
if (lua_gettop(L) >= 3) {
Player* player = getPlayer(L, 3);
if (player) {
spectators.insert(player);
}
}
MagicEffectClasses magicEffect = getNumber<MagicEffectClasses>(L, 2);
const Position& position = getPosition(L, 1);
if (!spectators.empty()) {
Game::addMagicEffect(spectators, position, magicEffect);
} else {
g_game.addMagicEffect(position, magicEffect);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPositionSendDistanceEffect(lua_State* L)
{
// position:sendDistanceEffect(positionEx, distanceEffect[, player = nullptr])
SpectatorHashSet spectators;
if (lua_gettop(L) >= 4) {
Player* player = getPlayer(L, 4);
if (player) {
spectators.insert(player);
}
}
ShootType_t distanceEffect = getNumber<ShootType_t>(L, 3);
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
if (!spectators.empty()) {
Game::addDistanceEffect(spectators, position, positionEx, distanceEffect);
} else {
g_game.addDistanceEffect(position, positionEx, distanceEffect);
}
pushBoolean(L, true);
return 1;
}
// Tile
int LuaScriptInterface::luaTileCreate(lua_State* L)
{
// Tile(x, y, z)
// Tile(position)
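// Illustrative Lua usage; returns nil if no tile exists at the position:
//   local tile = Tile(Position(100, 100, 7))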
Tile* tile;
if (isTable(L, 2)) {
tile = g_game.map.getTile(getPosition(L, 2));
} else {
uint8_t z = getNumber<uint8_t>(L, 4);
uint16_t y = getNumber<uint16_t>(L, 3);
uint16_t x = getNumber<uint16_t>(L, 2);
tile = g_game.map.getTile(x, y, z);
}
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetPosition(lua_State* L)
{
// tile:getPosition()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
pushPosition(L, tile->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetGround(lua_State* L)
{
// tile:getGround()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile && tile->getGround()) {
pushUserdata<Item>(L, tile->getGround());
setItemMetatable(L, -1, tile->getGround());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetThing(lua_State* L)
{
// tile:getThing(index)
int32_t index = getNumber<int32_t>(L, 2);
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = tile->getThing(index);
if (!thing) {
lua_pushnil(L);
return 1;
}
if (Creature* creature = thing->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else if (Item* item = thing->getItem()) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetThingCount(lua_State* L)
{
// tile:getThingCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
lua_pushnumber(L, tile->getThingCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleThing(lua_State* L)
{
// tile:getTopVisibleThing(creature)
Creature* creature = getCreature(L, 2);
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = tile->getTopVisibleThing(creature);
if (!thing) {
lua_pushnil(L);
return 1;
}
if (Creature* visibleCreature = thing->getCreature()) {
pushUserdata<Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else if (Item* visibleItem = thing->getItem()) {
pushUserdata<Item>(L, visibleItem);
setItemMetatable(L, -1, visibleItem);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopTopItem(lua_State* L)
{
// tile:getTopTopItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getTopTopItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopDownItem(lua_State* L)
{
// tile:getTopDownItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getTopDownItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetFieldItem(lua_State* L)
{
// tile:getFieldItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getFieldItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemById(lua_State* L)
{
// tile:getItemById(itemId[, subType = -1])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
Item* item = g_game.findItemOfType(tile, itemId, false, subType);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemByType(lua_State* L)
{
// tile:getItemByType(itemType)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
bool found;
ItemTypes_t itemType = getNumber<ItemTypes_t>(L, 2);
switch (itemType) {
case ITEM_TYPE_TELEPORT:
found = tile->hasFlag(TILESTATE_TELEPORT);
break;
case ITEM_TYPE_MAGICFIELD:
found = tile->hasFlag(TILESTATE_MAGICFIELD);
break;
case ITEM_TYPE_MAILBOX:
found = tile->hasFlag(TILESTATE_MAILBOX);
break;
case ITEM_TYPE_TRASHHOLDER:
found = tile->hasFlag(TILESTATE_TRASHHOLDER);
break;
case ITEM_TYPE_BED:
found = tile->hasFlag(TILESTATE_BED);
break;
case ITEM_TYPE_DEPOT:
found = tile->hasFlag(TILESTATE_DEPOT);
break;
default:
found = true;
break;
}
if (!found) {
lua_pushnil(L);
return 1;
}
if (Item* item = tile->getGround()) {
const ItemType& it = Item::items[item->getID()];
if (it.type == itemType) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
}
if (const TileItemVector* items = tile->getItemList()) {
for (Item* item : *items) {
const ItemType& it = Item::items[item->getID()];
if (it.type == itemType) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
}
}
lua_pushnil(L);
return 1;
}
int LuaScriptInterface::luaTileGetItemByTopOrder(lua_State* L)
{
// tile:getItemByTopOrder(topOrder)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
int32_t topOrder = getNumber<int32_t>(L, 2);
Item* item = tile->getItemByTopOrder(topOrder);
if (!item) {
lua_pushnil(L);
return 1;
}
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
int LuaScriptInterface::luaTileGetItemCountById(lua_State* L)
{
// tile:getItemCountById(itemId[, subType = -1])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
lua_pushnumber(L, tile->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaTileGetBottomCreature(lua_State* L)
{
// tile:getBottomCreature()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
const Creature* creature = tile->getBottomCreature();
if (!creature) {
lua_pushnil(L);
return 1;
}
pushUserdata<const Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
return 1;
}
int LuaScriptInterface::luaTileGetTopCreature(lua_State* L)
{
// tile:getTopCreature()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = tile->getTopCreature();
if (!creature) {
lua_pushnil(L);
return 1;
}
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
return 1;
}
int LuaScriptInterface::luaTileGetBottomVisibleCreature(lua_State* L)
{
// tile:getBottomVisibleCreature(creature)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Creature* visibleCreature = tile->getBottomVisibleCreature(creature);
if (visibleCreature) {
pushUserdata<const Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleCreature(lua_State* L)
{
// tile:getTopVisibleCreature(creature)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* visibleCreature = tile->getTopVisibleCreature(creature);
if (visibleCreature) {
pushUserdata<Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItems(lua_State* L)
{
// tile:getItems()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
TileItemVector* itemVector = tile->getItemList();
if (!itemVector) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, itemVector->size(), 0);
int index = 0;
for (Item* item : *itemVector) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemCount(lua_State* L)
{
// tile:getItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getItemCount());
return 1;
}
int LuaScriptInterface::luaTileGetDownItemCount(lua_State* L)
{
// tile:getDownItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
lua_pushnumber(L, tile->getDownItemCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopItemCount(lua_State* L)
{
// tile:getTopItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getTopItemCount());
return 1;
}
int LuaScriptInterface::luaTileGetCreatures(lua_State* L)
{
// tile:getCreatures()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
CreatureVector* creatureVector = tile->getCreatures();
if (!creatureVector) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creatureVector->size(), 0);
int index = 0;
for (Creature* creature : *creatureVector) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaTileGetCreatureCount(lua_State* L)
{
// tile:getCreatureCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getCreatureCount());
return 1;
}
int LuaScriptInterface::luaTileHasProperty(lua_State* L)
{
// tile:hasProperty(property[, item])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item;
if (lua_gettop(L) >= 3) {
item = getUserdata<Item>(L, 3);
} else {
item = nullptr;
}
ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
if (item) {
pushBoolean(L, tile->hasProperty(item, property));
} else {
pushBoolean(L, tile->hasProperty(property));
}
return 1;
}
int LuaScriptInterface::luaTileGetThingIndex(lua_State* L)
{
// tile:getThingIndex(thing)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = getThing(L, 2);
if (thing) {
lua_pushnumber(L, tile->getThingIndex(thing));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileHasFlag(lua_State* L)
{
// tile:hasFlag(flag)
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
tileflags_t flag = getNumber<tileflags_t>(L, 2);
pushBoolean(L, tile->hasFlag(flag));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileQueryAdd(lua_State* L)
{
// tile:queryAdd(thing[, flags])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = getThing(L, 2);
if (thing) {
uint32_t flags = getNumber<uint32_t>(L, 3, 0);
lua_pushnumber(L, tile->queryAdd(0, *thing, 1, flags));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetHouse(lua_State* L)
{
// tile:getHouse()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tile)) {
pushUserdata<House>(L, houseTile->getHouse());
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
// NetworkMessage
int LuaScriptInterface::luaNetworkMessageCreate(lua_State* L)
{
// NetworkMessage()
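// Illustrative Lua usage (the opcode byte is a placeholder assumption):
//   local msg = NetworkMessage()
//   msg:addByte(0x32)
//   msg:addString("hello")
//   msg:sendToPlayer(player)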
pushUserdata<NetworkMessage>(L, new NetworkMessage);
setMetatable(L, -1, "NetworkMessage");
return 1;
}
int LuaScriptInterface::luaNetworkMessageDelete(lua_State* L)
{
NetworkMessage** messagePtr = getRawUserdata<NetworkMessage>(L, 1);
if (messagePtr && *messagePtr) {
delete *messagePtr;
*messagePtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaNetworkMessageGetByte(lua_State* L)
{
// networkMessage:getByte()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->getByte());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU16(lua_State* L)
{
// networkMessage:getU16()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint16_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU32(lua_State* L)
{
// networkMessage:getU32()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint32_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU64(lua_State* L)
{
// networkMessage:getU64()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint64_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetString(lua_State* L)
{
// networkMessage:getString()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
pushString(L, message->getString());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetPosition(lua_State* L)
{
// networkMessage:getPosition()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
pushPosition(L, message->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddByte(lua_State* L)
{
// networkMessage:addByte(number)
uint8_t number = getNumber<uint8_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addByte(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU16(lua_State* L)
{
// networkMessage:addU16(number)
uint16_t number = getNumber<uint16_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint16_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU32(lua_State* L)
{
// networkMessage:addU32(number)
uint32_t number = getNumber<uint32_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint32_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU64(lua_State* L)
{
// networkMessage:addU64(number)
uint64_t number = getNumber<uint64_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint64_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddString(lua_State* L)
{
// networkMessage:addString(string)
const std::string& string = getString(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addString(string);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddPosition(lua_State* L)
{
// networkMessage:addPosition(position)
const Position& position = getPosition(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addPosition(position);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddDouble(lua_State* L)
{
// networkMessage:addDouble(number)
double number = getNumber<double>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addDouble(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItem(lua_State* L)
{
// networkMessage:addItem(item)
Item* item = getUserdata<Item>(L, 2);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
lua_pushnil(L);
return 1;
}
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addItem(item);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItemId(lua_State* L)
{
// networkMessage:addItemId(itemId)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (!message) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
message->addItemId(itemId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaNetworkMessageReset(lua_State* L)
{
// networkMessage:reset()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->reset();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSkipBytes(lua_State* L)
{
// networkMessage:skipBytes(number)
int16_t number = getNumber<int16_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->skipBytes(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSendToPlayer(lua_State* L)
{
// networkMessage:sendToPlayer(player)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (!message) {
lua_pushnil(L);
return 1;
}
Player* player = getPlayer(L, 2);
if (player) {
player->sendNetworkMessage(*message);
pushBoolean(L, true);
} else {
reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
lua_pushnil(L);
}
return 1;
}
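// Usage sketch (Lua side) for the NetworkMessage bindings above. The
// NetworkMessage() constructor is assumed to be registered elsewhere in this
// interface, and the opcode value is purely illustrative:
//   local msg = NetworkMessage()
//   msg:addByte(0x32)              -- hypothetical opcode
//   msg:addString("hello")
//   msg:addPosition(player:getPosition())
//   msg:sendToPlayer(player)       -- 'player' is a Player userdata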
// ModalWindow
int LuaScriptInterface::luaModalWindowCreate(lua_State* L)
{
// ModalWindow(id, title, message)
const std::string& message = getString(L, 4);
const std::string& title = getString(L, 3);
uint32_t id = getNumber<uint32_t>(L, 2);
pushUserdata<ModalWindow>(L, new ModalWindow(id, title, message));
setMetatable(L, -1, "ModalWindow");
return 1;
}
int LuaScriptInterface::luaModalWindowDelete(lua_State* L)
{
ModalWindow** windowPtr = getRawUserdata<ModalWindow>(L, 1);
if (windowPtr && *windowPtr) {
delete *windowPtr;
*windowPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaModalWindowGetId(lua_State* L)
{
// modalWindow:getId()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetTitle(lua_State* L)
{
// modalWindow:getTitle()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushString(L, window->title);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetMessage(lua_State* L)
{
// modalWindow:getMessage()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushString(L, window->message);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetTitle(lua_State* L)
{
// modalWindow:setTitle(text)
const std::string& text = getString(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->title = text;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetMessage(lua_State* L)
{
// modalWindow:setMessage(text)
const std::string& text = getString(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->message = text;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetButtonCount(lua_State* L)
{
// modalWindow:getButtonCount()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->buttons.size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetChoiceCount(lua_State* L)
{
// modalWindow:getChoiceCount()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->choices.size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowAddButton(lua_State* L)
{
// modalWindow:addButton(id, text)
const std::string& text = getString(L, 3);
uint8_t id = getNumber<uint8_t>(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->buttons.emplace_back(text, id);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowAddChoice(lua_State* L)
{
// modalWindow:addChoice(id, text)
const std::string& text = getString(L, 3);
uint8_t id = getNumber<uint8_t>(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->choices.emplace_back(text, id);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEnterButton(lua_State* L)
{
// modalWindow:getDefaultEnterButton()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->defaultEnterButton);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEnterButton(lua_State* L)
{
// modalWindow:setDefaultEnterButton(buttonId)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->defaultEnterButton = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEscapeButton(lua_State* L)
{
// modalWindow:getDefaultEscapeButton()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->defaultEscapeButton);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEscapeButton(lua_State* L)
{
// modalWindow:setDefaultEscapeButton(buttonId)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->defaultEscapeButton = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowHasPriority(lua_State* L)
{
// modalWindow:hasPriority()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushBoolean(L, window->priority);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetPriority(lua_State* L)
{
// modalWindow:setPriority(priority)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->priority = getBoolean(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSendToPlayer(lua_State* L)
{
// modalWindow:sendToPlayer(player)
Player* player = getPlayer(L, 2);
if (!player) {
lua_pushnil(L);
return 1;
}
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
if (!player->hasModalWindowOpen(window->id)) {
player->sendModalWindow(*window);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
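// Usage sketch (Lua side) for the ModalWindow bindings above; ids and texts
// are illustrative only:
//   local window = ModalWindow(1, "Quest", "Accept the quest?")
//   window:addButton(1, "Yes")
//   window:addButton(2, "No")
//   window:setDefaultEnterButton(1)
//   window:setDefaultEscapeButton(2)
//   window:sendToPlayer(player)    -- skipped if a window with this id is already open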
// Item
int LuaScriptInterface::luaItemCreate(lua_State* L)
{
// Item(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(id);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemIsItem(lua_State* L)
{
// item:isItem()
pushBoolean(L, getUserdata<const Item>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaItemGetParent(lua_State* L)
{
// item:getParent()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Cylinder* parent = item->getParent();
if (!parent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, parent);
return 1;
}
int LuaScriptInterface::luaItemGetTopParent(lua_State* L)
{
// item:getTopParent()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Cylinder* topParent = item->getTopParent();
if (!topParent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, topParent);
return 1;
}
int LuaScriptInterface::luaItemGetId(lua_State* L)
{
// item:getId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemClone(lua_State* L)
{
// item:clone()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Item* clone = item->clone();
if (!clone) {
lua_pushnil(L);
return 1;
}
getScriptEnv()->addTempItem(clone);
clone->setParent(VirtualCylinder::virtualCylinder);
pushUserdata<Item>(L, clone);
setItemMetatable(L, -1, clone);
return 1;
}
int LuaScriptInterface::luaItemSplit(lua_State* L)
{
// item:split([count = 1])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item* item = *itemPtr;
if (!item || !item->isStackable()) {
lua_pushnil(L);
return 1;
}
uint16_t count = std::min<uint16_t>(getNumber<uint16_t>(L, 2, 1), item->getItemCount());
uint16_t diff = item->getItemCount() - count;
Item* splitItem = item->clone();
if (!splitItem) {
lua_pushnil(L);
return 1;
}
ScriptEnvironment* env = getScriptEnv();
uint32_t uid = env->addThing(item);
Item* newItem = g_game.transformItem(item, item->getID(), diff);
if (item->isRemoved()) {
env->removeItemByUID(uid);
}
if (newItem && newItem != item) {
env->insertItem(uid, newItem);
}
*itemPtr = newItem;
splitItem->setParent(VirtualCylinder::virtualCylinder);
env->addTempItem(splitItem);
pushUserdata<Item>(L, splitItem);
setItemMetatable(L, -1, splitItem);
return 1;
}
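// Note on item:split(): the original stack is shrunk via transformItem(),
// which may replace the underlying Item*, so the uid bookkeeping above keeps
// the script's reference valid. The returned stack is a temporary item
// parented to the virtual cylinder and must be placed somewhere to persist:
//   local half = item:split(5)     -- assumes 'item' is a stackable userdata
//   half:moveTo(player)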
int LuaScriptInterface::luaItemRemove(lua_State* L)
{
// item:remove([count = -1])
Item* item = getUserdata<Item>(L, 1);
if (item) {
int32_t count = getNumber<int32_t>(L, 2, -1);
pushBoolean(L, g_game.internalRemoveItem(item, count) == RETURNVALUE_NOERROR);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetUniqueId(lua_State* L)
{
// item:getUniqueId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
uint32_t uniqueId = item->getUniqueId();
if (uniqueId == 0) {
uniqueId = getScriptEnv()->addThing(item);
}
lua_pushnumber(L, uniqueId);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetActionId(lua_State* L)
{
// item:getActionId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getActionId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetActionId(lua_State* L)
{
// item:setActionId(actionId)
uint16_t actionId = getNumber<uint16_t>(L, 2);
Item* item = getUserdata<Item>(L, 1);
if (item) {
item->setActionId(actionId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetCount(lua_State* L)
{
// item:getCount()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getItemCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetCharges(lua_State* L)
{
// item:getCharges()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getCharges());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetFluidType(lua_State* L)
{
// item:getFluidType()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getFluidType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetWeight(lua_State* L)
{
// item:getWeight()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getWeight());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetSubType(lua_State* L)
{
// item:getSubType()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getSubType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetName(lua_State* L)
{
// item:getName()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetPluralName(lua_State* L)
{
// item:getPluralName()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getPluralName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetArticle(lua_State* L)
{
// item:getArticle()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getArticle());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetPosition(lua_State* L)
{
// item:getPosition()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushPosition(L, item->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetTile(lua_State* L)
{
// item:getTile()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Tile* tile = item->getTile();
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemHasAttribute(lua_State* L)
{
// item:hasAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
pushBoolean(L, item->hasAttribute(attribute));
return 1;
}
int LuaScriptInterface::luaItemGetAttribute(lua_State* L)
{
// item:getAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
if (ItemAttributes::isIntAttrType(attribute)) {
lua_pushnumber(L, item->getIntAttr(attribute));
} else if (ItemAttributes::isStrAttrType(attribute)) {
pushString(L, item->getStrAttr(attribute));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetAttribute(lua_State* L)
{
// item:setAttribute(key, value)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
if (ItemAttributes::isIntAttrType(attribute)) {
if (attribute == ITEM_ATTRIBUTE_UNIQUEID) {
reportErrorFunc("Attempt to set protected key \"uid\"");
pushBoolean(L, false);
return 1;
}
item->setIntAttr(attribute, getNumber<int32_t>(L, 3));
pushBoolean(L, true);
} else if (ItemAttributes::isStrAttrType(attribute)) {
item->setStrAttr(attribute, getString(L, 3));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemRemoveAttribute(lua_State* L)
{
// item:removeAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
bool ret = attribute != ITEM_ATTRIBUTE_UNIQUEID;
if (ret) {
item->removeAttribute(attribute);
} else {
reportErrorFunc("Attempt to erase protected key \"uid\"");
}
pushBoolean(L, ret);
return 1;
}
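// Usage sketch (Lua side) for the attribute accessors above; keys may be given
// as ITEM_ATTRIBUTE_* numbers or as strings accepted by stringToItemAttribute:
//   if not item:hasAttribute(ITEM_ATTRIBUTE_TEXT) then
//       item:setAttribute(ITEM_ATTRIBUTE_TEXT, "engraved")
//   end
//   local text = item:getAttribute(ITEM_ATTRIBUTE_TEXT)
//   item:removeAttribute(ITEM_ATTRIBUTE_TEXT)   -- "uid" is protected and refused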
int LuaScriptInterface::luaItemMoveTo(lua_State* L)
{
// item:moveTo(position or cylinder)
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item* item = *itemPtr;
if (!item || item->isRemoved()) {
lua_pushnil(L);
return 1;
}
Cylinder* toCylinder;
if (isUserdata(L, 2)) {
const LuaDataType type = getUserdataType(L, 2);
switch (type) {
case LuaData_Container:
toCylinder = getUserdata<Container>(L, 2);
break;
case LuaData_Player:
toCylinder = getUserdata<Player>(L, 2);
break;
case LuaData_Tile:
toCylinder = getUserdata<Tile>(L, 2);
break;
default:
toCylinder = nullptr;
break;
}
} else {
toCylinder = g_game.map.getTile(getPosition(L, 2));
}
if (!toCylinder) {
lua_pushnil(L);
return 1;
}
if (item->getParent() == toCylinder) {
pushBoolean(L, true);
return 1;
}
if (item->getParent() == VirtualCylinder::virtualCylinder) {
pushBoolean(L, g_game.internalAddItem(toCylinder, item) == RETURNVALUE_NOERROR);
} else {
Item* moveItem = nullptr;
ReturnValue ret = g_game.internalMoveItem(item->getParent(), toCylinder, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, FLAG_NOLIMIT | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE | FLAG_IGNORENOTMOVEABLE);
if (moveItem) {
*itemPtr = moveItem;
}
pushBoolean(L, ret == RETURNVALUE_NOERROR);
}
return 1;
}
int LuaScriptInterface::luaItemTransform(lua_State* L)
{
// item:transform(itemId[, count/subType = -1])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item*& item = *itemPtr;
if (!item) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
pushBoolean(L, true);
return 1;
}
const ItemType& it = Item::items[itemId];
if (it.stackable) {
subType = std::min<int32_t>(subType, 100);
}
ScriptEnvironment* env = getScriptEnv();
uint32_t uid = env->addThing(item);
Item* newItem = g_game.transformItem(item, itemId, subType);
if (item->isRemoved()) {
env->removeItemByUID(uid);
}
if (newItem && newItem != item) {
env->insertItem(uid, newItem);
}
item = newItem;
pushBoolean(L, true);
return 1;
}
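// Note on item:transform(): g_game.transformItem() may destroy the old Item
// and return a different instance, so the raw userdata pointer is rewritten
// above and the script uid is re-inserted to keep both references valid.
//   item:transform(2160)           -- illustrative item id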
int LuaScriptInterface::luaItemDecay(lua_State* L)
{
// item:decay()
Item* item = getUserdata<Item>(L, 1);
if (item) {
g_game.startDecay(item);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetDescription(lua_State* L)
{
// item:getDescription(distance)
Item* item = getUserdata<Item>(L, 1);
if (item) {
int32_t distance = getNumber<int32_t>(L, 2);
pushString(L, item->getDescription(distance));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemHasProperty(lua_State* L)
{
// item:hasProperty(property)
Item* item = getUserdata<Item>(L, 1);
if (item) {
ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
pushBoolean(L, item->hasProperty(property));
} else {
lua_pushnil(L);
}
return 1;
}
// Container
int LuaScriptInterface::luaContainerCreate(lua_State* L)
{
// Container(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Container* container = getScriptEnv()->getContainerByUID(id);
if (container) {
pushUserdata(L, container);
setMetatable(L, -1, "Container");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetSize(lua_State* L)
{
// container:getSize()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetCapacity(lua_State* L)
{
// container:getCapacity()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->capacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetEmptySlots(lua_State* L)
{
// container:getEmptySlots([recursive = false])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint32_t slots = container->capacity() - container->size();
bool recursive = getBoolean(L, 2, false);
if (recursive) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if (Container* tmpContainer = (*it)->getContainer()) {
slots += tmpContainer->capacity() - tmpContainer->size();
}
}
}
lua_pushnumber(L, slots);
return 1;
}
int LuaScriptInterface::luaContainerGetItemHoldingCount(lua_State* L)
{
// container:getItemHoldingCount()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->getItemHoldingCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetItem(lua_State* L)
{
// container:getItem(index)
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint32_t index = getNumber<uint32_t>(L, 2);
Item* item = container->getItemByIndex(index);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerHasItem(lua_State* L)
{
// container:hasItem(item)
Item* item = getUserdata<Item>(L, 2);
Container* container = getUserdata<Container>(L, 1);
if (container) {
pushBoolean(L, container->isHoldingItem(item));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerAddItem(lua_State* L)
{
// container:addItem(itemId[, count/subType = 1[, index = INDEX_WHEREEVER[, flags = 0]]])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t subType = getNumber<uint32_t>(L, 3, 1);
Item* item = Item::CreateItem(itemId, std::min<uint32_t>(subType, 100));
if (!item) {
lua_pushnil(L);
return 1;
}
int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 5, 0);
ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
if (ret == RETURNVALUE_NOERROR) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
delete item;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerAddItemEx(lua_State* L)
{
// container:addItemEx(item[, index = INDEX_WHEREEVER[, flags = 0]])
Item* item = getUserdata<Item>(L, 2);
if (!item) {
lua_pushnil(L);
return 1;
}
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc("Item already has a parent");
lua_pushnil(L);
return 1;
}
int32_t index = getNumber<int32_t>(L, 3, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 4, 0);
ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
if (ret == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, ret);
return 1;
}
int LuaScriptInterface::luaContainerGetItemCountById(lua_State* L)
{
// container:getItemCountById(itemId[, subType = -1])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
lua_pushnumber(L, container->getItemTypeCount(itemId, subType));
return 1;
}
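// Usage sketch (Lua side) for the Container bindings above; the item id is
// illustrative:
//   local backpack = Container(uid)             -- uid from the script environment
//   if backpack:getEmptySlots(true) > 0 then    -- true = include nested containers
//       backpack:addItem(2148, 100)             -- stackables are capped at 100
//   end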
// Teleport
int LuaScriptInterface::luaTeleportCreate(lua_State* L)
{
// Teleport(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(id);
if (item && item->getTeleport()) {
pushUserdata(L, item);
setMetatable(L, -1, "Teleport");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTeleportGetDestination(lua_State* L)
{
// teleport:getDestination()
Teleport* teleport = getUserdata<Teleport>(L, 1);
if (teleport) {
pushPosition(L, teleport->getDestPos());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTeleportSetDestination(lua_State* L)
{
// teleport:setDestination(position)
Teleport* teleport = getUserdata<Teleport>(L, 1);
if (teleport) {
teleport->setDestPos(getPosition(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
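// Usage sketch (Lua side): retarget an existing teleporter. Teleport(uid)
// yields nil unless the uid resolves to an item with teleport behavior; the
// coordinates below are illustrative:
//   local teleport = Teleport(uid)
//   if teleport then
//       teleport:setDestination(Position(100, 100, 7))
//   end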
// Creature
int LuaScriptInterface::luaCreatureCreate(lua_State* L)
{
// Creature(id or name or userdata)
Creature* creature;
if (isNumber(L, 2)) {
creature = g_game.getCreatureByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
creature = g_game.getCreatureByName(getString(L, 2));
} else if (isUserdata(L, 2)) {
LuaDataType type = getUserdataType(L, 2);
if (type != LuaData_Player && type != LuaData_Monster && type != LuaData_Npc) {
lua_pushnil(L);
return 1;
}
creature = getUserdata<Creature>(L, 2);
} else {
creature = nullptr;
}
if (creature) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetEvents(lua_State* L)
{
// creature:getEvents(type)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
CreatureEventType_t eventType = getNumber<CreatureEventType_t>(L, 2);
const auto& eventList = creature->getCreatureEvents(eventType);
lua_createtable(L, eventList.size(), 0);
int index = 0;
for (CreatureEvent* event : eventList) {
pushString(L, event->getName());
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaCreatureRegisterEvent(lua_State* L)
{
// creature:registerEvent(name)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
const std::string& name = getString(L, 2);
pushBoolean(L, creature->registerCreatureEvent(name));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureUnregisterEvent(lua_State* L)
{
// creature:unregisterEvent(name)
const std::string& name = getString(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->unregisterCreatureEvent(name));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsRemoved(lua_State* L)
{
// creature:isRemoved()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isRemoved());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsCreature(lua_State* L)
{
// creature:isCreature()
pushBoolean(L, getUserdata<const Creature>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaCreatureIsInGhostMode(lua_State* L)
{
// creature:isInGhostMode()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isInGhostMode());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsHealthHidden(lua_State* L)
{
// creature:isHealthHidden()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isHealthHidden());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSee(lua_State* L)
{
// creature:canSee(position)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Position& position = getPosition(L, 2);
pushBoolean(L, creature->canSee(position));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSeeCreature(lua_State* L)
{
// creature:canSeeCreature(creature)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Creature* otherCreature = getCreature(L, 2);
pushBoolean(L, creature->canSeeCreature(otherCreature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetParent(lua_State* L)
{
// creature:getParent()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Cylinder* parent = creature->getParent();
if (!parent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, parent);
return 1;
}
int LuaScriptInterface::luaCreatureGetId(lua_State* L)
{
// creature:getId()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetName(lua_State* L)
{
// creature:getName()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushString(L, creature->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetTarget(lua_State* L)
{
// creature:getTarget()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* target = creature->getAttackedCreature();
if (target) {
pushUserdata<Creature>(L, target);
setCreatureMetatable(L, -1, target);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetTarget(lua_State* L)
{
// creature:setTarget(target)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
Creature* target = getCreature(L, 2);
pushBoolean(L, creature->setAttackedCreature(target));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetFollowCreature(lua_State* L)
{
// creature:getFollowCreature()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* followCreature = creature->getFollowCreature();
if (followCreature) {
pushUserdata<Creature>(L, followCreature);
setCreatureMetatable(L, -1, followCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetFollowCreature(lua_State* L)
{
// creature:setFollowCreature(followedCreature)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
Creature* followCreature = getCreature(L, 2);
pushBoolean(L, creature->setFollowCreature(followCreature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetMaster(lua_State* L)
{
// creature:getMaster()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* master = creature->getMaster();
if (!master) {
lua_pushnil(L);
return 1;
}
pushUserdata<Creature>(L, master);
setCreatureMetatable(L, -1, master);
return 1;
}
int LuaScriptInterface::luaCreatureSetMaster(lua_State* L)
{
// creature:setMaster(master)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* master = getCreature(L, 2);
if (master) {
pushBoolean(L, creature->convinceCreature(master));
} else {
master = creature->getMaster();
if (master) {
master->removeSummon(creature);
creature->incrementReferenceCounter();
creature->setDropLoot(true);
}
pushBoolean(L, true);
}
g_game.updateCreatureType(creature);
return 1;
}
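// Note on creature:setMaster(): passing a creature convinces this creature to
// become its summon; passing nil detaches it from its current master and
// re-enables loot dropping. Either way the creature type is refreshed for
// spectators via updateCreatureType().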
int LuaScriptInterface::luaCreatureGetLight(lua_State* L)
{
// creature:getLight()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
LightInfo light;
creature->getCreatureLight(light);
lua_pushnumber(L, light.level);
lua_pushnumber(L, light.color);
return 2;
}
int LuaScriptInterface::luaCreatureSetLight(lua_State* L)
{
// creature:setLight(color, level)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
LightInfo light;
light.color = getNumber<uint8_t>(L, 2);
light.level = getNumber<uint8_t>(L, 3);
creature->setCreatureLight(light);
g_game.changeLight(creature);
pushBoolean(L, true);
return 1;
}
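// Note the asymmetry in the light accessors above: getLight() returns two
// values in the order (level, color), while setLight() takes (color, level).
//   local level, color = creature:getLight()
//   creature:setLight(color, level + 1)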
int LuaScriptInterface::luaCreatureGetSpeed(lua_State* L)
{
// creature:getSpeed()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetBaseSpeed(lua_State* L)
{
// creature:getBaseSpeed()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getBaseSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureChangeSpeed(lua_State* L)
{
// creature:changeSpeed(delta)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
int32_t delta = getNumber<int32_t>(L, 2);
g_game.changeSpeed(creature, delta);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureSetDropLoot(lua_State* L)
{
// creature:setDropLoot(doDrop)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setDropLoot(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetPosition(lua_State* L)
{
// creature:getPosition()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushPosition(L, creature->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetTile(lua_State* L)
{
// creature:getTile()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Tile* tile = creature->getTile();
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDirection(lua_State* L)
{
// creature:getDirection()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getDirection());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetDirection(lua_State* L)
{
// creature:setDirection(direction)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, g_game.internalCreatureTurn(creature, getNumber<Direction>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetHealth(lua_State* L)
{
// creature:getHealth()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getHealth());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureAddHealth(lua_State* L)
{
// creature:addHealth(healthChange)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
CombatDamage damage;
damage.primary.value = getNumber<int32_t>(L, 2);
if (damage.primary.value >= 0) {
damage.primary.type = COMBAT_HEALING;
} else {
damage.primary.type = COMBAT_UNDEFINEDDAMAGE;
}
pushBoolean(L, g_game.combatChangeHealth(nullptr, creature, damage));
return 1;
}
int LuaScriptInterface::luaCreatureGetMaxHealth(lua_State* L)
{
// creature:getMaxHealth()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getMaxHealth());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetMaxHealth(lua_State* L)
{
// creature:setMaxHealth(maxHealth)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
creature->healthMax = getNumber<uint32_t>(L, 2);
creature->health = std::min<int32_t>(creature->health, creature->healthMax);
g_game.addCreatureHealth(creature);
Player* player = creature->getPlayer();
if (player) {
player->sendStats();
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureSetHiddenHealth(lua_State* L)
{
// creature:setHiddenHealth(hide)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setHiddenHealth(getBoolean(L, 2));
g_game.addCreatureHealth(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetMana(lua_State* L)
{
// creature:getMana()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureAddMana(lua_State* L)
{
// creature:addMana(manaChange[, animationOnLoss = false])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
int32_t manaChange = getNumber<int32_t>(L, 2);
bool animationOnLoss = getBoolean(L, 3, false);
if (!animationOnLoss && manaChange < 0) {
creature->changeMana(manaChange);
} else {
g_game.combatChangeMana(nullptr, creature, manaChange, ORIGIN_NONE);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureGetMaxMana(lua_State* L)
{
// creature:getMaxMana()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getMaxMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetSkull(lua_State* L)
{
// creature:getSkull()
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getSkull());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetSkull(lua_State* L)
{
// creature:setSkull(skull)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setSkull(getNumber<Skulls_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetOutfit(lua_State* L)
{
// creature:getOutfit()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushOutfit(L, creature->getCurrentOutfit());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetOutfit(lua_State* L)
{
// creature:setOutfit(outfit)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->defaultOutfit = getOutfit(L, 2);
g_game.internalCreatureChangeOutfit(creature, creature->defaultOutfit);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetCondition(lua_State* L)
{
// creature:getCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
uint32_t subId = getNumber<uint32_t>(L, 4, 0);
Condition* condition = creature->getCondition(conditionType, conditionId, subId);
if (condition) {
pushUserdata<Condition>(L, condition);
setWeakMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureAddCondition(lua_State* L)
{
// creature:addCondition(condition[, force = false])
Creature* creature = getUserdata<Creature>(L, 1);
Condition* condition = getUserdata<Condition>(L, 2);
if (creature && condition) {
bool force = getBoolean(L, 3, false);
pushBoolean(L, creature->addCondition(condition->clone(), force));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureRemoveCondition(lua_State* L)
{
// creature:removeCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0[, force = false]]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
uint32_t subId = getNumber<uint32_t>(L, 4, 0);
Condition* condition = creature->getCondition(conditionType, conditionId, subId);
if (condition) {
bool force = getBoolean(L, 5, false);
creature->removeCondition(condition, force);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsImmune(lua_State* L)
{
// creature:isImmune(condition or conditionType)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
if (isNumber(L, 2)) {
pushBoolean(L, creature->isImmune(getNumber<ConditionType_t>(L, 2)));
} else if (Condition* condition = getUserdata<Condition>(L, 2)) {
pushBoolean(L, creature->isImmune(condition->getType()));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureRemove(lua_State* L)
{
// creature:remove()
Creature** creaturePtr = getRawUserdata<Creature>(L, 1);
if (!creaturePtr) {
lua_pushnil(L);
return 1;
}
Creature* creature = *creaturePtr;
if (!creature) {
lua_pushnil(L);
return 1;
}
Player* player = creature->getPlayer();
if (player) {
player->kickPlayer(true);
} else {
g_game.removeCreature(creature);
}
*creaturePtr = nullptr;
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureTeleportTo(lua_State* L)
{
// creature:teleportTo(position[, pushMovement = false])
bool pushMovement = getBoolean(L, 3, false);
const Position& position = getPosition(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Position oldPosition = creature->getPosition();
if (g_game.internalTeleport(creature, position, pushMovement) != RETURNVALUE_NOERROR) {
pushBoolean(L, false);
return 1;
}
if (!pushMovement) {
if (oldPosition.x == position.x) {
if (oldPosition.y < position.y) {
g_game.internalCreatureTurn(creature, DIRECTION_SOUTH);
} else {
g_game.internalCreatureTurn(creature, DIRECTION_NORTH);
}
} else if (oldPosition.x > position.x) {
g_game.internalCreatureTurn(creature, DIRECTION_WEST);
} else if (oldPosition.x < position.x) {
g_game.internalCreatureTurn(creature, DIRECTION_EAST);
}
}
pushBoolean(L, true);
return 1;
}
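// Usage sketch (Lua side): when pushMovement is false (the default), the
// creature is also turned to face the direction of travel, as implemented above.
//   creature:teleportTo(Position(95, 117, 7))   -- illustrative coordinates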
int LuaScriptInterface::luaCreatureSay(lua_State* L)
{
// creature:say(text, type[, ghost = false[, target = nullptr[, position]]])
int parameters = lua_gettop(L);
Position position;
if (parameters >= 6) {
position = getPosition(L, 6);
if (!position.x || !position.y) {
reportErrorFunc("Invalid position specified.");
pushBoolean(L, false);
return 1;
}
}
Creature* target = nullptr;
if (parameters >= 5) {
target = getCreature(L, 5);
}
bool ghost = getBoolean(L, 4, false);
SpeakClasses type = getNumber<SpeakClasses>(L, 3);
const std::string& text = getString(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
SpectatorHashSet spectators;
if (target) {
spectators.insert(target);
}
if (position.x != 0) {
pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators, &position));
} else {
pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators));
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDamageMap(lua_State* L)
{
// creature:getDamageMap()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creature->damageMap.size(), 0);
for (auto damageEntry : creature->damageMap) {
lua_createtable(L, 0, 2);
setField(L, "total", damageEntry.second.total);
setField(L, "ticks", damageEntry.second.ticks);
lua_rawseti(L, -2, damageEntry.first);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetSummons(lua_State* L)
{
// creature:getSummons()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creature->getSummonCount(), 0);
int index = 0;
for (Creature* summon : creature->getSummons()) {
pushUserdata<Creature>(L, summon);
setCreatureMetatable(L, -1, summon);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDescription(lua_State* L)
{
// creature:getDescription(distance)
int32_t distance = getNumber<int32_t>(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushString(L, creature->getDescription(distance));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetPathTo(lua_State* L)
{
// creature:getPathTo(pos[, minTargetDist = 0[, maxTargetDist = 1[, fullPathSearch = true[, clearSight = true[, maxSearchDist = 0]]]]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
FindPathParams fpp;
fpp.minTargetDist = getNumber<int32_t>(L, 3, 0);
fpp.maxTargetDist = getNumber<int32_t>(L, 4, 1);
fpp.fullPathSearch = getBoolean(L, 5, fpp.fullPathSearch);
fpp.clearSight = getBoolean(L, 6, fpp.clearSight);
fpp.maxSearchDist = getNumber<int32_t>(L, 7, fpp.maxSearchDist);
std::forward_list<Direction> dirList;
if (creature->getPathTo(position, dirList, fpp)) {
lua_newtable(L);
int index = 0;
for (Direction dir : dirList) {
lua_pushnumber(L, dir);
lua_rawseti(L, -2, ++index);
}
} else {
pushBoolean(L, false);
}
return 1;
}
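// Usage sketch (Lua side): on success getPathTo() returns an array of
// DIRECTION_* values leading towards pos; on failure it returns false.
// 'target' stands in for any creature userdata held by the caller:
//   local path = creature:getPathTo(target:getPosition(), 1, 1)
//   if path then
//       print("path length: " .. #path)
//   end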
int LuaScriptInterface::luaCreatureMove(lua_State* L)
{
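// creature:move(direction)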
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Direction direction = getNumber<Direction>(L, 2);
if (direction > DIRECTION_LAST) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, g_game.internalMoveCreature(creature, direction, FLAG_NOLIMIT));
return 1;
}
// Player
int LuaScriptInterface::luaPlayerCreate(lua_State* L)
{
// Player(id or name or userdata)
Player* player;
if (isNumber(L, 2)) {
player = g_game.getPlayerByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
ReturnValue ret = g_game.getPlayerByNameWildcard(getString(L, 2), player);
if (ret != RETURNVALUE_NOERROR) {
lua_pushnil(L);
lua_pushnumber(L, ret);
return 2;
}
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Player) {
lua_pushnil(L);
return 1;
}
player = getUserdata<Player>(L, 2);
} else {
player = nullptr;
}
if (player) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerIsPlayer(lua_State* L)
{
// player:isPlayer()
pushBoolean(L, getUserdata<const Player>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaPlayerGetGuid(lua_State* L)
{
// player:getGuid()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getGUID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetIp(lua_State* L)
{
// player:getIp()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getIP());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetAccountId(lua_State* L)
{
// player:getAccountId()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getAccount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLastLoginSaved(lua_State* L)
{
// player:getLastLoginSaved()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLastLoginSaved());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLastLogout(lua_State* L)
{
// player:getLastLogout()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLastLogout());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetAccountType(lua_State* L)
{
// player:getAccountType()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getAccountType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetAccountType(lua_State* L)
{
// player:setAccountType(accountType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->accountType = getNumber<AccountType_t>(L, 2);
IOLoginData::setAccountType(player->getAccount(), player->accountType);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetCapacity(lua_State* L)
{
// player:getCapacity()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getCapacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetCapacity(lua_State* L)
{
// player:setCapacity(capacity)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->capacity = getNumber<uint32_t>(L, 2);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetFreeCapacity(lua_State* L)
{
// player:getFreeCapacity()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getFreeCapacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetDepotChest(lua_State* L)
{
// player:getDepotChest(depotId[, autoCreate = false])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t depotId = getNumber<uint32_t>(L, 2);
bool autoCreate = getBoolean(L, 3, false);
DepotChest* depotChest = player->getDepotChest(depotId, autoCreate);
if (depotChest) {
pushUserdata<Item>(L, depotChest);
setItemMetatable(L, -1, depotChest);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetInbox(lua_State* L)
{
// player:getInbox()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Inbox* inbox = player->getInbox();
if (inbox) {
pushUserdata<Item>(L, inbox);
setItemMetatable(L, -1, inbox);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkullTime(lua_State* L)
{
// player:getSkullTime()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSkullTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetSkullTime(lua_State* L)
{
// player:setSkullTime(skullTime)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setSkullTicks(getNumber<int64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetDeathPenalty(lua_State* L)
{
// player:getDeathPenalty()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, static_cast<uint32_t>(player->getLostPercent() * 100));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetExperience(lua_State* L)
{
// player:getExperience()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getExperience());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddExperience(lua_State* L)
{
// player:addExperience(experience[, sendText = false])
Player* player = getUserdata<Player>(L, 1);
if (player) {
int64_t experience = getNumber<int64_t>(L, 2);
bool sendText = getBoolean(L, 3, false);
player->addExperience(nullptr, experience, sendText);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveExperience(lua_State* L)
{
// player:removeExperience(experience[, sendText = false])
Player* player = getUserdata<Player>(L, 1);
if (player) {
int64_t experience = getNumber<int64_t>(L, 2);
bool sendText = getBoolean(L, 3, false);
player->removeExperience(experience, sendText);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLevel(lua_State* L)
{
// player:getLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMagicLevel(lua_State* L)
{
// player:getMagicLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMagicLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMagicLevel(lua_State* L)
{
// player:getBaseMagicLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getBaseMagicLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetMaxMana(lua_State* L)
{
// player:setMaxMana(maxMana)
Player* player = getPlayer(L, 1);
if (player) {
player->manaMax = getNumber<int32_t>(L, 2);
player->mana = std::min<int32_t>(player->mana, player->manaMax);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetManaSpent(lua_State* L)
{
// player:getManaSpent()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSpentMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddManaSpent(lua_State* L)
{
// player:addManaSpent(amount)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->addManaSpent(getNumber<uint64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMaxHealth(lua_State* L)
{
// player:getBaseMaxHealth()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->healthMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMaxMana(lua_State* L)
{
// player:getBaseMaxMana()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->manaMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillLevel(lua_State* L)
{
// player:getSkillLevel(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetEffectiveSkillLevel(lua_State* L)
{
// player:getEffectiveSkillLevel(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->getSkillLevel(skillType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillPercent(lua_State* L)
{
// player:getSkillPercent(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].percent);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillTries(lua_State* L)
{
// player:getSkillTries(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].tries);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSkillTries(lua_State* L)
{
// player:addSkillTries(skillType, tries)
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
player->addSkillAdvance(skillType, tries);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOfflineTrainingTime(lua_State* L)
{
// player:addOfflineTrainingTime(time)
Player* player = getUserdata<Player>(L, 1);
if (player) {
int32_t time = getNumber<int32_t>(L, 2);
player->addOfflineTrainingTime(time);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetOfflineTrainingTime(lua_State* L)
{
// player:getOfflineTrainingTime()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getOfflineTrainingTime());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime(lua_State* L)
{
// player:removeOfflineTrainingTime(time)
Player* player = getUserdata<Player>(L, 1);
if (player) {
int32_t time = getNumber<int32_t>(L, 2);
player->removeOfflineTrainingTime(time);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOfflineTrainingTries(lua_State* L)
{
// player:addOfflineTrainingTries(skillType, tries)
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
pushBoolean(L, player->addOfflineTrainingTries(skillType, tries));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetOfflineTrainingSkill(lua_State* L)
{
// player:getOfflineTrainingSkill()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getOfflineTrainingSkill());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetOfflineTrainingSkill(lua_State* L)
{
// player:setOfflineTrainingSkill(skillId)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint32_t skillId = getNumber<uint32_t>(L, 2);
player->setOfflineTrainingSkill(skillId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetItemCount(lua_State* L)
{
// player:getItemCount(itemId[, subType = -1])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
lua_pushnumber(L, player->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaPlayerGetItemById(lua_State* L)
{
// player:getItemById(itemId, deepSearch[, subType = -1])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
bool deepSearch = getBoolean(L, 3);
int32_t subType = getNumber<int32_t>(L, 4, -1);
Item* item = g_game.findItemOfType(player, itemId, deepSearch, subType);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetVocation(lua_State* L)
{
// player:getVocation()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Vocation>(L, player->getVocation());
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetVocation(lua_State* L)
{
// player:setVocation(id or name or userdata)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Vocation* vocation;
if (isNumber(L, 2)) {
vocation = g_vocations.getVocation(getNumber<uint16_t>(L, 2));
} else if (isString(L, 2)) {
vocation = g_vocations.getVocation(g_vocations.getVocationId(getString(L, 2)));
} else if (isUserdata(L, 2)) {
vocation = getUserdata<Vocation>(L, 2);
} else {
vocation = nullptr;
}
if (!vocation) {
pushBoolean(L, false);
return 1;
}
player->setVocation(vocation->getId());
pushBoolean(L, true);
return 1;
}
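// Usage sketch (Lua side): setVocation() accepts a vocation id, a vocation
// name, or a Vocation userdata, mirroring the branches above. The Vocation()
// constructor is assumed to be registered elsewhere in this interface:
//   player:setVocation(4)                       -- by id
//   player:setVocation("knight")                -- by name (illustrative)
//   player:setVocation(Vocation(4))             -- by userdata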
int LuaScriptInterface::luaPlayerGetSex(lua_State* L)
{
// player:getSex()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSex());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetSex(lua_State* L)
{
// player:setSex(newSex)
Player* player = getUserdata<Player>(L, 1);
if (player) {
PlayerSex_t newSex = getNumber<PlayerSex_t>(L, 2);
player->setSex(newSex);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetTown(lua_State* L)
{
// player:getTown()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Town>(L, player->getTown());
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetTown(lua_State* L)
{
// player:setTown(town)
Town* town = getUserdata<Town>(L, 2);
if (!town) {
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setTown(town);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGuild(lua_State* L)
{
// player:getGuild()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Guild* guild = player->getGuild();
if (!guild) {
lua_pushnil(L);
return 1;
}
pushUserdata<Guild>(L, guild);
setMetatable(L, -1, "Guild");
return 1;
}
int LuaScriptInterface::luaPlayerSetGuild(lua_State* L)
{
// player:setGuild(guild)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
player->setGuild(getUserdata<Guild>(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetGuildLevel(lua_State* L)
{
// player:getGuildLevel()
Player* player = getUserdata<Player>(L, 1);
if (player && player->getGuild()) {
lua_pushnumber(L, player->getGuildRank()->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGuildLevel(lua_State* L)
{
// player:setGuildLevel(level)
uint8_t level = getNumber<uint8_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (!player || !player->getGuild()) {
lua_pushnil(L);
return 1;
}
const GuildRank* rank = player->getGuild()->getRankByLevel(level);
if (!rank) {
pushBoolean(L, false);
} else {
player->setGuildRank(rank);
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGuildNick(lua_State* L)
{
// player:getGuildNick()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushString(L, player->getGuildNick());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGuildNick(lua_State* L)
{
// player:setGuildNick(nick)
const std::string& nick = getString(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setGuildNick(nick);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGroup(lua_State* L)
{
// player:getGroup()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Group>(L, player->getGroup());
setMetatable(L, -1, "Group");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGroup(lua_State* L)
{
// player:setGroup(group)
Group* group = getUserdata<Group>(L, 2);
if (!group) {
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setGroup(group);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetStamina(lua_State* L)
{
// player:getStamina()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getStaminaMinutes());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetStamina(lua_State* L)
{
// player:setStamina(stamina)
uint16_t stamina = getNumber<uint16_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->staminaMinutes = std::min<uint16_t>(2520, stamina); // 2520 minutes (42 hours) is the stamina cap
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSoul(lua_State* L)
{
// player:getSoul()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSoul());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSoul(lua_State* L)
{
// player:addSoul(soulChange)
int32_t soulChange = getNumber<int32_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->changeSoul(soulChange);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMaxSoul(lua_State* L)
{
// player:getMaxSoul()
Player* player = getUserdata<Player>(L, 1);
if (player && player->vocation) {
lua_pushnumber(L, player->vocation->getSoulMax());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBankBalance(lua_State* L)
{
// player:getBankBalance()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getBankBalance());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetBankBalance(lua_State* L)
{
// player:setBankBalance(bankBalance)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setBankBalance(getNumber<uint64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetStorageValue(lua_State* L)
{
// player:getStorageValue(key)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t key = getNumber<uint32_t>(L, 2);
int32_t value;
if (player->getStorageValue(key, value)) {
lua_pushnumber(L, value);
} else {
lua_pushnumber(L, -1);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetStorageValue(lua_State* L)
{
// player:setStorageValue(key, value)
int32_t value = getNumber<int32_t>(L, 3);
uint32_t key = getNumber<uint32_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
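// Keys inside the reserved range are used internally by the server (stored outfits,
// mounts and similar state), so scripts are not allowed to write to them.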
if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
std::ostringstream ss;
ss << "Accessing reserved range: " << key;
reportErrorFunc(ss.str());
pushBoolean(L, false);
return 1;
}
if (player) {
player->addStorageValue(key, value);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddItem(lua_State* L)
{
// player:addItem(itemId[, count = 1[, canDropOnMap = true[, subType = 1[, slot = CONST_SLOT_WHEREEVER]]]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
pushBoolean(L, false);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t count = getNumber<int32_t>(L, 3, 1);
int32_t subType = getNumber<int32_t>(L, 5, 1);
const ItemType& it = Item::items[itemId];
int32_t itemCount = 1;
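// Disambiguate the overloaded 'count' argument: with four or more arguments it is
// always the number of items to create; otherwise, for sub-typed items (stackables,
// fluid containers) it is reinterpreted as the subtype, with stackables split into
// stacks of at most 100 below.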
int parameters = lua_gettop(L);
if (parameters >= 4) {
itemCount = std::max<int32_t>(1, count);
} else if (it.hasSubType()) {
if (it.stackable) {
itemCount = std::ceil(count / 100.f);
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
bool hasTable = itemCount > 1;
if (hasTable) {
lua_newtable(L);
} else if (itemCount == 0) {
lua_pushnil(L);
return 1;
}
bool canDropOnMap = getBoolean(L, 4, true);
slots_t slot = getNumber<slots_t>(L, 6, CONST_SLOT_WHEREEVER);
for (int32_t i = 1; i <= itemCount; ++i) {
int32_t stackCount = subType;
if (it.stackable) {
stackCount = std::min<int32_t>(stackCount, 100);
subType -= stackCount;
}
Item* item = Item::CreateItem(itemId, stackCount);
if (!item) {
if (!hasTable) {
lua_pushnil(L);
}
return 1;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, item, canDropOnMap, slot);
if (ret != RETURNVALUE_NOERROR) {
delete item;
if (!hasTable) {
lua_pushnil(L);
}
return 1;
}
if (hasTable) {
lua_pushnumber(L, i);
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_settable(L, -3);
} else {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
}
}
return 1;
}
int LuaScriptInterface::luaPlayerAddItemEx(lua_State* L)
{
// player:addItemEx(item[, canDropOnMap = false[, index = INDEX_WHEREEVER[, flags = 0]]])
// player:addItemEx(item[, canDropOnMap = true[, slot = CONST_SLOT_WHEREEVER]])
Item* item = getUserdata<Item>(L, 2);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc("Item already has a parent");
pushBoolean(L, false);
return 1;
}
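// canDropOnMap selects between the two documented signatures: the slot-based
// internalPlayerAddItem (which may drop the item on the map as a fallback) or the
// generic index/flags internalAddItem; on success the item is no longer tracked as
// a temporary script item.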
bool canDropOnMap = getBoolean(L, 3, false);
ReturnValue returnValue;
if (canDropOnMap) {
slots_t slot = getNumber<slots_t>(L, 4, CONST_SLOT_WHEREEVER);
returnValue = g_game.internalPlayerAddItem(player, item, true, slot);
} else {
int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 5, 0);
returnValue = g_game.internalAddItem(player, item, index, flags);
}
if (returnValue == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, returnValue);
return 1;
}
int LuaScriptInterface::luaPlayerRemoveItem(lua_State* L)
{
// player:removeItem(itemId, count[, subType = -1[, ignoreEquipped = false]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t count = getNumber<uint32_t>(L, 3);
int32_t subType = getNumber<int32_t>(L, 4, -1);
bool ignoreEquipped = getBoolean(L, 5, false);
pushBoolean(L, player->removeItemOfType(itemId, count, subType, ignoreEquipped));
return 1;
}
int LuaScriptInterface::luaPlayerGetMoney(lua_State* L)
{
// player:getMoney()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMoney());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMoney(lua_State* L)
{
// player:addMoney(money)
uint64_t money = getNumber<uint64_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
g_game.addMoney(player, money);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveMoney(lua_State* L)
{
// player:removeMoney(money)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint64_t money = getNumber<uint64_t>(L, 2);
pushBoolean(L, g_game.removeMoney(player, money));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerShowTextDialog(lua_State* L)
{
// player:showTextDialog(itemId[, text[, canWrite[, length]]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int32_t length = getNumber<int32_t>(L, 5, -1);
bool canWrite = getBoolean(L, 4, false);
std::string text;
int parameters = lua_gettop(L);
if (parameters >= 3) {
text = getString(L, 3);
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
Item* item = Item::CreateItem(itemId);
if (!item) {
reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
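// A negative length falls back to the item type's maximum text length; when an
// initial text is given, the writable window is widened to fit it if necessary.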
if (length < 0) {
length = Item::items[item->getID()].maxTextLen;
}
if (!text.empty()) {
item->setText(text);
length = std::max<int32_t>(text.size(), length);
}
item->setParent(player);
player->setWriteItem(item, length);
player->sendTextWindow(item, length, canWrite);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSendTextMessage(lua_State* L)
{
// player:sendTextMessage(type, text[, position, primaryValue = 0, primaryColor = TEXTCOLOR_NONE[, secondaryValue = 0, secondaryColor = TEXTCOLOR_NONE]])
int parameters = lua_gettop(L);
TextMessage message(getNumber<MessageClasses>(L, 2), getString(L, 3));
if (parameters >= 6) {
message.position = getPosition(L, 4);
message.primary.value = getNumber<int32_t>(L, 5);
message.primary.color = getNumber<TextColor_t>(L, 6);
}
if (parameters >= 8) {
message.secondary.value = getNumber<int32_t>(L, 7);
message.secondary.color = getNumber<TextColor_t>(L, 8);
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->sendTextMessage(message);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendChannelMessage(lua_State* L)
{
// player:sendChannelMessage(author, text, type, channelId)
uint16_t channelId = getNumber<uint16_t>(L, 5);
SpeakClasses type = getNumber<SpeakClasses>(L, 4);
const std::string& text = getString(L, 3);
const std::string& author = getString(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->sendChannelMessage(author, text, type, channelId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendPrivateMessage(lua_State* L)
{
// player:sendPrivateMessage(speaker, text[, type])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
const Player* speaker = getUserdata<const Player>(L, 2);
const std::string& text = getString(L, 3);
SpeakClasses type = getNumber<SpeakClasses>(L, 4, TALKTYPE_PRIVATE_FROM);
player->sendPrivateMessage(speaker, type, text);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerChannelSay(lua_State* L)
{
// player:channelSay(speaker, type, text, channelId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Creature* speaker = getCreature(L, 2);
SpeakClasses type = getNumber<SpeakClasses>(L, 3);
const std::string& text = getString(L, 4);
uint16_t channelId = getNumber<uint16_t>(L, 5);
player->sendToChannel(speaker, type, text, channelId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerOpenChannel(lua_State* L)
{
// player:openChannel(channelId)
uint16_t channelId = getNumber<uint16_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
g_game.playerOpenChannel(player->getID(), channelId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSlotItem(lua_State* L)
{
// player:getSlotItem(slot)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t slot = getNumber<uint32_t>(L, 2);
Thing* thing = player->getThing(slot);
if (!thing) {
lua_pushnil(L);
return 1;
}
Item* item = thing->getItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetParty(lua_State* L)
{
// player:getParty()
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Party* party = player->getParty();
if (party) {
pushUserdata<Party>(L, party);
setMetatable(L, -1, "Party");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOutfit(lua_State* L)
{
// player:addOutfit(lookType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->addOutfit(getNumber<uint16_t>(L, 2), 0);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOutfitAddon(lua_State* L)
{
// player:addOutfitAddon(lookType, addon)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3);
player->addOutfit(lookType, addon);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOutfit(lua_State* L)
{
// player:removeOutfit(lookType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
pushBoolean(L, player->removeOutfit(lookType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOutfitAddon(lua_State* L)
{
// player:removeOutfitAddon(lookType, addon)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3);
pushBoolean(L, player->removeOutfitAddon(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasOutfit(lua_State* L)
{
// player:hasOutfit(lookType[, addon = 0])
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3, 0);
pushBoolean(L, player->canWear(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendOutfitWindow(lua_State* L)
{
// player:sendOutfitWindow()
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->sendOutfitWindow();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMount(lua_State* L)
{
// player:addMount(mountId or mountName)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t mountId;
if (isNumber(L, 2)) {
mountId = getNumber<uint8_t>(L, 2);
} else {
Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
if (!mount) {
lua_pushnil(L);
return 1;
}
mountId = mount->id;
}
pushBoolean(L, player->tameMount(mountId));
return 1;
}
int LuaScriptInterface::luaPlayerRemoveMount(lua_State* L)
{
// player:removeMount(mountId or mountName)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t mountId;
if (isNumber(L, 2)) {
mountId = getNumber<uint8_t>(L, 2);
} else {
Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
if (!mount) {
lua_pushnil(L);
return 1;
}
mountId = mount->id;
}
pushBoolean(L, player->untameMount(mountId));
return 1;
}
int LuaScriptInterface::luaPlayerHasMount(lua_State* L)
{
// player:hasMount(mountId or mountName)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Mount* mount = nullptr;
if (isNumber(L, 2)) {
mount = g_game.mounts.getMountByID(getNumber<uint8_t>(L, 2));
} else {
mount = g_game.mounts.getMountByName(getString(L, 2));
}
if (mount) {
pushBoolean(L, player->hasMount(mount));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetPremiumDays(lua_State* L)
{
// player:getPremiumDays()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->premiumDays);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddPremiumDays(lua_State* L)
{
// player:addPremiumDays(days)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
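// premiumDays of 0xFFFF appears to mark a permanent premium account, so it is left
// untouched; additions are capped at 0xFFFE to keep the counter below that sentinel.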
if (player->premiumDays != std::numeric_limits<uint16_t>::max()) {
uint16_t days = getNumber<uint16_t>(L, 2);
int32_t addDays = std::min<int32_t>(0xFFFE - player->premiumDays, days);
if (addDays > 0) {
player->setPremiumDays(player->premiumDays + addDays);
IOLoginData::addPremiumDays(player->getAccount(), addDays);
}
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerRemovePremiumDays(lua_State* L)
{
// player:removePremiumDays(days)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
if (player->premiumDays != std::numeric_limits<uint16_t>::max()) {
uint16_t days = getNumber<uint16_t>(L, 2);
int32_t removeDays = std::min<int32_t>(player->premiumDays, days);
if (removeDays > 0) {
player->setPremiumDays(player->premiumDays - removeDays);
IOLoginData::removePremiumDays(player->getAccount(), removeDays);
}
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerHasBlessing(lua_State* L)
{
// player:hasBlessing(blessing)
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->hasBlessing(blessing));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddBlessing(lua_State* L)
{
// player:addBlessing(blessing)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
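// Blessings are 1-based on the Lua side but stored as a bitmask internally:
// hasBlessing() takes the 0-based bit index while addBlessing()/removeBlessing()
// take the mask itself, hence the '1 << blessing' below.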
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
if (player->hasBlessing(blessing)) {
pushBoolean(L, false);
return 1;
}
player->addBlessing(1 << blessing);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerRemoveBlessing(lua_State* L)
{
// player:removeBlessing(blessing)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
if (!player->hasBlessing(blessing)) {
pushBoolean(L, false);
return 1;
}
player->removeBlessing(1 << blessing);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerCanLearnSpell(lua_State* L)
{
// player:canLearnSpell(spellName)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
const std::string& spellName = getString(L, 2);
InstantSpell* spell = g_spells->getInstantSpellByName(spellName);
if (!spell) {
reportErrorFunc("Spell \"" + spellName + "\" not found");
pushBoolean(L, false);
return 1;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
pushBoolean(L, true);
return 1;
}
const auto& vocMap = spell->getVocMap();
if (vocMap.count(player->getVocationId()) == 0) {
pushBoolean(L, false);
} else if (player->getLevel() < spell->getLevel()) {
pushBoolean(L, false);
} else if (player->getMagicLevel() < spell->getMagicLevel()) {
pushBoolean(L, false);
} else {
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaPlayerLearnSpell(lua_State* L)
{
// player:learnSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
player->learnInstantSpell(spellName);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerForgetSpell(lua_State* L)
{
// player:forgetSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
player->forgetInstantSpell(spellName);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasLearnedSpell(lua_State* L)
{
// player:hasLearnedSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
pushBoolean(L, player->hasLearnedInstantSpell(spellName));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendTutorial(lua_State* L)
{
// player:sendTutorial(tutorialId)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint8_t tutorialId = getNumber<uint8_t>(L, 2);
player->sendTutorial(tutorialId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMapMark(lua_State* L)
{
// player:addMapMark(position, type, description)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const Position& position = getPosition(L, 2);
uint8_t type = getNumber<uint8_t>(L, 3);
const std::string& description = getString(L, 4);
player->sendAddMarker(position, type, description);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSave(lua_State* L)
{
// player:save()
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->loginPosition = player->getPosition();
pushBoolean(L, IOLoginData::savePlayer(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerPopupFYI(lua_State* L)
{
// player:popupFYI(message)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& message = getString(L, 2);
player->sendFYIBox(message);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerIsPzLocked(lua_State* L)
{
// player:isPzLocked()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->isPzLocked());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetClient(lua_State* L)
{
// player:getClient()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_createtable(L, 0, 2);
setField(L, "version", player->getProtocolVersion());
setField(L, "os", player->getOperatingSystem());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetHouse(lua_State* L)
{
// player:getHouse()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = g_game.map.houses.getHouseByPlayerId(player->getGUID());
if (house) {
pushUserdata<House>(L, house);
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendHouseWindow(lua_State* L)
{
// player:sendHouseWindow(house, listId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = getUserdata<House>(L, 2);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 3);
player->sendHouseWindow(house, listId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSetEditHouse(lua_State* L)
{
// player:setEditHouse(house, listId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = getUserdata<House>(L, 2);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 3);
player->setEditHouse(house, listId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSetGhostMode(lua_State* L)
{
// player:setGhostMode(enabled)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
bool enabled = getBoolean(L, 2);
if (player->isInGhostMode() == enabled) {
pushBoolean(L, true);
return 1;
}
player->switchGhostMode();
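// Non-access spectators must see the player disappear or reappear on the tile,
// while access players only receive a visibility toggle; afterwards the VIP lists
// of regular players are notified and the stored online status is updated to match.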
Tile* tile = player->getTile();
const Position& position = player->getPosition();
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer != player && !tmpPlayer->isAccessPlayer()) {
if (enabled) {
tmpPlayer->sendRemoveTileThing(position, tile->getStackposOfCreature(tmpPlayer, player));
} else {
tmpPlayer->sendCreatureAppear(player, position, true);
}
} else {
tmpPlayer->sendCreatureChangeVisible(player, !enabled);
}
}
if (player->isInGhostMode()) {
for (const auto& it : g_game.getPlayers()) {
if (!it.second->isAccessPlayer()) {
it.second->notifyStatusChange(player, VIPSTATUS_OFFLINE);
}
}
IOLoginData::updateOnlineStatus(player->getGUID(), false);
} else {
for (const auto& it : g_game.getPlayers()) {
if (!it.second->isAccessPlayer()) {
it.second->notifyStatusChange(player, VIPSTATUS_ONLINE);
}
}
IOLoginData::updateOnlineStatus(player->getGUID(), true);
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerId(lua_State* L)
{
// player:getContainerId(container)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* container = getUserdata<Container>(L, 2);
if (container) {
lua_pushnumber(L, player->getContainerID(container));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerById(lua_State* L)
{
// player:getContainerById(id)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* container = player->getContainerByID(getNumber<uint8_t>(L, 2));
if (container) {
pushUserdata<Container>(L, container);
setMetatable(L, -1, "Container");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerIndex(lua_State* L)
{
// player:getContainerIndex(id)
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getContainerIndex(getNumber<uint8_t>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetInstantSpells(lua_State* L)
{
// player:getInstantSpells()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
std::vector<InstantSpell*> spells;
for (auto spell : g_spells->getInstantSpells()) {
if (spell.second->canCast(player)) {
spells.push_back(spell.second);
}
}
lua_createtable(L, spells.size(), 0);
int index = 0;
for (auto spell : spells) {
pushInstantSpell(L, *spell);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaPlayerCanCast(lua_State* L)
{
// player:canCast(spell)
Player* player = getUserdata<Player>(L, 1);
InstantSpell* spell = getUserdata<InstantSpell>(L, 2);
if (player && spell) {
pushBoolean(L, spell->canCast(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasChaseMode(lua_State* L)
{
// player:hasChaseMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->chaseMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasSecureMode(lua_State* L)
{
// player:hasSecureMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->secureMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetFightMode(lua_State* L)
{
// player:getFightMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->fightMode);
} else {
lua_pushnil(L);
}
return 1;
}
// Monster
int LuaScriptInterface::luaMonsterCreate(lua_State* L)
{
// Monster(id or userdata)
Monster* monster;
if (isNumber(L, 2)) {
monster = g_game.getMonsterByID(getNumber<uint32_t>(L, 2));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Monster) {
lua_pushnil(L);
return 1;
}
monster = getUserdata<Monster>(L, 2);
} else {
monster = nullptr;
}
if (monster) {
pushUserdata<Monster>(L, monster);
setMetatable(L, -1, "Monster");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsMonster(lua_State* L)
{
// monster:isMonster()
pushBoolean(L, getUserdata<const Monster>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaMonsterGetType(lua_State* L)
{
// monster:getType()
const Monster* monster = getUserdata<const Monster>(L, 1);
if (monster) {
pushUserdata<MonsterType>(L, monster->mType);
setMetatable(L, -1, "MonsterType");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetSpawnPosition(lua_State* L)
{
// monster:getSpawnPosition()
const Monster* monster = getUserdata<const Monster>(L, 1);
if (monster) {
pushPosition(L, monster->getMasterPos());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsInSpawnRange(lua_State* L)
{
// monster:isInSpawnRange([position])
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->isInSpawnRange(lua_gettop(L) >= 2 ? getPosition(L, 2) : monster->getPosition()));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsIdle(lua_State* L)
{
// monster:isIdle()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->getIdleStatus());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSetIdle(lua_State* L)
{
// monster:setIdle(idle)
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
monster->setIdle(getBoolean(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterIsTarget(lua_State* L)
{
// monster:isTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->isTarget(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsOpponent(lua_State* L)
{
// monster:isOpponent(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->isOpponent(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsFriend(lua_State* L)
{
// monster:isFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->isFriend(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterAddFriend(lua_State* L)
{
// monster:addFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
monster->addFriend(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterRemoveFriend(lua_State* L)
{
// monster:removeFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
monster->removeFriend(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetFriendList(lua_State* L)
{
// monster:getFriendList()
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
const auto& friendList = monster->getFriendList();
lua_createtable(L, friendList.size(), 0);
int index = 0;
for (Creature* creature : friendList) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetFriendCount(lua_State* L)
{
// monster:getFriendCount()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
lua_pushnumber(L, monster->getFriendList().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterAddTarget(lua_State* L)
{
// monster:addTarget(creature[, pushFront = false])
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
bool pushFront = getBoolean(L, 3, false);
monster->addTarget(creature, pushFront);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterRemoveTarget(lua_State* L)
{
// monster:removeTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
monster->removeTarget(getCreature(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterGetTargetList(lua_State* L)
{
// monster:getTargetList()
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
const auto& targetList = monster->getTargetList();
lua_createtable(L, targetList.size(), 0);
int index = 0;
for (Creature* creature : targetList) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetTargetCount(lua_State* L)
{
// monster:getTargetCount()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
lua_pushnumber(L, monster->getTargetList().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSelectTarget(lua_State* L)
{
// monster:selectTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
pushBoolean(L, monster->selectTarget(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSearchTarget(lua_State* L)
{
// monster:searchTarget([searchType = TARGETSEARCH_DEFAULT])
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
TargetSearchType_t searchType = getNumber<TargetSearchType_t>(L, 2, TARGETSEARCH_DEFAULT);
pushBoolean(L, monster->searchTarget(searchType));
} else {
lua_pushnil(L);
}
return 1;
}
// Npc
int LuaScriptInterface::luaNpcCreate(lua_State* L)
{
// Npc([id or name or userdata])
Npc* npc;
if (lua_gettop(L) >= 2) {
if (isNumber(L, 2)) {
npc = g_game.getNpcByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
npc = g_game.getNpcByName(getString(L, 2));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Npc) {
lua_pushnil(L);
return 1;
}
npc = getUserdata<Npc>(L, 2);
} else {
npc = nullptr;
}
} else {
npc = getScriptEnv()->getNpc();
}
if (npc) {
pushUserdata<Npc>(L, npc);
setMetatable(L, -1, "Npc");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNpcIsNpc(lua_State* L)
{
// npc:isNpc()
pushBoolean(L, getUserdata<const Npc>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaNpcSetMasterPos(lua_State* L)
{
// npc:setMasterPos(pos[, radius])
Npc* npc = getUserdata<Npc>(L, 1);
if (!npc) {
lua_pushnil(L);
return 1;
}
const Position& pos = getPosition(L, 2);
int32_t radius = getNumber<int32_t>(L, 3, 1);
npc->setMasterPos(pos, radius);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaNpcGetSpeechBubble(lua_State* L)
{
// npc:getSpeechBubble()
Npc* npc = getUserdata<Npc>(L, 1);
if (npc) {
lua_pushnumber(L, npc->getSpeechBubble());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNpcSetSpeechBubble(lua_State* L)
{
// npc:setSpeechBubble(speechBubble)
Npc* npc = getUserdata<Npc>(L, 1);
if (npc) {
npc->setSpeechBubble(getNumber<uint8_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Guild
int LuaScriptInterface::luaGuildCreate(lua_State* L)
{
// Guild(id)
uint32_t id = getNumber<uint32_t>(L, 2);
Guild* guild = g_game.getGuild(id);
if (guild) {
pushUserdata<Guild>(L, guild);
setMetatable(L, -1, "Guild");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetId(lua_State* L)
{
// guild:getId()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
lua_pushnumber(L, guild->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetName(lua_State* L)
{
// guild:getName()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
pushString(L, guild->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetMembersOnline(lua_State* L)
{
// guild:getMembersOnline()
const Guild* guild = getUserdata<const Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
const auto& members = guild->getMembersOnline();
lua_createtable(L, members.size(), 0);
int index = 0;
for (Player* player : members) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGuildAddRank(lua_State* L)
{
// guild:addRank(id, name, level)
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
uint32_t id = getNumber<uint32_t>(L, 2);
const std::string& name = getString(L, 3);
uint8_t level = getNumber<uint8_t>(L, 4);
guild->addRank(id, name, level);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetRankById(lua_State* L)
{
// guild:getRankById(id)
Guild* guild = getUserdata<Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
uint32_t id = getNumber<uint32_t>(L, 2);
GuildRank* rank = guild->getRankById(id);
if (rank) {
lua_createtable(L, 0, 3);
setField(L, "id", rank->id);
setField(L, "name", rank->name);
setField(L, "level", rank->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetRankByLevel(lua_State* L)
{
// guild:getRankByLevel(level)
const Guild* guild = getUserdata<const Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
uint8_t level = getNumber<uint8_t>(L, 2);
const GuildRank* rank = guild->getRankByLevel(level);
if (rank) {
lua_createtable(L, 0, 3);
setField(L, "id", rank->id);
setField(L, "name", rank->name);
setField(L, "level", rank->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetMotd(lua_State* L)
{
// guild:getMotd()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
pushString(L, guild->getMotd());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildSetMotd(lua_State* L)
{
// guild:setMotd(motd)
const std::string& motd = getString(L, 2);
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
guild->setMotd(motd);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Group
int LuaScriptInterface::luaGroupCreate(lua_State* L)
{
// Group(id)
uint32_t id = getNumber<uint32_t>(L, 2);
Group* group = g_game.groups.getGroup(id);
if (group) {
pushUserdata<Group>(L, group);
setMetatable(L, -1, "Group");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetId(lua_State* L)
{
// group:getId()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetName(lua_State* L)
{
// group:getName()
Group* group = getUserdata<Group>(L, 1);
if (group) {
pushString(L, group->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetFlags(lua_State* L)
{
// group:getFlags()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->flags);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetAccess(lua_State* L)
{
// group:getAccess()
Group* group = getUserdata<Group>(L, 1);
if (group) {
pushBoolean(L, group->access);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetMaxDepotItems(lua_State* L)
{
// group:getMaxDepotItems()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->maxDepotItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetMaxVipEntries(lua_State* L)
{
// group:getMaxVipEntries()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->maxVipEntries);
} else {
lua_pushnil(L);
}
return 1;
}
// Vocation
int LuaScriptInterface::luaVocationCreate(lua_State* L)
{
// Vocation(id or name)
uint32_t id;
if (isNumber(L, 2)) {
id = getNumber<uint32_t>(L, 2);
} else {
id = g_vocations.getVocationId(getString(L, 2));
}
Vocation* vocation = g_vocations.getVocation(id);
if (vocation) {
pushUserdata<Vocation>(L, vocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetId(lua_State* L)
{
// vocation:getId()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetClientId(lua_State* L)
{
// vocation:getClientId()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getClientId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetName(lua_State* L)
{
// vocation:getName()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushString(L, vocation->getVocName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetDescription(lua_State* L)
{
// vocation:getDescription()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushString(L, vocation->getVocDescription());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetRequiredSkillTries(lua_State* L)
{
// vocation:getRequiredSkillTries(skillType, skillLevel)
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint16_t skillLevel = getNumber<uint16_t>(L, 3);
lua_pushnumber(L, vocation->getReqSkillTries(skillType, skillLevel));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetRequiredManaSpent(lua_State* L)
{
// vocation:getRequiredManaSpent(magicLevel)
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
uint32_t magicLevel = getNumber<uint32_t>(L, 2);
lua_pushnumber(L, vocation->getReqMana(magicLevel));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetCapacityGain(lua_State* L)
{
// vocation:getCapacityGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getCapGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGain(lua_State* L)
{
// vocation:getHealthGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHPGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGainTicks(lua_State* L)
{
// vocation:getHealthGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHealthGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGainAmount(lua_State* L)
{
// vocation:getHealthGainAmount()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHealthGainAmount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGain(lua_State* L)
{
// vocation:getManaGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGainTicks(lua_State* L)
{
// vocation:getManaGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGainAmount(lua_State* L)
{
// vocation:getManaGainAmount()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGainAmount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetMaxSoul(lua_State* L)
{
// vocation:getMaxSoul()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getSoulMax());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetSoulGainTicks(lua_State* L)
{
// vocation:getSoulGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getSoulGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetAttackSpeed(lua_State* L)
{
// vocation:getAttackSpeed()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getAttackSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetBaseSpeed(lua_State* L)
{
// vocation:getBaseSpeed()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getBaseSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetDemotion(lua_State* L)
{
// vocation:getDemotion()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (!vocation) {
lua_pushnil(L);
return 1;
}
uint16_t fromId = vocation->getFromVocation();
if (fromId == VOCATION_NONE) {
lua_pushnil(L);
return 1;
}
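// Base vocations typically point their fromVocation at themselves, so only a
// lookup that yields a different vocation counts as a real demotion.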
Vocation* demotedVocation = g_vocations.getVocation(fromId);
if (demotedVocation && demotedVocation != vocation) {
pushUserdata<Vocation>(L, demotedVocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetPromotion(lua_State* L)
{
// vocation:getPromotion()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (!vocation) {
lua_pushnil(L);
return 1;
}
uint16_t promotedId = g_vocations.getPromotedVocation(vocation->getId());
if (promotedId == VOCATION_NONE) {
lua_pushnil(L);
return 1;
}
Vocation* promotedVocation = g_vocations.getVocation(promotedId);
if (promotedVocation && promotedVocation != vocation) {
pushUserdata<Vocation>(L, promotedVocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
// Town
int LuaScriptInterface::luaTownCreate(lua_State* L)
{
// Town(id or name)
Town* town;
if (isNumber(L, 2)) {
town = g_game.map.towns.getTown(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
town = g_game.map.towns.getTown(getString(L, 2));
} else {
town = nullptr;
}
if (town) {
pushUserdata<Town>(L, town);
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetId(lua_State* L)
{
// town:getId()
Town* town = getUserdata<Town>(L, 1);
if (town) {
lua_pushnumber(L, town->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetName(lua_State* L)
{
// town:getName()
Town* town = getUserdata<Town>(L, 1);
if (town) {
pushString(L, town->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetTemplePosition(lua_State* L)
{
// town:getTemplePosition()
Town* town = getUserdata<Town>(L, 1);
if (town) {
pushPosition(L, town->getTemplePosition());
} else {
lua_pushnil(L);
}
return 1;
}
// House
int LuaScriptInterface::luaHouseCreate(lua_State* L)
{
// House(id)
House* house = g_game.map.houses.getHouse(getNumber<uint32_t>(L, 2));
if (house) {
pushUserdata<House>(L, house);
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetId(lua_State* L)
{
// house:getId()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetName(lua_State* L)
{
// house:getName()
House* house = getUserdata<House>(L, 1);
if (house) {
pushString(L, house->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTown(lua_State* L)
{
// house:getTown()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
Town* town = g_game.map.towns.getTown(house->getTownId());
if (town) {
pushUserdata<Town>(L, town);
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetExitPosition(lua_State* L)
{
// house:getExitPosition()
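// The "exit" of a house is its entry tile (where kicked players end up), which is
// why getEntryPosition() is returned below.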
House* house = getUserdata<House>(L, 1);
if (house) {
pushPosition(L, house->getEntryPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetRent(lua_State* L)
{
// house:getRent()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getRent());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetOwnerGuid(lua_State* L)
{
// house:getOwnerGuid()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getOwner());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseSetOwnerGuid(lua_State* L)
{
// house:setOwnerGuid(guid[, updateDatabase = true])
House* house = getUserdata<House>(L, 1);
if (house) {
uint32_t guid = getNumber<uint32_t>(L, 2);
bool updateDatabase = getBoolean(L, 3, true);
house->setOwner(guid, updateDatabase);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseStartTrade(lua_State* L)
{
// house:startTrade(player, tradePartner)
House* house = getUserdata<House>(L, 1);
Player* player = getUserdata<Player>(L, 2);
Player* tradePartner = getUserdata<Player>(L, 3);
if (!player || !tradePartner || !house) {
lua_pushnil(L);
return 1;
}
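// Mirror the validation of the regular house-transfer flow: partners must stand
// within trade range, the seller must own the house, and the buyer may neither own
// nor have bid on another house; each failure yields the matching RETURNVALUE code.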
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERFARAWAY);
return 1;
}
if (house->getOwner() != player->getGUID()) {
lua_pushnumber(L, RETURNVALUE_YOUDONTOWNTHISHOUSE);
return 1;
}
if (g_game.map.houses.getHouseByPlayerId(tradePartner->getGUID())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE);
return 1;
}
if (IOLoginData::hasBiddedOnHouse(tradePartner->getGUID())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERHIGHESTBIDDER);
return 1;
}
Item* transferItem = house->getTransferItem();
if (!transferItem) {
lua_pushnumber(L, RETURNVALUE_YOUCANNOTTRADETHISHOUSE);
return 1;
}
transferItem->getParent()->setParent(player);
if (!g_game.internalStartTrade(player, tradePartner, transferItem)) {
house->resetTransferItem();
}
lua_pushnumber(L, RETURNVALUE_NOERROR);
return 1;
}
int LuaScriptInterface::luaHouseGetBeds(lua_State* L)
{
// house:getBeds()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& beds = house->getBeds();
lua_createtable(L, beds.size(), 0);
int index = 0;
for (BedItem* bedItem : beds) {
pushUserdata<Item>(L, bedItem);
setItemMetatable(L, -1, bedItem);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetBedCount(lua_State* L)
{
// house:getBedCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getBedCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoors(lua_State* L)
{
// house:getDoors()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& doors = house->getDoors();
lua_createtable(L, doors.size(), 0);
int index = 0;
for (Door* door : doors) {
pushUserdata<Item>(L, door);
setItemMetatable(L, -1, door);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoorCount(lua_State* L)
{
// house:getDoorCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getDoors().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoorIdByPosition(lua_State* L)
{
// house:getDoorIdByPosition(position)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
Door* door = house->getDoorByPosition(getPosition(L, 2));
if (door) {
lua_pushnumber(L, door->getDoorId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTiles(lua_State* L)
{
// house:getTiles()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& tiles = house->getTiles();
lua_createtable(L, tiles.size(), 0);
int index = 0;
for (Tile* tile : tiles) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTileCount(lua_State* L)
{
// house:getTileCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getTiles().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseCanEditAccessList(lua_State* L)
{
// house:canEditAccessList(listId, player)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 2);
Player* player = getPlayer(L, 3);
pushBoolean(L, house->canEditAccessList(listId, player));
return 1;
}
int LuaScriptInterface::luaHouseGetAccessList(lua_State* L)
{
// house:getAccessList(listId)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
std::string list;
uint32_t listId = getNumber<uint32_t>(L, 2);
if (house->getAccessList(listId, list)) {
pushString(L, list);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaHouseSetAccessList(lua_State* L)
{
// house:setAccessList(listId, list)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 2);
const std::string& list = getString(L, 3);
house->setAccessList(listId, list);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaHouseKickPlayer(lua_State* L)
{
// house:kickPlayer(player, targetPlayer)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
pushBoolean(L, house->kickPlayer(getPlayer(L, 2), getPlayer(L, 3)));
return 1;
}
// ItemType
int LuaScriptInterface::luaItemTypeCreate(lua_State* L)
{
// ItemType(id or name)
uint32_t id;
if (isNumber(L, 2)) {
id = getNumber<uint32_t>(L, 2);
} else {
id = Item::items.getItemIdByName(getString(L, 2));
}
const ItemType& itemType = Item::items[id];
pushUserdata<const ItemType>(L, &itemType);
setMetatable(L, -1, "ItemType");
return 1;
}
int LuaScriptInterface::luaItemTypeIsCorpse(lua_State* L)
{
// itemType:isCorpse()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->corpseType != RACE_NONE);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsDoor(lua_State* L)
{
// itemType:isDoor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isDoor());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsContainer(lua_State* L)
{
// itemType:isContainer()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isContainer());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsFluidContainer(lua_State* L)
{
// itemType:isFluidContainer()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isFluidContainer());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsMovable(lua_State* L)
{
// itemType:isMovable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->moveable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsRune(lua_State* L)
{
// itemType:isRune()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isRune());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsStackable(lua_State* L)
{
// itemType:isStackable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->stackable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsReadable(lua_State* L)
{
// itemType:isReadable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->canReadText);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsWritable(lua_State* L)
{
// itemType:isWritable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->canWriteText);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetType(lua_State* L)
{
// itemType:getType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->type);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetId(lua_State* L)
{
// itemType:getId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetClientId(lua_State* L)
{
// itemType:getClientId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->clientId);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetName(lua_State* L)
{
// itemType:getName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetPluralName(lua_State* L)
{
// itemType:getPluralName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->getPluralName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetArticle(lua_State* L)
{
// itemType:getArticle()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->article);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDescription(lua_State* L)
{
// itemType:getDescription()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->description);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetSlotPosition(lua_State* L)
{
// itemType:getSlotPosition()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->slotPosition);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCharges(lua_State* L)
{
// itemType:getCharges()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->charges);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetFluidSource(lua_State* L)
{
// itemType:getFluidSource()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->fluidSource);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCapacity(lua_State* L)
{
// itemType:getCapacity()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->maxItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWeight(lua_State* L)
{
// itemType:getWeight([count = 1])
uint16_t count = getNumber<uint16_t>(L, 2, 1);
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
uint64_t weight = static_cast<uint64_t>(itemType->weight) * std::max<int32_t>(1, count);
lua_pushnumber(L, weight);
return 1;
}
int LuaScriptInterface::luaItemTypeGetHitChance(lua_State* L)
{
// itemType:getHitChance()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->hitChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetShootRange(lua_State* L)
{
// itemType:getShootRange()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->shootRange);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAttack(lua_State* L)
{
// itemType:getAttack()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->attack);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDefense(lua_State* L)
{
// itemType:getDefense()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->defense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetExtraDefense(lua_State* L)
{
// itemType:getExtraDefense()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->extraDefense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetArmor(lua_State* L)
{
// itemType:getArmor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->armor);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWeaponType(lua_State* L)
{
// itemType:getWeaponType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->weaponType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetElementType(lua_State* L)
{
// itemType:getElementType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
auto& abilities = itemType->abilities;
if (abilities) {
lua_pushnumber(L, abilities->elementType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetElementDamage(lua_State* L)
{
// itemType:getElementDamage()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
auto& abilities = itemType->abilities;
if (abilities) {
lua_pushnumber(L, abilities->elementDamage);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetTransformEquipId(lua_State* L)
{
// itemType:getTransformEquipId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->transformEquipTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetTransformDeEquipId(lua_State* L)
{
// itemType:getTransformDeEquipId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->transformDeEquipTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDestroyId(lua_State* L)
{
// itemType:getDestroyId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->destroyTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDecayId(lua_State* L)
{
// itemType:getDecayId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->decayTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetRequiredLevel(lua_State* L)
{
// itemType:getRequiredLevel()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->minReqLevel);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasSubType(lua_State* L)
{
// itemType:hasSubType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->hasSubType());
} else {
lua_pushnil(L);
}
return 1;
}
// Combat
int LuaScriptInterface::luaCombatCreate(lua_State* L)
{
// Combat()
pushUserdata<Combat>(L, g_luaEnvironment.createCombatObject(getScriptEnv()->getScriptInterface()));
setMetatable(L, -1, "Combat");
return 1;
}
int LuaScriptInterface::luaCombatSetParameter(lua_State* L)
{
// combat:setParameter(key, value)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
lua_pushnil(L);
return 1;
}
CombatParam_t key = getNumber<CombatParam_t>(L, 2);
uint32_t value;
if (isBoolean(L, 3)) {
value = getBoolean(L, 3) ? 1 : 0;
} else {
value = getNumber<uint32_t>(L, 3);
}
combat->setParam(key, value);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatSetFormula(lua_State* L)
{
// combat:setFormula(type, mina, minb, maxa, maxb)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
lua_pushnil(L);
return 1;
}
formulaType_t type = getNumber<formulaType_t>(L, 2);
double mina = getNumber<double>(L, 3);
double minb = getNumber<double>(L, 4);
double maxa = getNumber<double>(L, 5);
double maxb = getNumber<double>(L, 6);
combat->setPlayerCombatValues(type, mina, minb, maxa, maxb);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatSetArea(lua_State* L)
{
// combat:setArea(area)
if (getScriptEnv()->getScriptId() != EVENT_ID_LOADING) {
reportErrorFunc("This function can only be used while loading the script.");
lua_pushnil(L);
return 1;
}
const AreaCombat* area = g_luaEnvironment.getAreaObject(getNumber<uint32_t>(L, 2));
if (!area) {
reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
lua_pushnil(L);
return 1;
}
Combat* combat = getUserdata<Combat>(L, 1);
if (combat) {
combat->setArea(new AreaCombat(*area));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatSetCondition(lua_State* L)
{
// combat:setCondition(condition)
Condition* condition = getUserdata<Condition>(L, 2);
Combat* combat = getUserdata<Combat>(L, 1);
if (combat && condition) {
combat->setCondition(condition->clone());
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatSetCallback(lua_State* L)
{
// combat:setCallback(key, function)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
lua_pushnil(L);
return 1;
}
CallBackParam_t key = getNumber<CallBackParam_t>(L, 2);
if (!combat->setCallback(key)) {
lua_pushnil(L);
return 1;
}
CallBack* callback = combat->getCallback(key);
if (!callback) {
lua_pushnil(L);
return 1;
}
const std::string& function = getString(L, 3);
pushBoolean(L, callback->loadCallBack(getScriptEnv()->getScriptInterface(), function));
return 1;
}
int LuaScriptInterface::luaCombatSetOrigin(lua_State* L)
{
// combat:setOrigin(origin)
Combat* combat = getUserdata<Combat>(L, 1);
if (combat) {
combat->setOrigin(getNumber<CombatOrigin>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatExecute(lua_State* L)
{
// combat:execute(creature, variant)
Combat* combat = getUserdata<Combat>(L, 1);
if (!combat) {
pushBoolean(L, false);
return 1;
}
Creature* creature = getCreature(L, 2);
const LuaVariant& variant = getVariant(L, 3);
switch (variant.type) {
case VARIANT_NUMBER: {
Creature* target = g_game.getCreatureByID(variant.number);
if (!target) {
pushBoolean(L, false);
return 1;
}
if (combat->hasArea()) {
combat->doCombat(creature, target->getPosition());
} else {
combat->doCombat(creature, target);
}
break;
}
case VARIANT_POSITION: {
combat->doCombat(creature, variant.pos);
break;
}
case VARIANT_TARGETPOSITION: {
if (combat->hasArea()) {
combat->doCombat(creature, variant.pos);
} else {
combat->postCombatEffects(creature, variant.pos);
g_game.addMagicEffect(variant.pos, CONST_ME_POFF);
}
break;
}
case VARIANT_STRING: {
Player* target = g_game.getPlayerByName(variant.text);
if (!target) {
pushBoolean(L, false);
return 1;
}
combat->doCombat(creature, target);
break;
}
case VARIANT_NONE: {
reportErrorFunc(getErrorDesc(LUA_ERROR_VARIANT_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
default: {
break;
}
}
pushBoolean(L, true);
return 1;
}
// Condition
int LuaScriptInterface::luaConditionCreate(lua_State* L)
{
// Condition(conditionType[, conditionId = CONDITIONID_COMBAT])
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
Condition* condition = Condition::createCondition(conditionId, conditionType, 0, 0);
if (condition) {
pushUserdata<Condition>(L, condition);
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionDelete(lua_State* L)
{
// condition:delete()
Condition** conditionPtr = getRawUserdata<Condition>(L, 1);
if (conditionPtr && *conditionPtr) {
delete *conditionPtr;
*conditionPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaConditionGetId(lua_State* L)
{
// condition:getId()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetSubId(lua_State* L)
{
// condition:getSubId()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getSubId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetType(lua_State* L)
{
// condition:getType()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetIcons(lua_State* L)
{
// condition:getIcons()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getIcons());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetEndTime(lua_State* L)
{
// condition:getEndTime()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getEndTime());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionClone(lua_State* L)
{
// condition:clone()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
pushUserdata<Condition>(L, condition->clone());
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetTicks(lua_State* L)
{
// condition:getTicks()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetTicks(lua_State* L)
{
// condition:setTicks(ticks)
int32_t ticks = getNumber<int32_t>(L, 2);
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
condition->setTicks(ticks);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetParameter(lua_State* L)
{
// condition:setParameter(key, value)
Condition* condition = getUserdata<Condition>(L, 1);
if (!condition) {
lua_pushnil(L);
return 1;
}
ConditionParam_t key = getNumber<ConditionParam_t>(L, 2);
int32_t value;
if (isBoolean(L, 3)) {
value = getBoolean(L, 3) ? 1 : 0;
} else {
value = getNumber<int32_t>(L, 3);
}
condition->setParam(key, value);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaConditionSetFormula(lua_State* L)
{
// condition:setFormula(mina, minb, maxa, maxb)
double maxb = getNumber<double>(L, 5);
double maxa = getNumber<double>(L, 4);
double minb = getNumber<double>(L, 3);
double mina = getNumber<double>(L, 2);
ConditionSpeed* condition = dynamic_cast<ConditionSpeed*>(getUserdata<Condition>(L, 1));
if (condition) {
condition->setFormulaVars(mina, minb, maxa, maxb);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetOutfit(lua_State* L)
{
// condition:setOutfit(outfit)
// condition:setOutfit(lookTypeEx, lookType, lookHead, lookBody, lookLegs, lookFeet[, lookAddons[, lookMount]])
Outfit_t outfit;
if (isTable(L, 2)) {
outfit = getOutfit(L, 2);
} else {
outfit.lookMount = getNumber<uint16_t>(L, 9, outfit.lookMount);
outfit.lookAddons = getNumber<uint8_t>(L, 8, outfit.lookAddons);
outfit.lookFeet = getNumber<uint8_t>(L, 7);
outfit.lookLegs = getNumber<uint8_t>(L, 6);
outfit.lookBody = getNumber<uint8_t>(L, 5);
outfit.lookHead = getNumber<uint8_t>(L, 4);
outfit.lookType = getNumber<uint16_t>(L, 3);
outfit.lookTypeEx = getNumber<uint16_t>(L, 2);
}
ConditionOutfit* condition = dynamic_cast<ConditionOutfit*>(getUserdata<Condition>(L, 1));
if (condition) {
condition->setOutfit(outfit);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionAddDamage(lua_State* L)
{
// condition:addDamage(rounds, time, value)
int32_t value = getNumber<int32_t>(L, 4);
int32_t time = getNumber<int32_t>(L, 3);
int32_t rounds = getNumber<int32_t>(L, 2);
ConditionDamage* condition = dynamic_cast<ConditionDamage*>(getUserdata<Condition>(L, 1));
if (condition) {
pushBoolean(L, condition->addDamage(rounds, time, value));
} else {
lua_pushnil(L);
}
return 1;
}
// MonsterType
int LuaScriptInterface::luaMonsterTypeCreate(lua_State* L)
{
// MonsterType(name)
MonsterType* monsterType = g_monsters.getMonsterType(getString(L, 2));
if (monsterType) {
pushUserdata<MonsterType>(L, monsterType);
setMetatable(L, -1, "MonsterType");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsAttackable(lua_State* L)
{
// monsterType:isAttackable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isAttackable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsConvinceable(lua_State* L)
{
// monsterType:isConvinceable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isConvinceable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsSummonable(lua_State* L)
{
// monsterType:isSummonable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isSummonable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsIllusionable(lua_State* L)
{
// monsterType:isIllusionable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isIllusionable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsHostile(lua_State* L)
{
// monsterType:isHostile()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.isHostile);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsPushable(lua_State* L)
{
// monsterType:isPushable()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.pushable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsHealthShown(lua_State* L)
{
// monsterType:isHealthShown()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, !monsterType->info.hiddenHealth);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanPushItems(lua_State* L)
{
// monsterType:canPushItems()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.canPushItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanPushCreatures(lua_State* L)
{
// monsterType:canPushCreatures()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushBoolean(L, monsterType->info.canPushCreatures);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetName(lua_State* L)
{
// monsterType:getName()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushString(L, monsterType->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetNameDescription(lua_State* L)
{
// monsterType:getNameDescription()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushString(L, monsterType->nameDescription);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetHealth(lua_State* L)
{
// monsterType:getHealth()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.health);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetMaxHealth(lua_State* L)
{
// monsterType:getMaxHealth()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.healthMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetRunHealth(lua_State* L)
{
// monsterType:getRunHealth()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.runAwayHealth);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetExperience(lua_State* L)
{
// monsterType:getExperience()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.experience);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCombatImmunities(lua_State* L)
{
// monsterType:getCombatImmunities()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.damageImmunities);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetConditionImmunities(lua_State* L)
{
// monsterType:getConditionImmunities()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.conditionImmunities);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetAttackList(lua_State* L)
{
// monsterType:getAttackList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.attackSpells.size(), 0);
int index = 0;
for (const auto& spellBlock : monsterType->info.attackSpells) {
lua_createtable(L, 0, 8);
setField(L, "chance", spellBlock.chance);
setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
setField(L, "minCombatValue", spellBlock.minCombatValue);
setField(L, "maxCombatValue", spellBlock.maxCombatValue);
setField(L, "range", spellBlock.range);
setField(L, "speed", spellBlock.speed);
pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
lua_setfield(L, -2, "spell");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetDefenseList(lua_State* L)
{
// monsterType:getDefenseList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.defenseSpells.size(), 0);
int index = 0;
for (const auto& spellBlock : monsterType->info.defenseSpells) {
lua_createtable(L, 0, 8);
setField(L, "chance", spellBlock.chance);
setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
setField(L, "minCombatValue", spellBlock.minCombatValue);
setField(L, "maxCombatValue", spellBlock.maxCombatValue);
setField(L, "range", spellBlock.range);
setField(L, "speed", spellBlock.speed);
pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
lua_setfield(L, -2, "spell");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetElementList(lua_State* L)
{
// monsterType:getElementList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.elementMap.size(), 0);
for (const auto& elementEntry : monsterType->info.elementMap) {
lua_pushnumber(L, elementEntry.second);
lua_rawseti(L, -2, elementEntry.first);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetVoices(lua_State* L)
{
// monsterType:getVoices()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.voiceVector.size(), 0);
for (const auto& voiceBlock : monsterType->info.voiceVector) {
lua_createtable(L, 0, 2);
setField(L, "text", voiceBlock.text);
setField(L, "yellText", voiceBlock.yellText);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetLoot(lua_State* L)
{
// monsterType:getLoot()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
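// Recursively serialize a loot list (and any nested childLoot) into Lua tables.
// Note: this function object must not be static, because the lambda captures
// the current lua_State (L) by reference.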
const std::function<void(const std::vector<LootBlock>&)> parseLoot = [&](const std::vector<LootBlock>& lootList) {
lua_createtable(L, lootList.size(), 0);
int index = 0;
for (const auto& lootBlock : lootList) {
lua_createtable(L, 0, 7);
setField(L, "itemId", lootBlock.id);
setField(L, "chance", lootBlock.chance);
setField(L, "subType", lootBlock.subType);
setField(L, "maxCount", lootBlock.countmax);
setField(L, "actionId", lootBlock.actionId);
setField(L, "text", lootBlock.text);
parseLoot(lootBlock.childLoot);
lua_setfield(L, -2, "childLoot");
lua_rawseti(L, -2, ++index);
}
};
parseLoot(monsterType->info.lootItems);
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCreatureEvents(lua_State* L)
{
// monsterType:getCreatureEvents()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.scripts.size(), 0);
for (const std::string& creatureEvent : monsterType->info.scripts) {
pushString(L, creatureEvent);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetSummonList(lua_State* L)
{
// monsterType:getSummonList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.summons.size(), 0);
for (const auto& summonBlock : monsterType->info.summons) {
lua_createtable(L, 0, 3);
setField(L, "name", summonBlock.name);
setField(L, "speed", summonBlock.speed);
setField(L, "chance", summonBlock.chance);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetMaxSummons(lua_State* L)
{
// monsterType:getMaxSummons()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.maxSummons);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetArmor(lua_State* L)
{
// monsterType:getArmor()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.armor);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetDefense(lua_State* L)
{
// monsterType:getDefense()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.defense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetOutfit(lua_State* L)
{
// monsterType:getOutfit()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
pushOutfit(L, monsterType->info.outfit);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetRace(lua_State* L)
{
// monsterType:getRace()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.race);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCorpseId(lua_State* L)
{
// monsterType:getCorpseId()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.lookcorpse);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetManaCost(lua_State* L)
{
// monsterType:getManaCost()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.manaCost);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetBaseSpeed(lua_State* L)
{
// monsterType:getBaseSpeed()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.baseSpeed);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetLight(lua_State* L)
{
// monsterType:getLight()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, monsterType->info.light.level);
lua_pushnumber(L, monsterType->info.light.color);
return 2;
}
int LuaScriptInterface::luaMonsterTypeGetStaticAttackChance(lua_State* L)
{
// monsterType:getStaticAttackChance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.staticAttackChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetTargetDistance(lua_State* L)
{
// monsterType:getTargetDistance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.targetDistance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetYellChance(lua_State* L)
{
// monsterType:getYellChance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.yellChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetYellSpeedTicks(lua_State* L)
{
// monsterType:getYellSpeedTicks()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.yellSpeedTicks);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetChangeTargetChance(lua_State* L)
{
// monsterType:getChangeTargetChance()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.changeTargetChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetChangeTargetSpeed(lua_State* L)
{
// monsterType:getChangeTargetSpeed()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
lua_pushnumber(L, monsterType->info.changeTargetSpeed);
} else {
lua_pushnil(L);
}
return 1;
}
// Party
int LuaScriptInterface::luaPartyDisband(lua_State* L)
{
// party:disband()
Party** partyPtr = getRawUserdata<Party>(L, 1);
if (partyPtr && *partyPtr) {
Party*& party = *partyPtr;
party->disband();
party = nullptr;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetLeader(lua_State* L)
{
// party:getLeader()
Party* party = getUserdata<Party>(L, 1);
if (!party) {
lua_pushnil(L);
return 1;
}
Player* leader = party->getLeader();
if (leader) {
pushUserdata<Player>(L, leader);
setMetatable(L, -1, "Player");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartySetLeader(lua_State* L)
{
// party:setLeader(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->passPartyLeadership(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetMembers(lua_State* L)
{
// party:getMembers()
Party* party = getUserdata<Party>(L, 1);
if (!party) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, party->getMemberCount(), 0);
for (Player* player : party->getMembers()) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaPartyGetMemberCount(lua_State* L)
{
// party:getMemberCount()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_pushnumber(L, party->getMemberCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetInvitees(lua_State* L)
{
// party:getInvitees()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_createtable(L, party->getInvitationCount(), 0);
int index = 0;
for (Player* player : party->getInvitees()) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetInviteeCount(lua_State* L)
{
// party:getInviteeCount()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_pushnumber(L, party->getInvitationCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyAddInvite(lua_State* L)
{
// party:addInvite(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->invitePlayer(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyRemoveInvite(lua_State* L)
{
// party:removeInvite(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->removeInvite(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyAddMember(lua_State* L)
{
// party:addMember(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->joinParty(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyRemoveMember(lua_State* L)
{
// party:removeMember(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->leaveParty(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceActive(lua_State* L)
{
// party:isSharedExperienceActive()
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->isSharedExperienceActive());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceEnabled(lua_State* L)
{
// party:isSharedExperienceEnabled()
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->isSharedExperienceEnabled());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyShareExperience(lua_State* L)
{
// party:shareExperience(experience)
uint64_t experience = getNumber<uint64_t>(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party) {
party->shareExperience(experience);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartySetSharedExperience(lua_State* L)
{
// party:setSharedExperience(active)
bool active = getBoolean(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->setSharedExperience(party->getLeader(), active));
} else {
lua_pushnil(L);
}
return 1;
}
// Spells
int LuaScriptInterface::luaSpellCreate(lua_State* L)
{
// Spell(words, name or id)
InstantSpell* spell = nullptr;
if (isNumber(L, 2)) {
spell = g_spells->getInstantSpellById(getNumber<uint32_t>(L, 2));
} else {
std::string stringArgument = getString(L, 2);
spell = g_spells->getInstantSpellByName(stringArgument);
if (!spell) {
spell = g_spells->getInstantSpell(stringArgument);
}
}
if (spell) {
pushInstantSpell(L, *spell);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellGetManaCost(lua_State* L)
{
// spell:getManaCost(player)
InstantSpell* spell = getUserdata<InstantSpell>(L, 1);
Player* player = getUserdata<Player>(L, 2);
if (spell && player) {
lua_pushnumber(L, spell->getManaCost(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellGetSoulCost(lua_State* L)
{
// spell:getSoulCost()
if (InstantSpell* spell = getUserdata<InstantSpell>(L, 1)) {
lua_pushnumber(L, spell->getSoulCost());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellIsPremium(lua_State* L)
{
// spell:isPremium()
if (InstantSpell* spell = getUserdata<InstantSpell>(L, 1)) {
pushBoolean(L, spell->isPremium());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellIsLearnable(lua_State* L)
{
// spell:isLearnable()
if (InstantSpell* spell = getUserdata<InstantSpell>(L, 1)) {
pushBoolean(L, spell->isLearnable());
} else {
lua_pushnil(L);
}
return 1;
}
// LuaEnvironment
LuaEnvironment::LuaEnvironment() : LuaScriptInterface("Main Interface") {}
LuaEnvironment::~LuaEnvironment()
{
delete testInterface;
closeState();
}
bool LuaEnvironment::initState()
{
luaState = luaL_newstate();
if (!luaState) {
return false;
}
luaL_openlibs(luaState);
registerFunctions();
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaEnvironment::reInitState()
{
// TODO: get children, reload children
closeState();
return initState();
}
bool LuaEnvironment::closeState()
{
if (!luaState) {
return false;
}
for (const auto& combatEntry : combatIdMap) {
clearCombatObjects(combatEntry.first);
}
for (const auto& areaEntry : areaIdMap) {
clearAreaObjects(areaEntry.first);
}
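// Release the Lua registry references still held by pending timer events so
// they are not leaked when the state is closed.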
for (auto& timerEntry : timerEvents) {
LuaTimerEventDesc timerEventDesc = std::move(timerEntry.second);
for (int32_t parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
}
combatIdMap.clear();
areaIdMap.clear();
timerEvents.clear();
cacheFiles.clear();
lua_close(luaState);
luaState = nullptr;
return true;
}
LuaScriptInterface* LuaEnvironment::getTestInterface()
{
if (!testInterface) {
testInterface = new LuaScriptInterface("Test Interface");
testInterface->initState();
}
return testInterface;
}
Combat* LuaEnvironment::getCombatObject(uint32_t id) const
{
auto it = combatMap.find(id);
if (it == combatMap.end()) {
return nullptr;
}
return it->second;
}
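// Creates a Combat owned by this environment and records it against the
// creating interface so clearCombatObjects() can later reclaim it.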
Combat* LuaEnvironment::createCombatObject(LuaScriptInterface* interface)
{
Combat* combat = new Combat;
combatMap[++lastCombatId] = combat;
combatIdMap[interface].push_back(lastCombatId);
return combat;
}
void LuaEnvironment::clearCombatObjects(LuaScriptInterface* interface)
{
auto it = combatIdMap.find(interface);
if (it == combatIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = combatMap.find(id);
if (itt != combatMap.end()) {
delete itt->second;
combatMap.erase(itt);
}
}
it->second.clear();
}
AreaCombat* LuaEnvironment::getAreaObject(uint32_t id) const
{
auto it = areaMap.find(id);
if (it == areaMap.end()) {
return nullptr;
}
return it->second;
}
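// Allocates a new AreaCombat owned by this environment and returns its id for
// later lookup via getAreaObject().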
uint32_t LuaEnvironment::createAreaObject(LuaScriptInterface* interface)
{
areaMap[++lastAreaId] = new AreaCombat;
areaIdMap[interface].push_back(lastAreaId);
return lastAreaId;
}
void LuaEnvironment::clearAreaObjects(LuaScriptInterface* interface)
{
auto it = areaIdMap.find(interface);
if (it == areaIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = areaMap.find(id);
if (itt != areaMap.end()) {
delete itt->second;
areaMap.erase(itt);
}
}
it->second.clear();
}
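// Executes and removes the timer event registered under eventIndex, releasing
// the Lua registry references it held once the call completes.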
void LuaEnvironment::executeTimerEvent(uint32_t eventIndex)
{
auto it = timerEvents.find(eventIndex);
if (it == timerEvents.end()) {
return;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
//push function
lua_rawgeti(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
//push parameters
for (auto parameter : boost::adaptors::reverse(timerEventDesc.parameters)) {
lua_rawgeti(luaState, LUA_REGISTRYINDEX, parameter);
}
//call the function
if (reserveScriptEnv()) {
ScriptEnvironment* env = getScriptEnv();
env->setTimerEvent();
env->setScriptId(timerEventDesc.scriptId, this);
callFunction(timerEventDesc.parameters.size());
} else {
std::cout << "[Error - LuaScriptInterface::executeTimerEvent] Call stack overflow" << std::endl;
}
//free resources
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
}
| 1 | 14,610 | There's `pushString` to use with C++ strings. | otland-forgottenserver | cpp |
@@ -306,6 +306,13 @@ var _ = infrastructure.DatastoreDescribe("service loop prevention; with 2 nodes"
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
+ // Expect to see empty cali-cidr-block chains. (Allowing time for a Felix
+ // restart.) This ensures that the cali-cidr-block chain has been cleared
+ // before we try a test ping.
+ for _, felix := range felixes {
+ Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(BeEmpty())
+ }
+
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true) | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build fvtests
package fv_test
import (
"context"
"regexp"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/felix/fv/containers"
"github.com/projectcalico/felix/fv/infrastructure"
"github.com/projectcalico/felix/fv/utils"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/options"
)
var _ = infrastructure.DatastoreDescribe("service loop prevention; with 2 nodes", []apiconfig.DatastoreType{apiconfig.EtcdV3, apiconfig.Kubernetes}, func(getInfra infrastructure.InfraFactory) {
var (
infra infrastructure.DatastoreInfra
felixes []*infrastructure.Felix
client client.Interface
)
BeforeEach(func() {
infra = getInfra()
options := infrastructure.DefaultTopologyOptions()
options.IPIPEnabled = false
felixes, client = infrastructure.StartNNodeTopology(2, options, infra)
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
for _, felix := range felixes {
felix.Exec("iptables-save", "-c")
felix.Exec("ipset", "list")
felix.Exec("ip", "r")
felix.Exec("ip", "a")
}
}
for _, felix := range felixes {
felix.Stop()
}
if CurrentGinkgoTestDescription().Failed {
infra.DumpErrorData()
}
infra.Stop()
})
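// updateFelixConfig applies deltaFn to the default FelixConfiguration,
// creating the resource first if it does not exist yet.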
updateFelixConfig := func(deltaFn func(*api.FelixConfiguration)) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cfg, err := client.FelixConfigurations().Get(ctx, "default", options.GetOptions{})
if _, doesNotExist := err.(errors.ErrorResourceDoesNotExist); doesNotExist {
cfg = api.NewFelixConfiguration()
cfg.Name = "default"
deltaFn(cfg)
_, err = client.FelixConfigurations().Create(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
} else {
Expect(err).NotTo(HaveOccurred())
deltaFn(cfg)
_, err = client.FelixConfigurations().Update(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
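// updateBGPConfig does the same create-or-update dance for the default
// BGPConfiguration resource.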
updateBGPConfig := func(deltaFn func(*api.BGPConfiguration)) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cfg, err := client.BGPConfigurations().Get(ctx, "default", options.GetOptions{})
if _, doesNotExist := err.(errors.ErrorResourceDoesNotExist); doesNotExist {
cfg = api.NewBGPConfiguration()
cfg.Name = "default"
deltaFn(cfg)
_, err = client.BGPConfigurations().Create(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
} else {
Expect(err).NotTo(HaveOccurred())
deltaFn(cfg)
_, err = client.BGPConfigurations().Update(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
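// getCIDRBlockRules returns a polling function (for use with Eventually) that
// runs the given save command on the Felix and returns the rules currently
// programmed in the cali-cidr-block chain.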
getCIDRBlockRules := func(felix *infrastructure.Felix, saveCommand string) func() []string {
return func() []string {
out, err := felix.ExecOutput(saveCommand, "-t", "filter")
Expect(err).NotTo(HaveOccurred())
var cidrBlockLines []string
for _, line := range strings.Split(out, "\n") {
if strings.Contains(line, "-A cali-cidr-block") {
cidrBlockLines = append(cidrBlockLines, line)
}
}
return cidrBlockLines
}
}
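// tryRoutingLoop models an external client reaching a service CIDR via a
// gateway that routes back into the cluster, then uses tcpdump to verify
// whether a single ping to an unused service IP loops (expectLoop=true) or is
// blocked by the cali-cidr-block rules (expectLoop=false).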
tryRoutingLoop := func(expectLoop bool) {
// Run containers to model a default gateway, and an external client connecting to
// services within the cluster via that gateway.
externalGW := containers.Run("external-gw",
containers.RunOpts{AutoRemove: true},
"--privileged", // So that we can add routes inside the container.
utils.Config.BusyboxImage,
"/bin/sh", "-c", "sleep 1000")
defer externalGW.Stop()
externalClient := containers.Run("external-client",
containers.RunOpts{AutoRemove: true},
"--privileged", // So that we can add routes inside the container.
utils.Config.BusyboxImage,
"/bin/sh", "-c", "sleep 1000")
defer externalClient.Stop()
// Add a service CIDR route in those containers, similar to the routes that they
// would have via BGP per our service advertisement feature. (This should really be
// an ECMP route to both Felixes, but busybox's ip can't program ECMP routes, and a
// non-ECMP route is sufficient to demonstrate the looping issue.)
externalClient.Exec("ip", "r", "a", "10.96.0.0/17", "via", externalGW.IP)
externalGW.Exec("ip", "r", "a", "10.96.0.0/17", "via", felixes[0].IP)
// Configure the external gateway and client to forward, in order to create
// the conditions for looping.
externalClient.Exec("sysctl", "-w", "net.ipv4.ip_forward=1")
externalGW.Exec("sysctl", "-w", "net.ipv4.ip_forward=1")
// Also tell Felix to route that CIDR to the external gateway.
felixes[0].ExecMayFail("ip", "r", "d", "10.96.0.0/17")
felixes[0].Exec("ip", "r", "a", "10.96.0.0/17", "via", externalGW.IP)
felixes[0].Exec("iptables", "-P", "FORWARD", "ACCEPT")
// Start monitoring, on the Felix, all packets to or from a specific (but
// unused) service IP.
tcpdumpF := felixes[0].AttachTCPDump("eth0")
tcpdumpF.AddMatcher("serviceIPPackets", regexp.MustCompile("10\\.96\\.0\\.19"))
tcpdumpF.Start()
defer tcpdumpF.Stop()
// Send a single ping from the external client to the unused service IP.
err := externalClient.ExecMayFail("ping", "-c", "1", "-W", "1", "10.96.0.19")
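// The ping target is deliberately an unused service IP, so the ping itself
// must always fail; what varies is how many packets tcpdump sees.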
Expect(err).To(HaveOccurred())
countServiceIPPackets := func() int {
// Return the number of packets observed to/from 10.96.0.19.
return tcpdumpF.MatchCount("serviceIPPackets")
}
if expectLoop {
// Tcpdump should see more than 2 packets, because of looping. Note: 2
// packets would be Felix receiving the ping and then forwarding it out
// again. I want to check here that it's also looped around again by the
// gateway, resulting in MORE THAN 2 packets.
Eventually(countServiceIPPackets).Should(BeNumerically(">", 2))
} else {
// Tcpdump should see just 1 packet, the request, with no response (because
// we DROP) and no looping.
Eventually(countServiceIPPackets).Should(BeNumerically("==", 1))
}
}
It("programs iptables as expected to block service routing loops", func() {
By("configuring service cluster IPs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceClusterIPs = []api.ServiceClusterIPBlock{
{
CIDR: "10.96.0.0/17",
},
{
CIDR: "fd5f::/119",
},
}
})
// Default ServiceLoopPrevention is Drop, so expect to see rules in cali-cidr-block
// chains with DROP. (Felix handles BGPConfiguration without restarting, so this
// should be quick.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 10\\.96\\.0\\.0/17 .* -j DROP"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5f::/119 .* -j DROP"),
))
}
By("test that we don't get a routing loop")
tryRoutingLoop(false)
By("configuring ServiceLoopPrevention=Reject")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Reject"
})
// Expect to see rules in cali-cidr-block chains with REJECT. (Allowing time for a
// Felix restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 10\\.96\\.0\\.0/17 .* -j REJECT"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5f::/119 .* -j REJECT"),
))
}
By("configuring ServiceLoopPrevention=Disabled")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
// Expect to see empty cali-cidr-block chains. (Allowing time for a Felix restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(BeEmpty())
Eventually(getCIDRBlockRules(felix, "ip6tables-save"), "8s", "0.5s").Should(BeEmpty())
}
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true)
By("configuring ServiceLoopPrevention=Drop")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Drop"
})
// Expect to see rules in cali-cidr-block chains with DROP. (Allowing time for a
// Felix restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 10\\.96\\.0\\.0/17 .* -j DROP"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5f::/119 .* -j DROP"),
))
}
By("updating the service CIDRs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceClusterIPs = []api.ServiceClusterIPBlock{
{
CIDR: "1.1.0.0/16",
},
{
CIDR: "fd5e::/119",
},
}
})
// Expect to see rules in cali-cidr-block chains with DROP and the updated CIDRs.
// (BGPConfiguration change is handled without needing a restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 1\\.1\\.0\\.0/16 .* -j DROP"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5e::/119 .* -j DROP"),
))
}
By("resetting BGP config")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceClusterIPs = nil
})
})
It("ServiceExternalIPs also blocks service routing loop", func() {
By("configuring service external IPs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceExternalIPs = []api.ServiceExternalIPBlock{
{
CIDR: "10.96.0.0/17",
},
}
})
By("test that we don't get a routing loop")
tryRoutingLoop(false)
By("configuring ServiceLoopPrevention=Disabled")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true)
By("resetting BGP config")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceExternalIPs = nil
})
})
It("ServiceLoadBalancerIPs also blocks service routing loop", func() {
By("configuring service LB IPs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceLoadBalancerIPs = []api.ServiceLoadBalancerIPBlock{
{
CIDR: "10.96.0.0/17",
},
}
})
By("test that we don't get a routing loop")
tryRoutingLoop(false)
By("configuring ServiceLoopPrevention=Disabled")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true)
By("resetting BGP config")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceLoadBalancerIPs = nil
})
})
})
| 1 | 19,564 | qq: Should this include the iptables6-save sim. to the inverse checks above? | projectcalico-felix | go |
@@ -622,7 +622,11 @@ bool RTPSParticipantImpl::create_writer(
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
m_allWriterList.push_back(SWriter);
- if (!is_builtin)
+ if (is_builtin)
+ {
+ async_thread().wake_up(SWriter);
+ }
+ else
{
m_userWriterList.push_back(SWriter);
} | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
 * @file RTPSParticipantImpl.cpp
*
*/
#include <rtps/participant/RTPSParticipantImpl.h>
#include <rtps/flowcontrol/ThroughputController.h>
#include <rtps/persistence/PersistenceService.h>
#include <rtps/history/BasicPayloadPool.hpp>
#include <fastdds/rtps/messages/MessageReceiver.h>
#include <fastdds/rtps/history/WriterHistory.h>
#include <fastdds/rtps/writer/StatelessWriter.h>
#include <fastdds/rtps/writer/StatefulWriter.h>
#include <fastdds/rtps/writer/StatelessPersistentWriter.h>
#include <fastdds/rtps/writer/StatefulPersistentWriter.h>
#include <fastdds/rtps/reader/StatelessReader.h>
#include <fastdds/rtps/reader/StatefulReader.h>
#include <fastdds/rtps/reader/StatelessPersistentReader.h>
#include <fastdds/rtps/reader/StatefulPersistentReader.h>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/transport/UDPv4TransportDescriptor.h>
#include <fastdds/rtps/transport/TCPv4TransportDescriptor.h>
#include <fastdds/rtps/transport/TCPv6TransportDescriptor.h>
#include <fastdds/rtps/transport/shared_mem/SharedMemTransportDescriptor.h>
#include <fastdds/rtps/RTPSDomain.h>
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/builtin/discovery/participant/PDPSimple.h>
#include <fastdds/rtps/builtin/data/ParticipantProxyData.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
#include <fastrtps/utils/IPFinder.h>
#include <fastrtps/utils/Semaphore.h>
#include <fastrtps/utils/System.h>
#include <mutex>
#include <functional>
#include <algorithm>
#include <fastdds/dds/log/Log.hpp>
#include <fastrtps/xmlparser/XMLProfileManager.h>
namespace eprosima {
namespace fastrtps {
namespace rtps {
using UDPv4TransportDescriptor = fastdds::rtps::UDPv4TransportDescriptor;
using TCPTransportDescriptor = fastdds::rtps::TCPTransportDescriptor;
using SharedMemTransportDescriptor = fastdds::rtps::SharedMemTransportDescriptor;
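// Maps a builtin reader entity id to the builtin writer entity id it should
// trust data from (c_EntityId_Unknown for non-builtin readers).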
static EntityId_t TrustedWriter(
const EntityId_t& reader)
{
return
(reader == c_EntityId_SPDPReader) ? c_EntityId_SPDPWriter :
(reader == c_EntityId_SEDPPubReader) ? c_EntityId_SEDPPubWriter :
(reader == c_EntityId_SEDPSubReader) ? c_EntityId_SEDPSubWriter :
(reader == c_EntityId_ReaderLiveliness) ? c_EntityId_WriterLiveliness :
c_EntityId_Unknown;
}
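// True when the participant can work in intraprocess-only mode: full
// intraprocess delivery is enabled and discovery filters out every
// participant from a different host or process.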
static bool should_be_intraprocess_only(
const RTPSParticipantAttributes& att)
{
return
xmlparser::XMLProfileManager::library_settings().intraprocess_delivery == INTRAPROCESS_FULL &&
att.builtin.discovery_config.ignoreParticipantFlags ==
(ParticipantFilteringFlags::FILTER_DIFFERENT_HOST | ParticipantFilteringFlags::FILTER_DIFFERENT_PROCESS);
}
Locator_t& RTPSParticipantImpl::applyLocatorAdaptRule(
Locator_t& loc)
{
// This is a completely made-up rule.
// It is the transport's responsibility to interpret this new port.
loc.port += m_att.port.participantIDGain;
return loc;
}
RTPSParticipantImpl::RTPSParticipantImpl(
uint32_t domain_id,
const RTPSParticipantAttributes& PParam,
const GuidPrefix_t& guidP,
const GuidPrefix_t& persistence_guid,
RTPSParticipant* par,
RTPSParticipantListener* plisten)
: domain_id_(domain_id)
, m_att(PParam)
, m_guid(guidP, c_EntityId_RTPSParticipant)
, m_persistence_guid(persistence_guid, c_EntityId_RTPSParticipant)
, mp_builtinProtocols(nullptr)
, mp_ResourceSemaphore(new Semaphore(0))
, IdCounter(0)
, type_check_fn_(nullptr)
#if HAVE_SECURITY
, m_security_manager(this)
#endif // if HAVE_SECURITY
, mp_participantListener(plisten)
, mp_userParticipant(par)
, mp_mutex(new std::recursive_mutex())
, is_intraprocess_only_(should_be_intraprocess_only(PParam))
, has_shm_transport_(false)
{
// Builtin transports by default
if (PParam.useBuiltinTransports)
{
UDPv4TransportDescriptor descriptor;
descriptor.sendBufferSize = m_att.sendSocketBufferSize;
descriptor.receiveBufferSize = m_att.listenSocketBufferSize;
m_network_Factory.RegisterTransport(&descriptor);
#ifdef SHM_TRANSPORT_BUILTIN
SharedMemTransportDescriptor shm_transport;
// We assume (Linux) UDP doubles the user socket buffer size in kernel, so
// the equivalent segment size in SHM would be socket buffer size x 2
auto segment_size_udp_equivalent =
std::max(m_att.sendSocketBufferSize, m_att.listenSocketBufferSize) * 2;
shm_transport.segment_size(segment_size_udp_equivalent);
// Use same default max_message_size on both UDP and SHM
shm_transport.max_message_size(descriptor.max_message_size());
has_shm_transport_ |= m_network_Factory.RegisterTransport(&shm_transport);
#endif // ifdef SHM_TRANSPORT_BUILTIN
}
// A BACKUP server's GUID is its persistence GUID
if (PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol::BACKUP)
{
m_persistence_guid = m_guid;
}
// Client-server discovery protocol requires that every TCP transport has a listening port
switch (PParam.builtin.discovery_config.discoveryProtocol)
{
case DiscoveryProtocol::BACKUP:
case DiscoveryProtocol::CLIENT:
case DiscoveryProtocol::SERVER:
// Verify if listening ports are provided
for (auto& transportDescriptor : PParam.userTransports)
{
TCPTransportDescriptor* pT = dynamic_cast<TCPTransportDescriptor*>(transportDescriptor.get());
if (pT && pT->listening_ports.empty())
{
logError(RTPS_PARTICIPANT,
"Participant " << m_att.getName() << " with GUID " << m_guid
<< " tries to use discovery server over TCP without providing a proper listening port");
}
}
default:
break;
}
// User defined transports
for (const auto& transportDescriptor : PParam.userTransports)
{
if (m_network_Factory.RegisterTransport(transportDescriptor.get()))
{
has_shm_transport_ |=
(dynamic_cast<fastdds::rtps::SharedMemTransportDescriptor*>(transportDescriptor.get()) != nullptr);
}
else
{
// SHM transport could be disabled
if ((dynamic_cast<fastdds::rtps::SharedMemTransportDescriptor*>(transportDescriptor.get()) != nullptr))
{
logError(RTPS_PARTICIPANT,
"Unable to Register SHM Transport. SHM Transport is not supported in"
" the current platform.");
}
else
{
logError(RTPS_PARTICIPANT,
"User transport failed to register.");
}
}
}
mp_userParticipant->mp_impl = this;
mp_event_thr.init_thread();
if (!networkFactoryHasRegisteredTransports())
{
return;
}
// Throughput controller, if the descriptor has valid values
if (PParam.throughputController.bytesPerPeriod != UINT32_MAX && PParam.throughputController.periodMillisecs != 0)
{
std::unique_ptr<FlowController> controller(new ThroughputController(PParam.throughputController, this));
m_controllers.push_back(std::move(controller));
}
/* If metatrafficMulticastLocatorList is empty, add the mandatory default Locators;
otherwise, take the ones provided. */
// Creation of metatraffic locator and receiver resources
uint32_t metatraffic_multicast_port = m_att.port.getMulticastPort(domain_id_);
uint32_t metatraffic_unicast_port = m_att.port.getUnicastPort(domain_id_,
static_cast<uint32_t>(m_att.participantID));
/* INSERT DEFAULT MANDATORY MULTICAST LOCATORS HERE */
if (m_att.builtin.metatrafficMulticastLocatorList.empty() && m_att.builtin.metatrafficUnicastLocatorList.empty())
{
m_network_Factory.getDefaultMetatrafficMulticastLocators(m_att.builtin.metatrafficMulticastLocatorList,
metatraffic_multicast_port);
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficMulticastLocatorList);
m_network_Factory.getDefaultMetatrafficUnicastLocators(m_att.builtin.metatrafficUnicastLocatorList,
metatraffic_unicast_port);
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficUnicastLocatorList);
}
else
{
std::for_each(m_att.builtin.metatrafficMulticastLocatorList.begin(),
m_att.builtin.metatrafficMulticastLocatorList.end(), [&](Locator_t& locator)
{
m_network_Factory.fillMetatrafficMulticastLocator(locator, metatraffic_multicast_port);
});
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficMulticastLocatorList);
std::for_each(m_att.builtin.metatrafficUnicastLocatorList.begin(),
m_att.builtin.metatrafficUnicastLocatorList.end(), [&](Locator_t& locator)
{
m_network_Factory.fillMetatrafficUnicastLocator(locator, metatraffic_unicast_port);
});
m_network_Factory.NormalizeLocators(m_att.builtin.metatrafficUnicastLocatorList);
}
// Initial peers
if (m_att.builtin.initialPeersList.empty())
{
m_att.builtin.initialPeersList = m_att.builtin.metatrafficMulticastLocatorList;
}
else
{
LocatorList_t initial_peers;
initial_peers.swap(m_att.builtin.initialPeersList);
std::for_each(initial_peers.begin(), initial_peers.end(),
[&](Locator_t& locator)
{
m_network_Factory.configureInitialPeerLocator(domain_id_, locator, m_att);
});
}
// Creation of user locator and receiver resources
bool hasLocatorsDefined = true;
// If no default locators are defined, we define some.
/* The reasoning here is the following.
If the parameters of the RTPS Participant don't hold default listening locators for the creation
of Endpoints, we make some for Unicast only.
If there is at least one listen locator of any kind, we do not create any default ones.
If there are no sending locators defined, we create default ones for the transports we implement.
*/
if (m_att.defaultUnicastLocatorList.empty() && m_att.defaultMulticastLocatorList.empty())
{
//Default Unicast Locators in case they have not been provided
/* INSERT DEFAULT UNICAST LOCATORS FOR THE PARTICIPANT */
hasLocatorsDefined = false;
m_network_Factory.getDefaultUnicastLocators(domain_id_, m_att.defaultUnicastLocatorList, m_att);
}
else
{
// Locator with port 0, calculate port.
std::for_each(m_att.defaultUnicastLocatorList.begin(), m_att.defaultUnicastLocatorList.end(),
[&](Locator_t& loc)
{
m_network_Factory.fill_default_locator_port(domain_id_, loc, m_att, false);
});
}
// Normalize unicast locators.
m_network_Factory.NormalizeLocators(m_att.defaultUnicastLocatorList);
if (!hasLocatorsDefined)
{
logInfo(RTPS_PARTICIPANT, m_att.getName() << " Created with NO default Unicast Locator List, adding Locators:"
<< m_att.defaultUnicastLocatorList);
}
#if HAVE_SECURITY
// Start security
// TODO(Ricardo) Get returned value in future.
m_security_manager_initialized = m_security_manager.init(security_attributes_, PParam.properties,
m_is_security_active);
if (!m_security_manager_initialized)
{
// Participant will be deleted, no need to allocate buffers or create builtin endpoints
return;
}
#endif // if HAVE_SECURITY
if (is_intraprocess_only())
{
m_att.builtin.metatrafficUnicastLocatorList.clear();
m_att.defaultUnicastLocatorList.clear();
m_att.defaultMulticastLocatorList.clear();
}
createReceiverResources(m_att.builtin.metatrafficMulticastLocatorList, true, false);
createReceiverResources(m_att.builtin.metatrafficUnicastLocatorList, true, false);
createReceiverResources(m_att.defaultUnicastLocatorList, true, false);
createReceiverResources(m_att.defaultMulticastLocatorList, true, false);
bool allow_growing_buffers = m_att.allocation.send_buffers.dynamic;
size_t num_send_buffers = m_att.allocation.send_buffers.preallocated_number;
if (num_send_buffers == 0)
{
// Three buffers (user, events and async writer threads)
num_send_buffers = 3;
// Add one buffer per reception thread
num_send_buffers += m_receiverResourcelist.size();
}
// Create buffer pool
send_buffers_.reset(new SendBuffersManager(num_send_buffers, allow_growing_buffers));
send_buffers_->init(this);
#if HAVE_SECURITY
if (m_is_security_active)
{
m_is_security_active = m_security_manager.create_entities();
if (!m_is_security_active)
{
// Participant will be deleted, no need to create builtin endpoints
m_security_manager_initialized = false;
return;
}
}
#endif // if HAVE_SECURITY
mp_builtinProtocols = new BuiltinProtocols();
logInfo(RTPS_PARTICIPANT, "RTPSParticipant \"" << m_att.getName() << "\" with guidPrefix: " << m_guid.guidPrefix);
}
RTPSParticipantImpl::RTPSParticipantImpl(
uint32_t domain_id,
const RTPSParticipantAttributes& PParam,
const GuidPrefix_t& guidP,
RTPSParticipant* par,
RTPSParticipantListener* plisten)
: RTPSParticipantImpl(domain_id, PParam, guidP, c_GuidPrefix_Unknown, par, plisten)
{
}
void RTPSParticipantImpl::enable()
{
// Start builtin protocols
if (!mp_builtinProtocols->initBuiltinProtocols(this, m_att.builtin))
{
logError(RTPS_PARTICIPANT, "The builtin protocols were not correctly initialized");
}
//Start reception
for (auto& receiver : m_receiverResourcelist)
{
receiver.Receiver->RegisterReceiver(receiver.mp_receiver);
}
}
void RTPSParticipantImpl::disable()
{
// Ensure that other participants will not accidentally discover this one
if (mp_builtinProtocols && mp_builtinProtocols->mp_PDP)
{
mp_builtinProtocols->stopRTPSParticipantAnnouncement();
}
// Disable Retries on Transports
m_network_Factory.Shutdown();
// Safely abort threads.
for (auto& block : m_receiverResourcelist)
{
block.Receiver->UnregisterReceiver(block.mp_receiver);
block.disable();
}
while (m_userReaderList.size() > 0)
{
deleteUserEndpoint(static_cast<Endpoint*>(*m_userReaderList.begin()));
}
while (m_userWriterList.size() > 0)
{
deleteUserEndpoint(static_cast<Endpoint*>(*m_userWriterList.begin()));
}
delete(mp_builtinProtocols);
mp_builtinProtocols = nullptr;
}
const std::vector<RTPSWriter*>& RTPSParticipantImpl::getAllWriters() const
{
return m_allWriterList;
}
const std::vector<RTPSReader*>& RTPSParticipantImpl::getAllReaders() const
{
return m_allReaderList;
}
RTPSParticipantImpl::~RTPSParticipantImpl()
{
disable();
#if HAVE_SECURITY
m_security_manager.destroy();
#endif // if HAVE_SECURITY
// Destruct message receivers
for (auto& block : m_receiverResourcelist)
{
delete block.mp_receiver;
}
m_receiverResourcelist.clear();
delete mp_ResourceSemaphore;
delete mp_userParticipant;
send_resource_list_.clear();
delete mp_mutex;
}
template <EndpointKind_t kind, octet no_key, octet with_key>
bool RTPSParticipantImpl::preprocess_endpoint_attributes(
const char* debug_label,
const EntityId_t& entity_id,
EndpointAttributes& att,
EntityId_t& entId)
{
if (!att.unicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Unicast Locator List for " << debug_label << " contains invalid Locator");
return false;
}
if (!att.multicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Multicast Locator List for " << debug_label << " contains invalid Locator");
return false;
}
if (!att.remoteLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Remote Locator List for " << debug_label << " contains invalid Locator");
return false;
}
if (entity_id == c_EntityId_Unknown)
{
if (att.topicKind == NO_KEY)
{
entId.value[3] = no_key;
}
else if (att.topicKind == WITH_KEY)
{
entId.value[3] = with_key;
}
uint32_t idnum;
if (att.getEntityID() > 0)
{
idnum = static_cast<uint32_t>(att.getEntityID());
}
else
{
IdCounter++;
idnum = IdCounter;
}
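// Pack the low three bytes of the id counter into the entityId key;
// entId.value[3] already holds the kind octet chosen above.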
octet* c = reinterpret_cast<octet*>(&idnum);
entId.value[2] = c[0];
entId.value[1] = c[1];
entId.value[0] = c[2];
if (this->existsEntityId(entId, kind))
{
logError(RTPS_PARTICIPANT,
"A " << debug_label << " with the same entityId already exists in this RTPSParticipant");
return false;
}
}
else
{
entId = entity_id;
}
if (att.persistence_guid == c_Guid_Unknown)
{
// Try to load persistence_guid from property
const std::string* persistence_guid_property = PropertyPolicyHelper::find_property(
att.properties, "dds.persistence.guid");
if (persistence_guid_property != nullptr)
{
// Load persistence_guid from property
std::istringstream(persistence_guid_property->c_str()) >> att.persistence_guid;
if (att.persistence_guid == c_Guid_Unknown)
{
// Wrongly configured property
logError(RTPS_PARTICIPANT, "Cannot configure " << debug_label << "'s persistence GUID from '"
<< persistence_guid_property->c_str()
<< "'. Wrong input");
return false;
}
}
}
return true;
}
template<typename Functor>
bool RTPSParticipantImpl::create_writer(
RTPSWriter** writer_out,
WriterAttributes& param,
const EntityId_t& entity_id,
bool is_builtin,
const Functor& callback)
{
std::string type = (param.endpoint.reliabilityKind == RELIABLE) ? "RELIABLE" : "BEST_EFFORT";
logInfo(RTPS_PARTICIPANT, "Creating writer of type " << type);
EntityId_t entId;
if (!preprocess_endpoint_attributes<WRITER, 0x03, 0x02>("writer", entity_id, param.endpoint, entId))
{
return false;
}
if (((param.throughputController.bytesPerPeriod != UINT32_MAX && param.throughputController.periodMillisecs != 0) ||
(m_att.throughputController.bytesPerPeriod != UINT32_MAX &&
m_att.throughputController.periodMillisecs != 0))
&& param.mode != ASYNCHRONOUS_WRITER)
{
logError(RTPS_PARTICIPANT,
"Writer has to be configured to publish asynchronously, because a flowcontroller was configured");
return false;
}
// Special case for DiscoveryProtocol::BACKUP, which abuses persistence guid
GUID_t former_persistence_guid = param.endpoint.persistence_guid;
if (param.endpoint.persistence_guid == c_Guid_Unknown)
{
if (m_persistence_guid != c_Guid_Unknown)
{
// Generate persistence guid from participant persistence guid
param.endpoint.persistence_guid = GUID_t(
m_persistence_guid.guidPrefix,
entity_id);
}
}
// Get persistence service
IPersistenceService* persistence = nullptr;
if (!get_persistence_service("writer", is_builtin, param.endpoint, persistence))
{
return false;
}
normalize_endpoint_locators(param.endpoint);
RTPSWriter* SWriter = nullptr;
GUID_t guid(m_guid.guidPrefix, entId);
SWriter = callback(guid, param, persistence, param.endpoint.reliabilityKind == RELIABLE);
// restore attributes
param.endpoint.persistence_guid = former_persistence_guid;
if (SWriter == nullptr)
{
return false;
}
#if HAVE_SECURITY
if (!is_builtin)
{
if (!m_security_manager.register_local_writer(SWriter->getGuid(),
param.endpoint.properties, SWriter->getAttributes().security_attributes()))
{
delete(SWriter);
return false;
}
}
else
{
if (!m_security_manager.register_local_builtin_writer(SWriter->getGuid(),
SWriter->getAttributes().security_attributes()))
{
delete(SWriter);
return false;
}
}
#endif // if HAVE_SECURITY
createSendResources(SWriter);
if (param.endpoint.reliabilityKind == RELIABLE)
{
if (!createAndAssociateReceiverswithEndpoint(SWriter))
{
delete(SWriter);
return false;
}
}
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
m_allWriterList.push_back(SWriter);
if (!is_builtin)
{
m_userWriterList.push_back(SWriter);
}
*writer_out = SWriter;
// If the terminal throughput controller has proper user defined values, instantiate it
if (param.throughputController.bytesPerPeriod != UINT32_MAX && param.throughputController.periodMillisecs != 0)
{
std::unique_ptr<FlowController> controller(new ThroughputController(param.throughputController, SWriter));
SWriter->add_flow_controller(std::move(controller));
}
return true;
}
template <typename Functor>
bool RTPSParticipantImpl::create_reader(
RTPSReader** reader_out,
ReaderAttributes& param,
const EntityId_t& entity_id,
bool is_builtin,
bool enable,
const Functor& callback)
{
std::string type = (param.endpoint.reliabilityKind == RELIABLE) ? "RELIABLE" : "BEST_EFFORT";
logInfo(RTPS_PARTICIPANT, "Creating reader of type " << type);
EntityId_t entId;
if (!preprocess_endpoint_attributes<READER, 0x04, 0x07>("reader", entity_id, param.endpoint, entId))
{
return false;
}
// Get persistence service
IPersistenceService* persistence = nullptr;
if (!get_persistence_service("reader", is_builtin, param.endpoint, persistence))
{
return false;
}
normalize_endpoint_locators(param.endpoint);
RTPSReader* SReader = nullptr;
GUID_t guid(m_guid.guidPrefix, entId);
SReader = callback(guid, param, persistence, param.endpoint.reliabilityKind == RELIABLE);
if (SReader == nullptr)
{
return false;
}
#if HAVE_SECURITY
if (!is_builtin)
{
if (!m_security_manager.register_local_reader(SReader->getGuid(),
param.endpoint.properties, SReader->getAttributes().security_attributes()))
{
delete(SReader);
return false;
}
}
else
{
if (!m_security_manager.register_local_builtin_reader(SReader->getGuid(),
SReader->getAttributes().security_attributes()))
{
delete(SReader);
return false;
}
}
#endif // if HAVE_SECURITY
if (param.endpoint.reliabilityKind == RELIABLE)
{
createSendResources(SReader);
}
if (is_builtin)
{
SReader->setTrustedWriter(TrustedWriter(SReader->getGuid().entityId));
}
if (enable)
{
if (!createAndAssociateReceiverswithEndpoint(SReader))
{
delete(SReader);
return false;
}
}
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
m_allReaderList.push_back(SReader);
if (!is_builtin)
{
m_userReaderList.push_back(SReader);
}
*reader_out = SReader;
return true;
}
/*
*
* MAIN RTPSParticipant IMPL API
*
*/
bool RTPSParticipantImpl::createWriter(
RTPSWriter** WriterOut,
WriterAttributes& param,
WriterHistory* hist,
WriterListener* listen,
const EntityId_t& entityId,
bool isBuiltin)
{
auto callback = [hist, listen, this]
(const GUID_t& guid, WriterAttributes& param, IPersistenceService* persistence,
bool is_reliable) -> RTPSWriter*
{
if (is_reliable)
{
if (persistence != nullptr)
{
return new StatefulPersistentWriter(this, guid, param, hist, listen, persistence);
}
else
{
return new StatefulWriter(this, guid, param, hist, listen);
}
}
else
{
if (persistence != nullptr)
{
return new StatelessPersistentWriter(this, guid, param, hist, listen, persistence);
}
else
{
return new StatelessWriter(this, guid, param, hist, listen);
}
}
};
return create_writer(WriterOut, param, entityId, isBuiltin, callback);
}
bool RTPSParticipantImpl::createWriter(
RTPSWriter** WriterOut,
WriterAttributes& param,
const std::shared_ptr<IPayloadPool>& payload_pool,
WriterHistory* hist,
WriterListener* listen,
const EntityId_t& entityId,
bool isBuiltin)
{
if (!payload_pool)
{
logError(RTPS_PARTICIPANT, "Trying to create writer with null payload pool");
return false;
}
auto callback = [hist, listen, &payload_pool, this]
(const GUID_t& guid, WriterAttributes& param, IPersistenceService* persistence,
bool is_reliable) -> RTPSWriter*
{
if (is_reliable)
{
if (persistence != nullptr)
{
return new StatefulPersistentWriter(this, guid, param, payload_pool, hist, listen, persistence);
}
else
{
return new StatefulWriter(this, guid, param, payload_pool, hist, listen);
}
}
else
{
if (persistence != nullptr)
{
return new StatelessPersistentWriter(this, guid, param, payload_pool, hist, listen,
persistence);
}
else
{
return new StatelessWriter(this, guid, param, payload_pool, hist, listen);
}
}
};
return create_writer(WriterOut, param, entityId, isBuiltin, callback);
}
bool RTPSParticipantImpl::createReader(
RTPSReader** ReaderOut,
ReaderAttributes& param,
ReaderHistory* hist,
ReaderListener* listen,
const EntityId_t& entityId,
bool isBuiltin,
bool enable)
{
auto callback = [hist, listen, this]
(const GUID_t& guid, ReaderAttributes& param, IPersistenceService* persistence,
bool is_reliable) -> RTPSReader*
{
if (is_reliable)
{
if (persistence != nullptr)
{
return new StatefulPersistentReader(this, guid, param, hist, listen, persistence);
}
else
{
return new StatefulReader(this, guid, param, hist, listen);
}
}
else
{
if (persistence != nullptr)
{
return new StatelessPersistentReader(this, guid, param, hist, listen, persistence);
}
else
{
return new StatelessReader(this, guid, param, hist, listen);
}
}
};
return create_reader(ReaderOut, param, entityId, isBuiltin, enable, callback);
}
bool RTPSParticipantImpl::createReader(
RTPSReader** ReaderOut,
ReaderAttributes& param,
const std::shared_ptr<IPayloadPool>& payload_pool,
ReaderHistory* hist,
ReaderListener* listen,
const EntityId_t& entityId,
bool isBuiltin,
bool enable)
{
if (!payload_pool)
{
logError(RTPS_PARTICIPANT, "Trying to create reader with null payload pool");
return false;
}
auto callback = [hist, listen, &payload_pool, this]
(const GUID_t& guid, ReaderAttributes& param, IPersistenceService* persistence,
bool is_reliable) -> RTPSReader*
{
if (is_reliable)
{
if (persistence != nullptr)
{
return new StatefulPersistentReader(this, guid, param, payload_pool, hist, listen, persistence);
}
else
{
return new StatefulReader(this, guid, param, payload_pool, hist, listen);
}
}
else
{
if (persistence != nullptr)
{
return new StatelessPersistentReader(this, guid, param, payload_pool, hist, listen,
persistence);
}
else
{
return new StatelessReader(this, guid, param, payload_pool, hist, listen);
}
}
};
return create_reader(ReaderOut, param, entityId, isBuiltin, enable, callback);
}
RTPSReader* RTPSParticipantImpl::find_local_reader(
const GUID_t& reader_guid)
{
// As this is only called from RTPSDomainImpl::find_local_reader, and it has
// the domain mutex taken, there is no need to take the participant mutex
// std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for (auto reader : m_allReaderList)
{
if (reader->getGuid() == reader_guid)
{
return reader;
}
}
return nullptr;
}
RTPSWriter* RTPSParticipantImpl::find_local_writer(
const GUID_t& writer_guid)
{
// As this is only called from RTPSDomainImpl::find_local_reader, and it has
// the domain mutex taken, there is no need to take the participant mutex
// std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for (auto writer : m_allWriterList)
{
if (writer->getGuid() == writer_guid)
{
return writer;
}
}
return nullptr;
}
bool RTPSParticipantImpl::enableReader(
RTPSReader* reader)
{
if (!assignEndpointListenResources(reader))
{
return false;
}
return true;
}
// Avoid the PDPSimple reader receiving a DATA while ~PDPSimple is being called and the EDP has already been destroyed.
void RTPSParticipantImpl::disableReader(
RTPSReader* reader)
{
m_receiverResourcelistMutex.lock();
for (auto it = m_receiverResourcelist.begin(); it != m_receiverResourcelist.end(); ++it)
{
it->mp_receiver->removeEndpoint(reader);
}
m_receiverResourcelistMutex.unlock();
}
bool RTPSParticipantImpl::registerWriter(
RTPSWriter* Writer,
const TopicAttributes& topicAtt,
const WriterQos& wqos)
{
return this->mp_builtinProtocols->addLocalWriter(Writer, topicAtt, wqos);
}
bool RTPSParticipantImpl::registerReader(
RTPSReader* reader,
const TopicAttributes& topicAtt,
const ReaderQos& rqos)
{
return this->mp_builtinProtocols->addLocalReader(reader, topicAtt, rqos);
}
bool RTPSParticipantImpl::updateLocalWriter(
RTPSWriter* Writer,
const TopicAttributes& topicAtt,
const WriterQos& wqos)
{
return this->mp_builtinProtocols->updateLocalWriter(Writer, topicAtt, wqos);
}
bool RTPSParticipantImpl::updateLocalReader(
RTPSReader* reader,
const TopicAttributes& topicAtt,
const ReaderQos& rqos)
{
return this->mp_builtinProtocols->updateLocalReader(reader, topicAtt, rqos);
}
/*
*
* AUXILIARY METHODS
*
*
*/
bool RTPSParticipantImpl::existsEntityId(
const EntityId_t& ent,
EndpointKind_t kind) const
{
if (kind == WRITER)
{
for (std::vector<RTPSWriter*>::const_iterator it = m_userWriterList.begin(); it != m_userWriterList.end(); ++it)
{
if (ent == (*it)->getGuid().entityId)
{
return true;
}
}
}
else
{
for (std::vector<RTPSReader*>::const_iterator it = m_userReaderList.begin(); it != m_userReaderList.end(); ++it)
{
if (ent == (*it)->getGuid().entityId)
{
return true;
}
}
}
return false;
}
/*
*
* RECEIVER RESOURCE METHODS
*
*/
bool RTPSParticipantImpl::assignEndpointListenResources(
Endpoint* endp)
{
//Tag the endpoint with the ReceiverResources
bool valid = true;
/* No need to check the lists for emptiness, as that was already done in a previous function.
In case we are using the default list of Locators, they have already been embedded into the parameters.
*/
//UNICAST
assignEndpoint2LocatorList(endp, endp->getAttributes().unicastLocatorList);
//MULTICAST
assignEndpoint2LocatorList(endp, endp->getAttributes().multicastLocatorList);
return valid;
}
bool RTPSParticipantImpl::createAndAssociateReceiverswithEndpoint(
Endpoint* pend)
{
/* This function...
- Asks the network factory for new resources
- Encapsulates the new resources within the ReceiverControlBlock list
- Associates the endpoint with the new elements in the list
- Launches the listener thread
*/
// 1 - Ask the network factory to generate the elements that do not yet exist
std::vector<ReceiverResource> newItems; //Store the newly created elements
std::vector<ReceiverResource> newItemsBuffer; //Store intermediate results
//Iterate through the list of unicast and multicast locators the endpoint has... unless it's empty.
//In that case, just use the default ones.
if (pend->getAttributes().unicastLocatorList.empty() && pend->getAttributes().multicastLocatorList.empty())
{
//Default unicast
pend->getAttributes().unicastLocatorList = m_att.defaultUnicastLocatorList;
}
createReceiverResources(pend->getAttributes().unicastLocatorList, false, true);
createReceiverResources(pend->getAttributes().multicastLocatorList, false, true);
// Associate the Endpoint with ReceiverControlBlock
assignEndpointListenResources(pend);
return true;
}
bool RTPSParticipantImpl::assignEndpoint2LocatorList(
Endpoint* endp,
LocatorList_t& list)
{
/* Note:
The previous version of this function associated (or created) ListenResources and added the endpoint to them.
It then requested the list of Locators the Listener is listening to and appended it to the LocatorList_t from the parameters.
This has been removed because it is considered redundant. For ReceiverResources that listen on multiple interfaces, only
one of the supported Locators is needed to make the match, and the case of new ListenResources being created has been removed
since it is the NetworkFactory that takes care of Resource creation.
*/
LocatorList_t finalList;
for (auto lit = list.begin(); lit != list.end(); ++lit)
{
//Iteration of all Locators within the Locator list passed down as argument
std::lock_guard<std::mutex> guard(m_receiverResourcelistMutex);
//Check among ReceiverResources whether the locator is supported or not
for (auto it = m_receiverResourcelist.begin(); it != m_receiverResourcelist.end(); ++it)
{
//Take mutex for the resource since we are going to interact with shared resources
//std::lock_guard<std::mutex> guard((*it).mtx);
if (it->Receiver->SupportsLocator(*lit))
{
//Supported! Take mutex and update lists - We maintain reader/writer discrimination just in case
it->mp_receiver->associateEndpoint(endp);
// end association between reader/writer and the receive resources
}
}
//Finished iterating through all ListenResources for a single Locator (from the parameter list).
//Since this function is called after checking with the NetworkFactory, we do not have to create any more resources.
}
return true;
}
bool RTPSParticipantImpl::createSendResources(
Endpoint* pend)
{
if (pend->m_att.remoteLocatorList.empty())
{
// Adds the default locators of every registered transport.
m_network_Factory.GetDefaultOutputLocators(pend->m_att.remoteLocatorList);
}
std::lock_guard<std::timed_mutex> guard(m_send_resources_mutex_);
//Output locators have been specified, create them
for (auto it = pend->m_att.remoteLocatorList.begin(); it != pend->m_att.remoteLocatorList.end(); ++it)
{
if (!m_network_Factory.build_send_resources(send_resource_list_, (*it)))
{
logWarning(RTPS_PARTICIPANT, "Cannot create send resource for endpoint remote locator (" <<
pend->getGuid() << ", " << (*it) << ")");
}
}
return true;
}
void RTPSParticipantImpl::createReceiverResources(
LocatorList_t& Locator_list,
bool ApplyMutation,
bool RegisterReceiver)
{
std::vector<std::shared_ptr<ReceiverResource>> newItemsBuffer;
#if HAVE_SECURITY
// An auxiliary buffer is needed in the ReceiverResource to decrypt the message,
// which imposes a limit on the received message size even if the transport allows (uint32_t) message sizes.
uint32_t max_receiver_buffer_size =
is_secure() ? std::numeric_limits<uint16_t>::max() : std::numeric_limits<uint32_t>::max();
#else
uint32_t max_receiver_buffer_size = std::numeric_limits<uint32_t>::max();
#endif // if HAVE_SECURITY
for (auto it_loc = Locator_list.begin(); it_loc != Locator_list.end(); ++it_loc)
{
bool ret = m_network_Factory.BuildReceiverResources(*it_loc, newItemsBuffer, max_receiver_buffer_size);
if (!ret && ApplyMutation)
{
uint32_t tries = 0;
while (!ret && (tries < m_att.builtin.mutation_tries))
{
tries++;
*it_loc = applyLocatorAdaptRule(*it_loc);
ret = m_network_Factory.BuildReceiverResources(*it_loc, newItemsBuffer, max_receiver_buffer_size);
}
}
for (auto it_buffer = newItemsBuffer.begin(); it_buffer != newItemsBuffer.end(); ++it_buffer)
{
std::lock_guard<std::mutex> lock(m_receiverResourcelistMutex);
//Push the new items into the ReceiverResource buffer
m_receiverResourcelist.emplace_back(*it_buffer);
//Create and init the MessageReceiver
auto mr = new MessageReceiver(this, (*it_buffer)->max_message_size());
m_receiverResourcelist.back().mp_receiver = mr;
//Start reception
if (RegisterReceiver)
{
m_receiverResourcelist.back().Receiver->RegisterReceiver(mr);
}
}
newItemsBuffer.clear();
}
}
void RTPSParticipantImpl::createSenderResources(
const LocatorList_t& locator_list)
{
std::unique_lock<std::timed_mutex> lock(m_send_resources_mutex_);
for (auto it_loc = locator_list.begin(); it_loc != locator_list.end(); ++it_loc)
{
m_network_Factory.build_send_resources(send_resource_list_, *it_loc);
}
}
void RTPSParticipantImpl::createSenderResources(
const Locator_t& locator)
{
std::unique_lock<std::timed_mutex> lock(m_send_resources_mutex_);
m_network_Factory.build_send_resources(send_resource_list_, locator);
}
bool RTPSParticipantImpl::deleteUserEndpoint(
Endpoint* p_endpoint)
{
m_receiverResourcelistMutex.lock();
for (auto it = m_receiverResourcelist.begin(); it != m_receiverResourcelist.end(); ++it)
{
it->mp_receiver->removeEndpoint(p_endpoint);
}
m_receiverResourcelistMutex.unlock();
bool found = false, found_in_users = false;
{
if (p_endpoint->getAttributes().endpointKind == WRITER)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for (auto wit = m_userWriterList.begin(); wit != m_userWriterList.end(); ++wit)
{
if ((*wit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_userWriterList.erase(wit);
found_in_users = true;
break;
}
}
for (auto wit = m_allWriterList.begin(); wit != m_allWriterList.end(); ++wit)
{
if ((*wit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_allWriterList.erase(wit);
found = true;
break;
}
}
}
else
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for (auto rit = m_userReaderList.begin(); rit != m_userReaderList.end(); ++rit)
{
if ((*rit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_userReaderList.erase(rit);
found_in_users = true;
break;
}
}
for (auto rit = m_allReaderList.begin(); rit != m_allReaderList.end(); ++rit)
{
if ((*rit)->getGuid().entityId == p_endpoint->getGuid().entityId) //Found it
{
m_allReaderList.erase(rit);
found = true;
break;
}
}
}
if (!found)
{
return false;
}
//REMOVE FOR BUILTINPROTOCOLS
if (p_endpoint->getAttributes().endpointKind == WRITER)
{
if (found_in_users)
{
mp_builtinProtocols->removeLocalWriter(static_cast<RTPSWriter*>(p_endpoint));
}
#if HAVE_SECURITY
if (p_endpoint->getAttributes().security_attributes().is_submessage_protected ||
p_endpoint->getAttributes().security_attributes().is_payload_protected)
{
m_security_manager.unregister_local_writer(p_endpoint->getGuid());
}
#endif // if HAVE_SECURITY
}
else
{
if (found_in_users)
{
mp_builtinProtocols->removeLocalReader(static_cast<RTPSReader*>(p_endpoint));
}
#if HAVE_SECURITY
if (p_endpoint->getAttributes().security_attributes().is_submessage_protected ||
p_endpoint->getAttributes().security_attributes().is_payload_protected)
{
m_security_manager.unregister_local_reader(p_endpoint->getGuid());
}
#endif // if HAVE_SECURITY
}
}
// std::lock_guard<std::recursive_mutex> guardEndpoint(*p_endpoint->getMutex());
delete(p_endpoint);
return true;
}
void RTPSParticipantImpl::normalize_endpoint_locators(
EndpointAttributes& endpoint_att)
{
// Locators with port 0, calculate port.
for (Locator_t& loc : endpoint_att.unicastLocatorList)
{
m_network_Factory.fill_default_locator_port(domain_id_, loc, m_att, false);
}
for (Locator_t& loc : endpoint_att.multicastLocatorList)
{
m_network_Factory.fill_default_locator_port(domain_id_, loc, m_att, true);
}
// Normalize unicast locators
if (!endpoint_att.unicastLocatorList.empty())
{
m_network_Factory.NormalizeLocators(endpoint_att.unicastLocatorList);
}
}
std::vector<std::string> RTPSParticipantImpl::getParticipantNames() const
{
std::vector<std::string> participant_names;
auto pdp = mp_builtinProtocols->mp_PDP;
for (auto it = pdp->ParticipantProxiesBegin(); it != pdp->ParticipantProxiesEnd(); ++it)
{
participant_names.emplace_back((*it)->m_participantName.to_string());
}
return participant_names;
}
void RTPSParticipantImpl::setGuid(
GUID_t& guid)
{
m_guid = guid;
}
void RTPSParticipantImpl::announceRTPSParticipantState()
{
return mp_builtinProtocols->announceRTPSParticipantState();
}
void RTPSParticipantImpl::stopRTPSParticipantAnnouncement()
{
return mp_builtinProtocols->stopRTPSParticipantAnnouncement();
}
void RTPSParticipantImpl::resetRTPSParticipantAnnouncement()
{
return mp_builtinProtocols->resetRTPSParticipantAnnouncement();
}
void RTPSParticipantImpl::loose_next_change()
{
//NOTE: This is replaced by the test transport
//this->mp_send_thr->loose_next_change();
}
bool RTPSParticipantImpl::newRemoteEndpointDiscovered(
const GUID_t& pguid,
int16_t userDefinedId,
EndpointKind_t kind)
{
if (m_att.builtin.discovery_config.discoveryProtocol != DiscoveryProtocol::SIMPLE ||
m_att.builtin.discovery_config.use_STATIC_EndpointDiscoveryProtocol == false)
{
logWarning(RTPS_PARTICIPANT,
"Remote Endpoints can only be activated with static discovery protocol over PDP simple protocol");
return false;
}
if (PDPSimple* pS = dynamic_cast<PDPSimple*>(mp_builtinProtocols->mp_PDP))
{
return pS->newRemoteEndpointStaticallyDiscovered(pguid, userDefinedId, kind);
}
return false;
}
void RTPSParticipantImpl::ResourceSemaphorePost()
{
if (mp_ResourceSemaphore != nullptr)
{
mp_ResourceSemaphore->post();
}
}
void RTPSParticipantImpl::ResourceSemaphoreWait()
{
if (mp_ResourceSemaphore != nullptr)
{
mp_ResourceSemaphore->wait();
}
}
void RTPSParticipantImpl::assert_remote_participant_liveliness(
const GuidPrefix_t& remote_guid)
{
if (mp_builtinProtocols && mp_builtinProtocols->mp_PDP)
{
mp_builtinProtocols->mp_PDP->assert_remote_participant_liveliness(remote_guid);
}
}
uint32_t RTPSParticipantImpl::getMaxMessageSize() const
{
#if HAVE_SECURITY
// An auxiliary buffer is needed in the ReceiverResource to decrypt the message,
// which imposes a limit on the received message size even if the transport allows (uint32_t) message sizes.
// So the sender also limits its size.
uint32_t max_receiver_buffer_size =
is_secure() ? std::numeric_limits<uint16_t>::max() : std::numeric_limits<uint32_t>::max();
#else
uint32_t max_receiver_buffer_size = std::numeric_limits<uint32_t>::max();
#endif // if HAVE_SECURITY
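// (std::min) is parenthesized so the Windows min() macro cannot expand here.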
return (std::min)(
m_network_Factory.get_max_message_size_between_transports(),
max_receiver_buffer_size);
}
uint32_t RTPSParticipantImpl::getMaxDataSize()
{
return calculateMaxDataSize(getMaxMessageSize());
}
uint32_t RTPSParticipantImpl::calculateMaxDataSize(
uint32_t length)
{
uint32_t maxDataSize = length;
#if HAVE_SECURITY
// If there is rtps message protection, reduce max size for messages,
// because extra data is added on encryption.
if (security_attributes_.is_rtps_protected)
{
maxDataSize -= m_security_manager.calculate_extra_size_for_rtps_message();
}
#endif // if HAVE_SECURITY
// RTPS header
maxDataSize -= RTPSMESSAGE_HEADER_SIZE;
return maxDataSize;
}
bool RTPSParticipantImpl::networkFactoryHasRegisteredTransports() const
{
return m_network_Factory.numberOfRegisteredTransports() > 0;
}
#if HAVE_SECURITY
bool RTPSParticipantImpl::pairing_remote_reader_with_local_writer_after_security(
const GUID_t& local_writer,
const ReaderProxyData& remote_reader_data)
{
bool return_value;
return_value = mp_builtinProtocols->mp_PDP->getEDP()->pairing_remote_reader_with_local_writer_after_security(
local_writer, remote_reader_data);
if (!return_value && mp_builtinProtocols->mp_WLP != nullptr)
{
return_value = mp_builtinProtocols->mp_WLP->pairing_remote_reader_with_local_writer_after_security(
local_writer, remote_reader_data);
}
return return_value;
}
bool RTPSParticipantImpl::pairing_remote_writer_with_local_reader_after_security(
const GUID_t& local_reader,
const WriterProxyData& remote_writer_data)
{
bool return_value;
return_value = mp_builtinProtocols->mp_PDP->getEDP()->pairing_remote_writer_with_local_reader_after_security(
local_reader, remote_writer_data);
if (!return_value && mp_builtinProtocols->mp_WLP != nullptr)
{
return_value = mp_builtinProtocols->mp_WLP->pairing_remote_writer_with_local_reader_after_security(
local_reader, remote_writer_data);
}
return return_value;
}
#endif // if HAVE_SECURITY
PDPSimple* RTPSParticipantImpl::pdpsimple()
{
return dynamic_cast<PDPSimple*>(mp_builtinProtocols->mp_PDP);
}
WLP* RTPSParticipantImpl::wlp()
{
return mp_builtinProtocols->mp_WLP;
}
fastdds::dds::builtin::TypeLookupManager* RTPSParticipantImpl::typelookup_manager() const
{
return mp_builtinProtocols->tlm_;
}
IPersistenceService* RTPSParticipantImpl::get_persistence_service(
const EndpointAttributes& param)
{
IPersistenceService* ret_val;
ret_val = PersistenceFactory::create_persistence_service(param.properties);
return ret_val != nullptr ?
ret_val :
PersistenceFactory::create_persistence_service(m_att.properties);
}
bool RTPSParticipantImpl::get_persistence_service(
const char* debug_label,
bool is_builtin,
const EndpointAttributes& param,
IPersistenceService*& service)
{
service = nullptr;
// Check whether persistence with TRANSIENT_LOCAL is also supported.
DurabilityKind_t durability_red_line = get_persistence_durability_red_line(is_builtin);
if (param.durabilityKind >= durability_red_line)
{
if (param.persistence_guid == c_Guid_Unknown)
{
logError(RTPS_PARTICIPANT, "Cannot create persistence service. Persistence GUID not specified");
return false;
}
service = get_persistence_service(param);
if (service == nullptr)
{
logError(RTPS_PARTICIPANT,
"Couldn't create writer persistence service for transient/persistent " << debug_label);
return false;
}
}
return true;
}
bool RTPSParticipantImpl::get_new_entity_id(
EntityId_t& entityId)
{
if (entityId == c_EntityId_Unknown)
{
uint32_t idnum = ++IdCounter;
octet* c = reinterpret_cast<octet*>(&idnum);
entityId.value[2] = c[0];
entityId.value[1] = c[1];
entityId.value[0] = c[2];
entityId.value[3] = 0x01; // Vendor specific
}
else
{
return !existsEntityId(entityId, READER) && !existsEntityId(entityId, WRITER);
}
return true;
}
void RTPSParticipantImpl::set_check_type_function(
std::function<bool(const std::string&)>&& check_type)
{
type_check_fn_ = std::move(check_type);
}
std::unique_ptr<RTPSMessageGroup_t> RTPSParticipantImpl::get_send_buffer()
{
return send_buffers_->get_buffer(this);
}
void RTPSParticipantImpl::return_send_buffer(
std::unique_ptr <RTPSMessageGroup_t>&& buffer)
{
send_buffers_->return_buffer(std::move(buffer));
}
uint32_t RTPSParticipantImpl::get_domain_id() const
{
return domain_id_;
}
//! Compare metatraffic locator lists searching for mutations
bool RTPSParticipantImpl::did_mutation_took_place_on_meta(
const LocatorList_t& MulticastLocatorList,
const LocatorList_t& UnicastLocatorList) const
{
if (m_att.builtin.metatrafficMulticastLocatorList == MulticastLocatorList
&& m_att.builtin.metatrafficUnicastLocatorList == UnicastLocatorList)
{
// no mutation
return false;
}
// If one of the locators is 0.0.0.0 we must replace it by all local interfaces like the framework does
std::list<Locator_t> unicast_real_locators;
LocatorListConstIterator it = UnicastLocatorList.begin(), old_it;
LocatorList_t locals;
do
{
// copy ordinary locators till the first ANY
old_it = it;
it = std::find_if(it, UnicastLocatorList.end(), IPLocator::isAny);
// copy ordinary locators
std::copy(old_it, it, std::back_inserter(unicast_real_locators));
// transform new ones if needed
if (it != UnicastLocatorList.end())
{
const Locator_t& an_any = *it;
// load interfaces if needed
if (locals.empty())
{
IPFinder::getIP4Address(&locals);
}
// add a locator for each local
std::transform(locals.begin(),
locals.end(),
std::back_inserter(unicast_real_locators),
[&an_any](const Locator_t& loc) -> Locator_t
{
Locator_t specific(loc);
specific.port = an_any.port;
specific.kind = an_any.kind;
return specific;
});
// search for the next if any
++it;
}
} while (it != UnicastLocatorList.end());
// TCP is a special case because physical ports are taken from the TransportDescriptors
struct ResetLogical
{
// use of std::unary_function to introduce the following aliases is deprecated
// using argument_type = Locator_t;
// using result_type = Locator_t&;
typedef std::vector<std::shared_ptr<fastdds::rtps::TransportDescriptorInterface>> Transports;
ResetLogical(
const Transports& tp)
: Transports_(tp)
, tcp4(nullptr)
, tcp6(nullptr)
{
for (auto desc : Transports_)
{
if (nullptr == tcp4)
{
tcp4 = dynamic_cast<fastdds::rtps::TCPv4TransportDescriptor*>(desc.get());
}
if (nullptr == tcp6)
{
tcp6 = dynamic_cast<fastdds::rtps::TCPv6TransportDescriptor*>(desc.get());
}
}
}
uint16_t Tcp4ListeningPort() const
{
return tcp4 ? ( tcp4->listening_ports.empty() ? 0 : tcp4->listening_ports[0]) : 0;
}
uint16_t Tcp6ListeningPort() const
{
return tcp6 ? ( tcp6->listening_ports.empty() ? 0 : tcp6->listening_ports[0]) : 0;
}
Locator_t operator ()(
const Locator_t& loc) const
{
Locator_t ret(loc);
switch (loc.kind)
{
case LOCATOR_KIND_TCPv4:
IPLocator::setPhysicalPort(ret, Tcp4ListeningPort());
break;
case LOCATOR_KIND_TCPv6:
IPLocator::setPhysicalPort(ret, Tcp6ListeningPort());
break;
}
return ret;
}
// reference to the transports
const Transports& Transports_;
TCPTransportDescriptor* tcp4, * tcp6;
}
transform_functor(m_att.userTransports);
// transform-copy
std::set<Locator_t> update_attributes;
std::transform(m_att.builtin.metatrafficMulticastLocatorList.begin(),
m_att.builtin.metatrafficMulticastLocatorList.end(),
std::inserter(update_attributes, update_attributes.begin()),
transform_functor);
std::transform(m_att.builtin.metatrafficUnicastLocatorList.begin(),
m_att.builtin.metatrafficUnicastLocatorList.end(),
std::inserter(update_attributes, update_attributes.begin()),
transform_functor);
std::set<Locator_t> original_ones;
std::transform(MulticastLocatorList.begin(),
MulticastLocatorList.end(),
std::inserter(original_ones, original_ones.begin()),
transform_functor);
std::transform(unicast_real_locators.begin(),
unicast_real_locators.end(),
std::inserter(original_ones, original_ones.begin()),
transform_functor);
// if equal then no mutation took place on physical ports
return !(update_attributes == original_ones);
}
DurabilityKind_t RTPSParticipantImpl::get_persistence_durability_red_line(
bool is_builtin_endpoint)
{
DurabilityKind_t durability_red_line = TRANSIENT;
if (!is_builtin_endpoint)
{
std::string* persistence_support_transient_local_property = PropertyPolicyHelper::find_property(
m_att.properties, "dds.persistence.also-support-transient-local");
if (nullptr != persistence_support_transient_local_property &&
0 == persistence_support_transient_local_property->compare("true"))
{
durability_red_line = TRANSIENT_LOCAL;
}
}
return durability_red_line;
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 20,414 | What about async user writers? We could be checking `param.mode == ASYNCHRONOUS_WRITER` | eProsima-Fast-DDS | cpp |
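The patch this comment reviews is not shown in this excerpt, so the following is only a hedged illustration of the positive check the reviewer names. It reuses identifiers that do appear in the file above (param, SWriter, ASYNCHRONOUS_WRITER); the local throughput_configured is invented for readability:

// Sketch: attach the writer's flow controller only when the writer is
// actually asynchronous, covering async user writers as suggested.
const bool throughput_configured =
    param.throughputController.bytesPerPeriod != UINT32_MAX &&
    param.throughputController.periodMillisecs != 0;
if (throughput_configured && param.mode == ASYNCHRONOUS_WRITER)
{
    std::unique_ptr<FlowController> controller(
        new ThroughputController(param.throughputController, SWriter));
    SWriter->add_flow_controller(std::move(controller));
}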
@@ -84,7 +84,7 @@ public class UseUtilityClassRule extends AbstractLombokAwareRule {
private boolean isOkUsingLombok(ASTClassOrInterfaceDeclaration parent) {
// check if there's a lombok no arg private constructor, if so skip the rest of the rules
if (hasClassLombokAnnotation()) {
- ASTAnnotation annotation = getLombokAnnotation(parent, "NoArgsConstructor");
+ ASTAnnotation annotation = parent.getAnnotation("lombok.NoArgsConstructor");
if (annotation != null) {
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.rule.design;
import java.util.List;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.java.ast.ASTAnnotation;
import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceBody;
import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceType;
import net.sourceforge.pmd.lang.java.ast.ASTConstructorDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTExtendsList;
import net.sourceforge.pmd.lang.java.ast.ASTFieldDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTMemberValuePair;
import net.sourceforge.pmd.lang.java.ast.ASTMethodDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTName;
import net.sourceforge.pmd.lang.java.ast.ASTResultType;
import net.sourceforge.pmd.lang.java.rule.AbstractLombokAwareRule;
public class UseUtilityClassRule extends AbstractLombokAwareRule {
@Override
public Object visit(ASTClassOrInterfaceBody decl, Object data) {
if (decl.jjtGetParent() instanceof ASTClassOrInterfaceDeclaration) {
ASTClassOrInterfaceDeclaration parent = (ASTClassOrInterfaceDeclaration) decl.jjtGetParent();
if (parent.isAbstract() || parent.isInterface() || isExceptionType(parent)) {
return super.visit(decl, data);
}
if (isOkUsingLombok(parent)) {
return super.visit(decl, data);
}
int i = decl.jjtGetNumChildren();
int methodCount = 0;
boolean isOK = false;
while (i > 0) {
Node p = decl.jjtGetChild(--i);
if (p.jjtGetNumChildren() == 0) {
continue;
}
Node n = skipAnnotations(p);
if (n instanceof ASTFieldDeclaration) {
if (!((ASTFieldDeclaration) n).isStatic()) {
isOK = true;
break;
}
} else if (n instanceof ASTConstructorDeclaration) {
if (((ASTConstructorDeclaration) n).isPrivate()) {
isOK = true;
break;
}
} else if (n instanceof ASTMethodDeclaration) {
ASTMethodDeclaration m = (ASTMethodDeclaration) n;
if (!m.isPrivate()) {
methodCount++;
}
if (!m.isStatic()) {
isOK = true;
break;
}
// TODO use symbol table
if (m.getMethodName().equals("suite")) {
ASTResultType res = m.getResultType();
ASTClassOrInterfaceType c = res.getFirstDescendantOfType(ASTClassOrInterfaceType.class);
if (c != null && c.hasImageEqualTo("Test")) {
isOK = true;
break;
}
}
}
}
if (!isOK && methodCount > 0) {
addViolation(data, decl);
}
}
return super.visit(decl, data);
}
private boolean isOkUsingLombok(ASTClassOrInterfaceDeclaration parent) {
// check if there's a lombok no arg private constructor, if so skip the rest of the rules
if (hasClassLombokAnnotation()) {
ASTAnnotation annotation = getLombokAnnotation(parent, "NoArgsConstructor");
if (annotation != null) {
List<ASTMemberValuePair> memberValuePairs = annotation.findDescendantsOfType(ASTMemberValuePair.class);
for (ASTMemberValuePair memberValuePair : memberValuePairs) {
// to set the access level of a constructor in lombok, you set the access property on the annotation
if ("access".equals(memberValuePair.getImage())) {
List<ASTName> names = memberValuePair.findDescendantsOfType(ASTName.class);
for (ASTName name : names) {
// check to see if the value of the member value pair ends with PRIVATE. This is from the AccessLevel enum in Lombok
if (name.getImage().endsWith("PRIVATE")) {
// if the constructor is found and the access level is private, no need to check anything else
return true;
}
}
}
}
}
}
return false;
}
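/** Returns the first child of {@code p} that is not an annotation, or the last child if every child is an annotation. */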
private Node skipAnnotations(Node p) {
int index = 0;
Node n = p.jjtGetChild(index++);
while (n instanceof ASTAnnotation && index < p.jjtGetNumChildren()) {
n = p.jjtGetChild(index++);
}
return n;
}
private boolean isExceptionType(ASTClassOrInterfaceDeclaration parent) {
ASTExtendsList extendsList = parent.getFirstChildOfType(ASTExtendsList.class);
if (extendsList != null) {
ASTClassOrInterfaceType superClass = extendsList.getFirstChildOfType(ASTClassOrInterfaceType.class);
if (superClass.getType() != null && Throwable.class.isAssignableFrom(superClass.getType())) {
return true;
}
if (superClass.getType() == null && superClass.getImage().endsWith("Exception")) {
return true;
}
}
return false;
}
}
| 1 | 13,769 | this double check for lombok annotation + specific lombok annotation seems pointless, just keep the second (specific) check. Moreover, since we have a property with ignored annotations... why don't we use it? a user may set up the property and it will be ignored here. Also, this class should probably not extend `AbstractLombokAwareRule` anymore. We have a configurable set of annotations that suppress the warning, defaulting to ALL lombok annotations, yet we only check for `lombok.NoArgsConstructor`... Maybe directly extending `AbstractIgnoredAnnotRule` and setting the default property to `lombok.NoArgsConstructor`? Or, to avoid a breaking API change, keep extending it, but override the default. For 7.0.0 we can change the class hierarchy. | pmd-pmd | java |
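A rough Java sketch of the reviewer's non-breaking alternative (keep extending the current base class, but override the default suppression set). The hook name defaultSuppressionAnnotations() is an assumption about PMD's ignored-annotation rule hierarchy, not a confirmed API:

import java.util.Collection;
import java.util.Collections;

// Hypothetical: only lombok.NoArgsConstructor suppresses this rule by default,
// instead of the full set of Lombok annotations.
public class UseUtilityClassRule extends AbstractLombokAwareRule {
    @Override
    protected Collection<String> defaultSuppressionAnnotations() {
        return Collections.singletonList("lombok.NoArgsConstructor");
    }
    // visit(...) logic stays as in the file above.
}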
@@ -67,9 +67,11 @@ func Mux(pattern string, mux *http.ServeMux) InboundOption {
// route requests through YARPC. The http.Handler returned by this function
// may delegate requests to the provided YARPC handler to route them through
// YARPC.
+// If more than one Interceptor is provided, they will be invoked in the same
+// order as they are passed in.
func Interceptor(interceptor func(yarpcHandler http.Handler) http.Handler) InboundOption {
return func(i *Inbound) {
- i.interceptor = interceptor
+ i.interceptors = append(i.interceptors, interceptor)
}
}
| 1 | // Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"net"
"net/http"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/api/x/introspection"
intnet "go.uber.org/yarpc/internal/net"
"go.uber.org/yarpc/pkg/lifecycle"
"go.uber.org/yarpc/yarpcerrors"
"go.uber.org/zap"
)
// We want a value that's around 5 seconds, but slightly higher than how
// long a successful HTTP shutdown can take.
// There's a specific path in the HTTP shutdown path that can take 5 seconds:
// https://golang.org/src/net/http/server.go?s=83923:83977#L2710
// This avoids timeouts in shutdown caused by new idle connections, without
// making the timeout too large.
const defaultShutdownTimeout = 6 * time.Second
// InboundOption customizes the behavior of an HTTP Inbound constructed with
// NewInbound.
type InboundOption func(*Inbound)
func (InboundOption) httpOption() {}
// Mux specifies that the HTTP server should make the YARPC endpoint available
// under the given pattern on the given ServeMux. By default, the YARPC
// service is made available on all paths of the HTTP server. By specifying a
// ServeMux, users can narrow the endpoints under which the YARPC service is
// available and offer their own non-YARPC endpoints.
func Mux(pattern string, mux *http.ServeMux) InboundOption {
return func(i *Inbound) {
i.mux = mux
i.muxPattern = pattern
}
}
// Interceptor specifies a function which can wrap the YARPC handler. If
// provided, this function will be called with an http.Handler which will
// route requests through YARPC. The http.Handler returned by this function
// may delegate requests to the provided YARPC handler to route them through
// YARPC.
func Interceptor(interceptor func(yarpcHandler http.Handler) http.Handler) InboundOption {
return func(i *Inbound) {
i.interceptor = interceptor
}
}
// GrabHeaders specifies additional headers that are not prefixed with
// ApplicationHeaderPrefix that should be propagated to the caller.
//
// All headers given must begin with x- or X- or the Inbound that the
// returned option is passed to will return an error when Start is called.
//
// Headers specified with GrabHeaders are case-insensitive.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
func GrabHeaders(headers ...string) InboundOption {
return func(i *Inbound) {
for _, header := range headers {
i.grabHeaders[strings.ToLower(header)] = struct{}{}
}
}
}
// ShutdownTimeout specifies the maximum duration the inbound should wait for
// closing idle connections, and pending calls to complete.
//
// Set to 0 to wait for a complete drain.
//
// Defaults to 6 seconds.
func ShutdownTimeout(timeout time.Duration) InboundOption {
return func(i *Inbound) {
i.shutdownTimeout = timeout
}
}
// NewInbound builds a new HTTP inbound that listens on the given address and
// shares this transport.
func (t *Transport) NewInbound(addr string, opts ...InboundOption) *Inbound {
i := &Inbound{
once: lifecycle.NewOnce(),
addr: addr,
shutdownTimeout: defaultShutdownTimeout,
tracer: t.tracer,
logger: t.logger,
transport: t,
grabHeaders: make(map[string]struct{}),
bothResponseError: true,
}
for _, opt := range opts {
opt(i)
}
return i
}
// Inbound receives YARPC requests using an HTTP server. It may be constructed
// using the NewInbound method on the Transport.
type Inbound struct {
addr string
mux *http.ServeMux
muxPattern string
server *intnet.HTTPServer
shutdownTimeout time.Duration
router transport.Router
tracer opentracing.Tracer
logger *zap.Logger
transport *Transport
grabHeaders map[string]struct{}
interceptor func(http.Handler) http.Handler
once *lifecycle.Once
// should only be false in testing
bothResponseError bool
}
// Tracer configures a tracer on this inbound.
func (i *Inbound) Tracer(tracer opentracing.Tracer) *Inbound {
i.tracer = tracer
return i
}
// SetRouter configures a router to handle incoming requests.
// This satisfies the transport.Inbound interface, and would be called
// by a dispatcher when it starts.
func (i *Inbound) SetRouter(router transport.Router) {
i.router = router
}
// Transports returns the inbound's HTTP transport.
func (i *Inbound) Transports() []transport.Transport {
return []transport.Transport{i.transport}
}
// Start starts the inbound with a given service detail, opening a listening
// socket.
func (i *Inbound) Start() error {
return i.once.Start(i.start)
}
func (i *Inbound) start() error {
if i.router == nil {
return yarpcerrors.Newf(yarpcerrors.CodeInternal, "no router configured for transport inbound")
}
for header := range i.grabHeaders {
if !strings.HasPrefix(header, "x-") {
return yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, "header %s does not begin with 'x-'", header)
}
}
var httpHandler http.Handler = handler{
router: i.router,
tracer: i.tracer,
grabHeaders: i.grabHeaders,
bothResponseError: i.bothResponseError,
logger: i.logger,
}
if i.interceptor != nil {
httpHandler = i.interceptor(httpHandler)
}
if i.mux != nil {
i.mux.Handle(i.muxPattern, httpHandler)
httpHandler = i.mux
}
i.server = intnet.NewHTTPServer(&http.Server{
Addr: i.addr,
Handler: httpHandler,
})
if err := i.server.ListenAndServe(); err != nil {
return err
}
i.addr = i.server.Listener().Addr().String() // in case it changed
i.logger.Info("started HTTP inbound", zap.String("address", i.addr))
if len(i.router.Procedures()) == 0 {
i.logger.Warn("no procedures specified for HTTP inbound")
}
return nil
}
// Stop the inbound using Shutdown.
func (i *Inbound) Stop() error {
ctx, cancel := context.WithTimeout(context.Background(), i.shutdownTimeout)
defer cancel()
return i.shutdown(ctx)
}
// shutdown the inbound, closing the listening socket, closing idle
// connections, and waiting for all pending calls to complete.
func (i *Inbound) shutdown(ctx context.Context) error {
return i.once.Stop(func() error {
if i.server == nil {
return nil
}
return i.server.Shutdown(ctx)
})
}
// IsRunning returns whether the inbound is currently running
func (i *Inbound) IsRunning() bool {
return i.once.IsRunning()
}
// Addr returns the address on which the server is listening. Returns nil if
// Start has not been called yet.
func (i *Inbound) Addr() net.Addr {
if i.server == nil {
return nil
}
listener := i.server.Listener()
if listener == nil {
return nil
}
return listener.Addr()
}
// Introspect returns the state of the inbound for introspection purposes.
func (i *Inbound) Introspect() introspection.InboundStatus {
state := "Stopped"
if i.IsRunning() {
state = "Started"
}
var addrString string
if addr := i.Addr(); addr != nil {
addrString = addr.String()
}
return introspection.InboundStatus{
Transport: "http",
Endpoint: addrString,
State: state,
}
}
| 1 | 19,768 | nit: the ordering may be misunderstood, as "invoked in the same order" could suggest the passed-in functions are called in-order, but the actual wrapping is LIFO. Some other ways to describe it (don't think any of these are ideal, but maybe it will help you come up with something better), * the handler returned by the first interceptor is executed first * interceptors are applied outside-in (don't like the terms outside/in though) * interceptors are applied in LIFO order, leading to an earlier interceptor's handler being executed before later interceptors' handlers | yarpc-yarpc-go | go |
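A small, self-contained Go sketch of the wrapping order the reviewer describes; it assumes the inbound applies the appended interceptors in reverse (LIFO) so that the first Interceptor option passed ends up outermost. The names here are illustrative and are not yarpc internals:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// tag wraps next so we can observe the order handlers run in.
func tag(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter", name)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("yarpc handler")
	})
	interceptors := []func(http.Handler) http.Handler{
		func(next http.Handler) http.Handler { return tag("first", next) },
		func(next http.Handler) http.Handler { return tag("second", next) },
	}
	// Apply in reverse so the first interceptor wraps outermost (LIFO wrapping):
	for i := len(interceptors) - 1; i >= 0; i-- {
		h = interceptors[i](h)
	}
	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/", nil))
	// Prints: enter first / enter second / yarpc handler
}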
@@ -101,6 +101,8 @@ import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
+import static org.apache.solr.common.params.QueryElevationParams.ELEVATE_DOCS_WITHOUT_MATCHING_Q;
+
/**
* A component to elevate some documents to the top of the result set.
* | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.component;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.lang.invoke.MethodHandles;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
import java.util.SortedSet;
import java.util.WeakHashMap;
import java.util.function.Consumer;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.cursors.IntIntCursor;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.ObjectArrays;
import com.google.common.collect.Sets;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.SimpleFieldComparator;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.QueryElevationParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.DOMUtil;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.XmlConfigFile;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.transform.ElevatedMarkerFactory;
import org.apache.solr.response.transform.ExcludedMarkerFactory;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SortSpec;
import org.apache.solr.search.grouping.GroupingSpecification;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.VersionedFile;
import org.apache.solr.util.plugin.SolrCoreAware;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
/**
* A component to elevate some documents to the top of the result set.
*
* @since solr 1.3
*/
@SuppressWarnings("WeakerAccess")
public class QueryElevationComponent extends SearchComponent implements SolrCoreAware {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
// Constants used in solrconfig.xml
@VisibleForTesting
static final String FIELD_TYPE = "queryFieldType";
@VisibleForTesting
static final String CONFIG_FILE = "config-file";
private static final String EXCLUDE = "exclude";
/** @see #getBoostDocs(SolrIndexSearcher, Set, Map) */
private static final String BOOSTED_DOCIDS = "BOOSTED_DOCIDS";
/** Key to {@link SolrQueryRequest#getContext()} for a {@code Set<BytesRef>} of included IDs in configured
* order (so-called priority). */
public static final String BOOSTED = "BOOSTED";
/** Key to {@link SolrQueryRequest#getContext()} for a {@code Set<BytesRef>} of excluded IDs. */
public static final String EXCLUDED = "EXCLUDED";
private static final boolean DEFAULT_FORCE_ELEVATION = false;
private static final boolean DEFAULT_USE_CONFIGURED_ELEVATED_ORDER = true;
private static final boolean DEFAULT_SUBSET_MATCH = false;
private static final String DEFAULT_EXCLUDE_MARKER_FIELD_NAME = "excluded";
private static final String DEFAULT_EDITORIAL_MARKER_FIELD_NAME = "elevated";
protected SolrParams initArgs;
protected Analyzer queryAnalyzer;
protected SchemaField uniqueKeyField;
/** @see QueryElevationParams#FORCE_ELEVATION */
protected boolean forceElevation;
/** @see QueryElevationParams#USE_CONFIGURED_ELEVATED_ORDER */
protected boolean useConfiguredElevatedOrder;
protected boolean initialized;
/**
* For each IndexReader, keep an ElevationProvider when the configuration is loaded from the data directory.
* The key is null if loaded from the config directory, and is never re-loaded.
*/
private final Map<IndexReader, ElevationProvider> elevationProviderCache = new WeakHashMap<>();
@Override
public void init(@SuppressWarnings({"rawtypes"})NamedList args) {
this.initArgs = args.toSolrParams();
}
@Override
public void inform(SolrCore core) {
initialized = false;
try {
parseFieldType(core);
setUniqueKeyField(core);
parseExcludedMarkerFieldName(core);
parseEditorialMarkerFieldName(core);
parseForceElevation();
parseUseConfiguredOrderForElevations();
loadElevationConfiguration(core);
initialized = true;
} catch (InitializationException e) {
assert !initialized;
handleInitializationException(e, e.exceptionCause);
} catch (Exception e) {
assert !initialized;
handleInitializationException(e, InitializationExceptionCause.OTHER);
}
}
private void parseFieldType(SolrCore core) throws InitializationException {
String a = initArgs.get(FIELD_TYPE);
if (a != null) {
FieldType ft = core.getLatestSchema().getFieldTypes().get(a);
if (ft == null) {
throw new InitializationException("Parameter " + FIELD_TYPE + " defines an unknown field type \"" + a + "\"", InitializationExceptionCause.UNKNOWN_FIELD_TYPE);
}
queryAnalyzer = ft.getQueryAnalyzer();
}
}
private void setUniqueKeyField(SolrCore core) throws InitializationException {
uniqueKeyField = core.getLatestSchema().getUniqueKeyField();
if (uniqueKeyField == null) {
throw new InitializationException("This component requires the schema to have a uniqueKeyField", InitializationExceptionCause.MISSING_UNIQUE_KEY_FIELD);
}
}
private void parseExcludedMarkerFieldName(SolrCore core) {
String markerName = initArgs.get(QueryElevationParams.EXCLUDE_MARKER_FIELD_NAME, DEFAULT_EXCLUDE_MARKER_FIELD_NAME);
core.addTransformerFactory(markerName, new ExcludedMarkerFactory());
}
private void parseEditorialMarkerFieldName(SolrCore core) {
String markerName = initArgs.get(QueryElevationParams.EDITORIAL_MARKER_FIELD_NAME, DEFAULT_EDITORIAL_MARKER_FIELD_NAME);
core.addTransformerFactory(markerName, new ElevatedMarkerFactory());
}
private void parseForceElevation() {
forceElevation = initArgs.getBool(QueryElevationParams.FORCE_ELEVATION, DEFAULT_FORCE_ELEVATION);
}
private void parseUseConfiguredOrderForElevations() {
useConfiguredElevatedOrder = initArgs.getBool(QueryElevationParams.USE_CONFIGURED_ELEVATED_ORDER, DEFAULT_USE_CONFIGURED_ELEVATED_ORDER);
}
/**
* (Re)Loads elevation configuration.
*
* @param core The core holding this component.
* @return The number of elevation rules parsed.
*/
protected int loadElevationConfiguration(SolrCore core) throws Exception {
synchronized (elevationProviderCache) {
elevationProviderCache.clear();
String configFileName = initArgs.get(CONFIG_FILE);
if (configFileName == null) {
// Throw an exception which is handled by handleInitializationException().
// If not overridden handleInitializationException() simply skips this exception.
throw new InitializationException("Missing component parameter " + CONFIG_FILE + " - it has to define the path to the elevation configuration file", InitializationExceptionCause.NO_CONFIG_FILE_DEFINED);
}
boolean configFileExists = false;
ElevationProvider elevationProvider = NO_OP_ELEVATION_PROVIDER;
// check if using ZooKeeper
ZkController zkController = core.getCoreContainer().getZkController();
if (zkController != null) {
// TODO : shouldn't have to keep reading the config name when it has been read before
configFileExists = zkController.configFileExists(zkController.getZkStateReader().readConfigName(core.getCoreDescriptor().getCloudDescriptor().getCollectionName()), configFileName);
} else {
File fC = new File(core.getResourceLoader().getConfigDir(), configFileName);
File fD = new File(core.getDataDir(), configFileName);
if (fC.exists() == fD.exists()) {
InitializationException e = new InitializationException("Missing config file \"" + configFileName + "\" - either " + fC.getAbsolutePath() + " or " + fD.getAbsolutePath() + " must exist, but not both", InitializationExceptionCause.MISSING_CONFIG_FILE);
elevationProvider = handleConfigLoadingException(e, true);
elevationProviderCache.put(null, elevationProvider);
} else if (fC.exists()) {
if (fC.length() == 0) {
InitializationException e = new InitializationException("Empty config file \"" + configFileName + "\" - " + fC.getAbsolutePath(), InitializationExceptionCause.EMPTY_CONFIG_FILE);
elevationProvider = handleConfigLoadingException(e, true);
} else {
configFileExists = true;
if (log.isInfoEnabled()) {
log.info("Loading QueryElevation from: {}", fC.getAbsolutePath());
}
XmlConfigFile cfg = new XmlConfigFile(core.getResourceLoader(), configFileName);
elevationProvider = loadElevationProvider(cfg);
}
elevationProviderCache.put(null, elevationProvider);
}
}
//in other words, we think this is in the data dir, not the conf dir
if (!configFileExists) {
// preload the first data
RefCounted<SolrIndexSearcher> searchHolder = null;
try {
searchHolder = core.getNewestSearcher(false);
if (searchHolder == null) {
elevationProvider = NO_OP_ELEVATION_PROVIDER;
} else {
IndexReader reader = searchHolder.get().getIndexReader();
elevationProvider = getElevationProvider(reader, core);
}
} finally {
if (searchHolder != null) searchHolder.decref();
}
}
return elevationProvider.size();
}
}
/**
* Handles the exception that occurred while initializing this component.
* If this method does not throw an exception, this component silently fails to initialize
* and is disabled via the field {@link #initialized}, which remains {@code false}.
*/
protected void handleInitializationException(Exception exception, InitializationExceptionCause cause) {
if (cause != InitializationExceptionCause.NO_CONFIG_FILE_DEFINED) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Error initializing " + QueryElevationComponent.class.getSimpleName(), exception);
}
}
/**
* Handles an exception that occurred while loading the configuration resource.
*
* @param e The exception caught.
* @param resourceAccessIssue <code>true</code> if the exception has been thrown
* because the resource could not be accessed (missing or cannot be read)
* or the config file is empty; <code>false</code> if the resource has
* been found and accessed but the error occurred while loading the resource
* (invalid format, incomplete or corrupted).
* @return The {@link ElevationProvider} to use if the exception is absorbed. If {@code null}
* is returned, the {@link #NO_OP_ELEVATION_PROVIDER} is used but not cached in
* the {@link ElevationProvider} cache.
* @throws E If the exception is not absorbed.
*/
protected <E extends Exception> ElevationProvider handleConfigLoadingException(E e, boolean resourceAccessIssue) throws E {
throw e;
}
/**
* Gets the {@link ElevationProvider} from the data dir or from the cache.
*
* @return The cached or loaded {@link ElevationProvider}.
* @throws java.io.IOException If the configuration resource cannot be found, or if an I/O error occurs while analyzing the triggering queries.
* @throws org.xml.sax.SAXException If the configuration resource is not a valid XML content.
* @throws javax.xml.parsers.ParserConfigurationException If the configuration resource is not a valid XML configuration.
* @throws RuntimeException If the configuration resource is not an XML content of the expected format
* (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}).
*/
@VisibleForTesting
ElevationProvider getElevationProvider(IndexReader reader, SolrCore core) throws Exception {
synchronized (elevationProviderCache) {
ElevationProvider elevationProvider;
elevationProvider = elevationProviderCache.get(null);
if (elevationProvider != null) return elevationProvider;
elevationProvider = elevationProviderCache.get(reader);
if (elevationProvider == null) {
Exception loadingException = null;
boolean resourceAccessIssue = false;
try {
elevationProvider = loadElevationProvider(core);
} catch (IOException e) {
loadingException = e;
resourceAccessIssue = true;
} catch (Exception e) {
loadingException = e;
}
boolean shouldCache = true;
if (loadingException != null) {
elevationProvider = handleConfigLoadingException(loadingException, resourceAccessIssue);
if (elevationProvider == null) {
elevationProvider = NO_OP_ELEVATION_PROVIDER;
shouldCache = false;
}
}
if (shouldCache) {
elevationProviderCache.put(reader, elevationProvider);
}
}
assert elevationProvider != null;
return elevationProvider;
}
}
/**
* Loads the {@link ElevationProvider} from the data dir.
*
* @return The loaded {@link ElevationProvider}.
* @throws java.io.IOException If the configuration resource cannot be found, or if an I/O error occurs while analyzing the triggering queries.
* @throws org.xml.sax.SAXException If the configuration resource is not a valid XML content.
* @throws javax.xml.parsers.ParserConfigurationException If the configuration resource is not a valid XML configuration.
* @throws RuntimeException If the configuration resource is not an XML content of the expected format
* (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}).
*/
private ElevationProvider loadElevationProvider(SolrCore core) throws IOException, SAXException, ParserConfigurationException {
String configFileName = initArgs.get(CONFIG_FILE);
if (configFileName == null) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"QueryElevationComponent must specify argument: " + CONFIG_FILE);
}
log.info("Loading QueryElevation from data dir: {}", configFileName);
XmlConfigFile cfg;
ZkController zkController = core.getCoreContainer().getZkController();
if (zkController != null) {
cfg = new XmlConfigFile(core.getResourceLoader(), configFileName, null, null);
} else {
InputStream is = VersionedFile.getLatestFile(core.getDataDir(), configFileName);
cfg = new XmlConfigFile(core.getResourceLoader(), configFileName, new InputSource(is), null);
}
ElevationProvider elevationProvider = loadElevationProvider(cfg);
assert elevationProvider != null;
return elevationProvider;
}
/**
* Loads the {@link ElevationProvider}.
*
* @throws RuntimeException If the config does not provide an XML content of the expected format
* (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}).
*/
protected ElevationProvider loadElevationProvider(XmlConfigFile config) {
Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap = new LinkedHashMap<>();
XPath xpath = XPathFactory.newInstance().newXPath();
NodeList nodes = (NodeList) config.evaluate("elevate/query", XPathConstants.NODESET);
for (int i = 0; i < nodes.getLength(); i++) {
Node node = nodes.item(i);
String queryString = DOMUtil.getAttr(node, "text", "missing query 'text'");
String matchString = DOMUtil.getAttr(node, "match");
ElevatingQuery elevatingQuery = new ElevatingQuery(queryString, isSubsetMatchPolicy(matchString));
NodeList children;
try {
children = (NodeList) xpath.evaluate("doc", node, XPathConstants.NODESET);
} catch (XPathExpressionException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"query requires '<doc .../>' child");
}
if (children.getLength() == 0) { // weird
continue;
}
ElevationBuilder elevationBuilder = new ElevationBuilder();
for (int j = 0; j < children.getLength(); j++) {
Node child = children.item(j);
String id = DOMUtil.getAttr(child, "id", "missing 'id'");
String e = DOMUtil.getAttr(child, EXCLUDE, null);
if (e != null) {
if (Boolean.valueOf(e)) {
elevationBuilder.addExcludedIds(Collections.singleton(id));
continue;
}
}
elevationBuilder.addElevatedIds(Collections.singletonList(id));
}
// It is allowed to define multiple times different elevations for the same query. In this case the elevations
// are merged in the ElevationBuilder (they will be triggered at the same time).
ElevationBuilder previousElevationBuilder = elevationBuilderMap.get(elevatingQuery);
if (previousElevationBuilder == null) {
elevationBuilderMap.put(elevatingQuery, elevationBuilder);
} else {
previousElevationBuilder.merge(elevationBuilder);
}
}
return createElevationProvider(elevationBuilderMap);
}
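  /*
   * Editor's illustration (not part of the original source): the XML shape parsed by
   * loadElevationProvider() above. Two <query> entries with the same text and match policy
   * are merged into a single elevation:
   *
   *   <elevate>
   *     <query text="foo bar" match="subset">
   *       <doc id="1" />
   *       <doc id="2" exclude="true" />
   *     </query>
   *     <query text="foo bar" match="subset">
   *       <doc id="3" />   (merged with the entry above)
   *     </query>
   *   </elevate>
   */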
protected boolean isSubsetMatchPolicy(String matchString) {
if (matchString == null) {
return DEFAULT_SUBSET_MATCH;
} else if (matchString.equalsIgnoreCase("exact")) {
return false;
} else if (matchString.equalsIgnoreCase("subset")) {
return true;
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"invalid value \"" + matchString + "\" for query match attribute");
}
}
//---------------------------------------------------------------------------------
// SearchComponent
//---------------------------------------------------------------------------------
@Override
public void prepare(ResponseBuilder rb) throws IOException {
if (!initialized || !rb.req.getParams().getBool(QueryElevationParams.ENABLE, true)) {
return;
}
Elevation elevation = getElevation(rb);
if (elevation != null) {
setQuery(rb, elevation);
setSort(rb, elevation);
}
if (rb.isDebug() && rb.isDebugQuery()) {
addDebugInfo(rb, elevation);
}
}
@Override
public void process(ResponseBuilder rb) throws IOException {
// Do nothing -- the real work is modifying the input query
}
protected Elevation getElevation(ResponseBuilder rb) {
SolrParams localParams = rb.getQparser().getLocalParams();
String queryString = localParams == null ? rb.getQueryString() : localParams.get(QueryParsing.V);
if (queryString == null || rb.getQuery() == null) {
return null;
}
SolrParams params = rb.req.getParams();
String paramElevatedIds = params.get(QueryElevationParams.IDS);
String paramExcludedIds = params.get(QueryElevationParams.EXCLUDE);
try {
if (paramElevatedIds != null || paramExcludedIds != null) {
List<String> elevatedIds = paramElevatedIds != null ? StrUtils.splitSmart(paramElevatedIds,",", true) : Collections.emptyList();
List<String> excludedIds = paramExcludedIds != null ? StrUtils.splitSmart(paramExcludedIds, ",", true) : Collections.emptyList();
return new ElevationBuilder().addElevatedIds(elevatedIds).addExcludedIds(excludedIds).build();
} else {
IndexReader reader = rb.req.getSearcher().getIndexReader();
return getElevationProvider(reader, rb.req.getCore()).getElevationForQuery(queryString);
}
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error loading elevation", e);
}
}
private void setQuery(ResponseBuilder rb, Elevation elevation) {
rb.req.getContext().put(BOOSTED, elevation.elevatedIds);
// Change the query to insert forced documents
SolrParams params = rb.req.getParams();
if (params.getBool(QueryElevationParams.EXCLUSIVE, false)) {
// We only want these elevated results
rb.setQuery(new BoostQuery(elevation.includeQuery, 0f));
} else {
BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
queryBuilder.add(rb.getQuery(), BooleanClause.Occur.SHOULD);
queryBuilder.add(new BoostQuery(elevation.includeQuery, 0f), BooleanClause.Occur.SHOULD);
if (elevation.excludeQueries != null) {
if (params.getBool(QueryElevationParams.MARK_EXCLUDES, false)) {
// We are only going to mark items as excluded, not actually exclude them.
// This works with the EditorialMarkerFactory.
rb.req.getContext().put(EXCLUDED, elevation.excludedIds);
} else {
for (TermQuery tq : elevation.excludeQueries) {
queryBuilder.add(tq, BooleanClause.Occur.MUST_NOT);
}
}
}
rb.setQuery(queryBuilder.build());
}
}
private void setSort(ResponseBuilder rb, Elevation elevation) throws IOException {
if (elevation.elevatedIds.isEmpty()) {
return;
}
boolean forceElevation = rb.req.getParams().getBool(QueryElevationParams.FORCE_ELEVATION, this.forceElevation);
boolean useConfigured = rb.req.getParams().getBool(QueryElevationParams.USE_CONFIGURED_ELEVATED_ORDER, this.useConfiguredElevatedOrder);
final IntIntHashMap elevatedWithPriority = getBoostDocs(rb.req.getSearcher(), elevation.elevatedIds, rb.req.getContext());
ElevationComparatorSource comparator = new ElevationComparatorSource(elevatedWithPriority, useConfigured);
setSortSpec(rb, forceElevation, comparator);
setGroupingSpec(rb, forceElevation, comparator);
}
private void setSortSpec(ResponseBuilder rb, boolean forceElevation, ElevationComparatorSource comparator) {
// if the sort is 'score desc' use a custom sorting method to
// insert documents in their proper place
SortSpec sortSpec = rb.getSortSpec();
if (sortSpec.getSort() == null) {
sortSpec.setSortAndFields(
new Sort(
new SortField("_elevate_", comparator, true),
new SortField(null, SortField.Type.SCORE, false)),
Arrays.asList(new SchemaField[2]));
} else {
// Check if the sort is based on score
SortSpec modSortSpec = this.modifySortSpec(sortSpec, forceElevation, comparator);
if (null != modSortSpec) {
rb.setSortSpec(modSortSpec);
}
}
}
private void setGroupingSpec(ResponseBuilder rb, boolean forceElevation, ElevationComparatorSource comparator) {
// alter the sorting in the grouping specification if there is one
GroupingSpecification groupingSpec = rb.getGroupingSpec();
if(groupingSpec != null) {
SortSpec groupSortSpec = groupingSpec.getGroupSortSpec();
SortSpec modGroupSortSpec = this.modifySortSpec(groupSortSpec, forceElevation, comparator);
if (modGroupSortSpec != null) {
groupingSpec.setGroupSortSpec(modGroupSortSpec);
}
SortSpec withinGroupSortSpec = groupingSpec.getWithinGroupSortSpec();
SortSpec modWithinGroupSortSpec = this.modifySortSpec(withinGroupSortSpec, forceElevation, comparator);
if (modWithinGroupSortSpec != null) {
groupingSpec.setWithinGroupSortSpec(modWithinGroupSortSpec);
}
}
}
private SortSpec modifySortSpec(SortSpec current, boolean forceElevation, ElevationComparatorSource comparator) {
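    // Editor's note: the net effect is to put an "_elevate_" sort in front of any score sort,
    // e.g. "score desc" becomes ("_elevate_", "score desc") so elevated documents rank first;
    // with forceElevation, a non-score primary sort such as "price asc" gets the elevation
    // sort prepended as well.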
boolean modify = false;
SortField[] currentSorts = current.getSort().getSort();
List<SchemaField> currentFields = current.getSchemaFields();
ArrayList<SortField> sorts = new ArrayList<>(currentSorts.length + 1);
List<SchemaField> fields = new ArrayList<>(currentFields.size() + 1);
// If elevation is forced and the primary sort is not by score, prepend the elevation sort
if (forceElevation && currentSorts[0].getType() != SortField.Type.SCORE) {
sorts.add(new SortField("_elevate_", comparator, true));
fields.add(null);
modify = true;
}
for (int i = 0; i < currentSorts.length; i++) {
SortField sf = currentSorts[i];
if (sf.getType() == SortField.Type.SCORE) {
sorts.add(new SortField("_elevate_", comparator, !sf.getReverse()));
fields.add(null);
modify = true;
}
sorts.add(sf);
fields.add(currentFields.get(i));
}
return modify ?
new SortSpec(new Sort(sorts.toArray(new SortField[0])),
fields,
current.getCount(),
current.getOffset())
: null;
}
private void addDebugInfo(ResponseBuilder rb, Elevation elevation) {
List<String> match = null;
if (elevation != null) {
// Extract the elevated terms into a list
match = new ArrayList<>(elevation.includeQuery.clauses().size());
for (BooleanClause clause : elevation.includeQuery.clauses()) {
TermQuery tq = (TermQuery) clause.getQuery();
match.add(tq.getTerm().text());
}
}
SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<>();
dbg.add("q", rb.getQueryString());
dbg.add("match", match);
rb.addDebugInfo("queryBoosting", dbg);
}
//---------------------------------------------------------------------------------
// Boosted docs helper
//---------------------------------------------------------------------------------
/**
* Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value > 0.
* @param indexSearcher the SolrIndexSearcher; required
* @param boosted are the set of uniqueKey values to be boosted in priority order. If null; returns null.
* @param context the {@link SolrQueryRequest#getContext()} or null if none. We'll cache our results here.
*/
//TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED key?
@SuppressWarnings({"unchecked"})
public static IntIntHashMap getBoostDocs(SolrIndexSearcher indexSearcher, Set<BytesRef> boosted,
@SuppressWarnings({"rawtypes"})Map context) throws IOException {
IntIntHashMap boostDocs = null;
if (boosted != null) {
//First see if it's already in the request context. Could have been put there by another caller.
if (context != null) {
boostDocs = (IntIntHashMap) context.get(BOOSTED_DOCIDS);
if (boostDocs != null) {
return boostDocs;
}
}
//Not in the context yet so load it.
boostDocs = new IntIntHashMap(boosted.size()); // docId to boost
int priority = boosted.size() + 1; // the corresponding priority for each boosted key (starts at this; decrements down)
for (BytesRef uniqueKey : boosted) {
priority--; // therefore first == boosted.size(); last will be 1
long segAndId = indexSearcher.lookupId(uniqueKey); // higher 32 bits == segment ID, low 32 bits == doc ID
if (segAndId == -1) { // not found
continue;
}
int seg = (int) (segAndId >> 32);
int localDocId = (int) segAndId;
final IndexReaderContext indexReaderContext = indexSearcher.getTopReaderContext().children().get(seg);
int docId = indexReaderContext.docBaseInParent + localDocId;
boostDocs.put(docId, priority);
}
assert priority == 1; // the last priority (lowest)
}
if (context != null) {
context.put(BOOSTED_DOCIDS, boostDocs);
}
return boostDocs;
}
//---------------------------------------------------------------------------------
// SolrInfoBean
//---------------------------------------------------------------------------------
@Override
public String getDescription() {
return "Query Boosting -- boost particular documents for a given query";
}
//---------------------------------------------------------------------------------
// Overrides
//---------------------------------------------------------------------------------
/**
* Creates the {@link ElevationProvider} to set during configuration loading. The same instance will be used later
* when elevating results for queries.
*
* @param elevationBuilderMap map of all {@link ElevatingQuery} and their corresponding {@link ElevationBuilder}.
* @return The created {@link ElevationProvider}.
*/
protected ElevationProvider createElevationProvider(Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) {
return new DefaultElevationProvider(new TrieSubsetMatcher.Builder<>(), elevationBuilderMap);
}
//---------------------------------------------------------------------------------
// Query analysis and tokenization
//---------------------------------------------------------------------------------
/**
* Analyzes the provided query string and returns a concatenation of the analyzed tokens.
*/
public String analyzeQuery(String query) {
StringBuilder concatTerms = new StringBuilder();
analyzeQuery(query, concatTerms::append);
return concatTerms.toString();
}
/**
* Analyzes the provided query string, tokenizes the terms, and adds them to the provided {@link Consumer}.
*/
protected void analyzeQuery(String query, Consumer<CharSequence> termsConsumer) {
try (TokenStream tokens = queryAnalyzer.tokenStream("", query)) {
tokens.reset();
CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);
while (tokens.incrementToken()) {
termsConsumer.accept(termAtt);
}
tokens.end();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
//---------------------------------------------------------------------------------
// Testing
//---------------------------------------------------------------------------------
/**
* Helpful for testing without loading config.xml.
*
* @param reader The {@link org.apache.lucene.index.IndexReader}.
* @param queryString The query for which to elevate some documents. If an elevation has already been
* defined for this query, this method overwrites it.
* @param subsetMatch <code>true</code> for query subset match; <code>false</code> for query exact match.
* @param elevatedIds The readable ids of the documents to set as top results for the provided query.
* @param excludedIds The readable ids of the document to exclude from results for the provided query.
*/
@VisibleForTesting
void setTopQueryResults(IndexReader reader, String queryString, boolean subsetMatch,
String[] elevatedIds, String[] excludedIds) {
clearElevationProviderCache();
ElevatingQuery elevatingQuery = new ElevatingQuery(queryString, subsetMatch);
ElevationBuilder elevationBuilder = new ElevationBuilder();
elevationBuilder.addElevatedIds(elevatedIds == null ? Collections.emptyList() : Arrays.asList(elevatedIds));
elevationBuilder.addExcludedIds(excludedIds == null ? Collections.emptyList() : Arrays.asList(excludedIds));
Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap = ImmutableMap.of(elevatingQuery, elevationBuilder);
synchronized (elevationProviderCache) {
elevationProviderCache.computeIfAbsent(reader, k -> createElevationProvider(elevationBuilderMap));
}
}
@VisibleForTesting
void clearElevationProviderCache() {
synchronized (elevationProviderCache) {
elevationProviderCache.clear();
}
}
//---------------------------------------------------------------------------------
// Exception
//---------------------------------------------------------------------------------
private static class InitializationException extends Exception {
private final InitializationExceptionCause exceptionCause;
InitializationException(String message, InitializationExceptionCause exceptionCause) {
super(message);
this.exceptionCause = exceptionCause;
}
}
protected enum InitializationExceptionCause {
/**
* The component parameter {@link #FIELD_TYPE} defines an unknown field type.
*/
UNKNOWN_FIELD_TYPE,
/**
* This component requires the schema to have a uniqueKeyField, which it does not have.
*/
MISSING_UNIQUE_KEY_FIELD,
/**
* Missing component parameter {@link #CONFIG_FILE} - it has to define the path to the elevation configuration file (e.g. elevate.xml).
*/
NO_CONFIG_FILE_DEFINED,
/**
* The elevation configuration file (e.g. elevate.xml) cannot be found, or is defined in both conf/ and data/ directories.
*/
MISSING_CONFIG_FILE,
/**
* The elevation configuration file (e.g. elevate.xml) is empty.
*/
EMPTY_CONFIG_FILE,
/**
* Unclassified exception cause.
*/
OTHER,
}
//---------------------------------------------------------------------------------
// Elevation classes
//---------------------------------------------------------------------------------
/**
* Provides the elevations defined for queries.
*/
protected interface ElevationProvider {
/**
* Gets the elevation associated with the provided query.
* <p>
* By contract and by design, only one elevation may be associated
* with a given query (this can be safely verified by an assertion).
*
* @param queryString The query string (not {@link #analyzeQuery(String) analyzed} yet,
* this {@link ElevationProvider} is in charge of analyzing it).
* @return The elevation associated with the query; or <code>null</code> if none.
*/
Elevation getElevationForQuery(String queryString);
/**
* Gets the number of query elevations in this {@link ElevationProvider}.
*/
@VisibleForTesting
int size();
}
/**
* {@link ElevationProvider} that returns no elevation.
*/
@SuppressWarnings("WeakerAccess")
protected static final ElevationProvider NO_OP_ELEVATION_PROVIDER = new ElevationProvider() {
@Override
public Elevation getElevationForQuery(String queryString) {
return null;
}
@Override
public int size() {
return 0;
}
};
/**
* Provides elevations with either:
* <ul>
* <li><b>subset match</b> - all the elevating terms are matched in the search query, in any order.</li>
* <li><b>exact match</b> - the elevating query matches fully (all terms in same order) the search query.</li>
* </ul>
* The terms are tokenized with the query analyzer.
*/
protected class DefaultElevationProvider implements ElevationProvider {
private final TrieSubsetMatcher<String, Elevation> subsetMatcher;
private final Map<String, Elevation> exactMatchElevationMap;
/**
* @param subsetMatcherBuilder The {@link TrieSubsetMatcher.Builder} to build the {@link TrieSubsetMatcher}.
* @param elevationBuilderMap The map of elevation rules.
*/
protected DefaultElevationProvider(TrieSubsetMatcher.Builder<String, Elevation> subsetMatcherBuilder,
Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) {
exactMatchElevationMap = new LinkedHashMap<>();
Collection<String> queryTerms = new ArrayList<>();
Consumer<CharSequence> termsConsumer = term -> queryTerms.add(term.toString());
StringBuilder concatTerms = new StringBuilder();
Consumer<CharSequence> concatConsumer = concatTerms::append;
for (Map.Entry<ElevatingQuery, ElevationBuilder> entry : elevationBuilderMap.entrySet()) {
ElevatingQuery elevatingQuery = entry.getKey();
Elevation elevation = entry.getValue().build();
if (elevatingQuery.subsetMatch) {
queryTerms.clear();
analyzeQuery(elevatingQuery.queryString, termsConsumer);
subsetMatcherBuilder.addSubset(queryTerms, elevation);
} else {
concatTerms.setLength(0);
analyzeQuery(elevatingQuery.queryString, concatConsumer);
exactMatchElevationMap.put(concatTerms.toString(), elevation);
}
}
this.subsetMatcher = subsetMatcherBuilder.build();
}
@Override
public Elevation getElevationForQuery(String queryString) {
boolean hasExactMatchElevationRules = exactMatchElevationMap.size() != 0;
if (subsetMatcher.getSubsetCount() == 0) {
if (!hasExactMatchElevationRules) {
return null;
}
return exactMatchElevationMap.get(analyzeQuery(queryString));
}
Collection<String> queryTerms = new ArrayList<>();
Consumer<CharSequence> termsConsumer = term -> queryTerms.add(term.toString());
StringBuilder concatTerms = null;
if (hasExactMatchElevationRules) {
concatTerms = new StringBuilder();
termsConsumer = termsConsumer.andThen(concatTerms::append);
}
analyzeQuery(queryString, termsConsumer);
Elevation mergedElevation = null;
if (hasExactMatchElevationRules) {
mergedElevation = exactMatchElevationMap.get(concatTerms.toString());
}
Iterator<Elevation> elevationIterator = subsetMatcher.findSubsetsMatching(queryTerms);
while (elevationIterator.hasNext()) {
Elevation elevation = elevationIterator.next();
mergedElevation = mergedElevation == null ? elevation : mergedElevation.mergeWith(elevation);
}
return mergedElevation;
}
@Override
public int size() {
return exactMatchElevationMap.size() + subsetMatcher.getSubsetCount();
}
}
/**
* Query triggering elevation.
*/
@SuppressWarnings("WeakerAccess")
protected static class ElevatingQuery {
public final String queryString;
public final boolean subsetMatch;
/**
* @param queryString The query to elevate documents for (not the analyzed form).
* @param subsetMatch Whether to match a subset of query terms.
*/
protected ElevatingQuery(String queryString, boolean subsetMatch) {
this.queryString = queryString;
this.subsetMatch = subsetMatch;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof ElevatingQuery)) {
return false;
}
ElevatingQuery eq = (ElevatingQuery) o;
return queryString.equals(eq.queryString) && subsetMatch == eq.subsetMatch;
}
@Override
public int hashCode() {
return queryString.hashCode() + (subsetMatch ? 1 : 0);
}
}
/**
* Builds an {@link Elevation}. This class is used to start defining query elevations, but allowing the merge of
* multiple elevations for the same query.
*/
@SuppressWarnings("WeakerAccess")
public class ElevationBuilder {
/**
* The ids of the elevated documents that should appear on top of search results; can be <code>null</code>.
* The order is retained.
*/
private LinkedHashSet<BytesRef> elevatedIds;
/**
* The ids of the excluded documents that should not appear in search results; can be <code>null</code>.
*/
private Set<BytesRef> excludedIds;
// for temporary/transient use when adding an elevated or excluded ID
private final BytesRefBuilder scratch = new BytesRefBuilder();
public ElevationBuilder addElevatedIds(List<String> ids) {
if (elevatedIds == null) {
elevatedIds = new LinkedHashSet<>(Math.max(10, ids.size()));
}
for (String id : ids) {
elevatedIds.add(toBytesRef(id));
}
return this;
}
public ElevationBuilder addExcludedIds(Collection<String> ids) {
if (excludedIds == null) {
excludedIds = new HashSet<>(Math.max(10, ids.size()));
}
for (String id : ids) {
excludedIds.add(toBytesRef(id));
}
return this;
}
public BytesRef toBytesRef(String id) {
uniqueKeyField.getType().readableToIndexed(id, scratch);
return scratch.toBytesRef();
}
public ElevationBuilder merge(ElevationBuilder elevationBuilder) {
if (elevatedIds == null) {
elevatedIds = elevationBuilder.elevatedIds;
} else if (elevationBuilder.elevatedIds != null) {
elevatedIds.addAll(elevationBuilder.elevatedIds);
}
if (excludedIds == null) {
excludedIds = elevationBuilder.excludedIds;
} else if (elevationBuilder.excludedIds != null) {
excludedIds.addAll(elevationBuilder.excludedIds);
}
return this;
}
public Elevation build() {
return new Elevation(elevatedIds, excludedIds, uniqueKeyField.getName());
}
}
/**
* Elevation of some documents in search results, with potential exclusion of others.
* Immutable.
*/
protected static class Elevation {
private static final BooleanQuery EMPTY_QUERY = new BooleanQuery.Builder().build();
public final Set<BytesRef> elevatedIds; // in configured order; not null
public final BooleanQuery includeQuery; // not null
public final Set<BytesRef> excludedIds; // not null
//just keep the term query, b/c we will not always explicitly exclude the item based on markExcludes query time param
public final TermQuery[] excludeQueries; //may be null
/**
* Constructs an elevation.
*
* @param elevatedIds The ids of the elevated documents that should appear on top of search results, in configured order;
* can be <code>null</code>.
* @param excludedIds The ids of the excluded documents that should not appear in search results; can be <code>null</code>.
* @param queryFieldName The field name to use to create query terms.
*/
public Elevation(Set<BytesRef> elevatedIds, Set<BytesRef> excludedIds, String queryFieldName) {
if (elevatedIds == null || elevatedIds.isEmpty()) {
includeQuery = EMPTY_QUERY;
this.elevatedIds = Collections.emptySet();
} else {
this.elevatedIds = ImmutableSet.copyOf(elevatedIds);
BooleanQuery.Builder includeQueryBuilder = new BooleanQuery.Builder();
for (BytesRef elevatedId : elevatedIds) {
includeQueryBuilder.add(new TermQuery(new Term(queryFieldName, elevatedId)), BooleanClause.Occur.SHOULD);
}
includeQuery = includeQueryBuilder.build();
}
if (excludedIds == null || excludedIds.isEmpty()) {
this.excludedIds = Collections.emptySet();
excludeQueries = null;
} else {
this.excludedIds = ImmutableSet.copyOf(excludedIds);
List<TermQuery> excludeQueriesBuilder = new ArrayList<>(excludedIds.size());
for (BytesRef excludedId : excludedIds) {
excludeQueriesBuilder.add(new TermQuery(new Term(queryFieldName, excludedId)));
}
excludeQueries = excludeQueriesBuilder.toArray(new TermQuery[0]);
}
}
protected Elevation(Set<BytesRef> elevatedIds, BooleanQuery includeQuery, Set<BytesRef> excludedIds, TermQuery[] excludeQueries) {
this.elevatedIds = elevatedIds;
this.includeQuery = includeQuery;
this.excludedIds = excludedIds;
this.excludeQueries = excludeQueries;
}
/**
* Merges this {@link Elevation} with another and creates a new {@link Elevation}.
* @return A new instance containing the merging of the two elevations; or directly this elevation if the other
* is <code>null</code>.
*/
protected Elevation mergeWith(Elevation elevation) {
if (elevation == null) {
return this;
}
Set<BytesRef> elevatedIds = ImmutableSet.<BytesRef>builder().addAll(this.elevatedIds).addAll(elevation.elevatedIds).build();
boolean overlappingElevatedIds = elevatedIds.size() != (this.elevatedIds.size() + elevation.elevatedIds.size());
BooleanQuery.Builder includeQueryBuilder = new BooleanQuery.Builder();
Set<BooleanClause> clauseSet = (overlappingElevatedIds ? Sets.newHashSetWithExpectedSize(elevatedIds.size()) : null);
for (BooleanClause clause : this.includeQuery.clauses()) {
if (!overlappingElevatedIds || clauseSet.add(clause)) {
includeQueryBuilder.add(clause);
}
}
for (BooleanClause clause : elevation.includeQuery.clauses()) {
if (!overlappingElevatedIds || clauseSet.add(clause)) {
includeQueryBuilder.add(clause);
}
}
Set<BytesRef> excludedIds = ImmutableSet.<BytesRef>builder().addAll(this.excludedIds).addAll(elevation.excludedIds).build();
TermQuery[] excludeQueries;
if (this.excludeQueries == null) {
excludeQueries = elevation.excludeQueries;
} else if (elevation.excludeQueries == null) {
excludeQueries = this.excludeQueries;
} else {
boolean overlappingExcludedIds = excludedIds.size() != (this.excludedIds.size() + elevation.excludedIds.size());
if (overlappingExcludedIds) {
excludeQueries = ImmutableSet.<TermQuery>builder().add(this.excludeQueries).add(elevation.excludeQueries)
.build().toArray(new TermQuery[0]);
} else {
excludeQueries = ObjectArrays.concat(this.excludeQueries, elevation.excludeQueries, TermQuery.class);
}
}
return new Elevation(elevatedIds, includeQueryBuilder.build(), excludedIds, excludeQueries);
}
@Override
public String toString() {
return "{elevatedIds=" + Collections2.transform(elevatedIds, BytesRef::utf8ToString) +
", excludedIds=" + Collections2.transform(excludedIds, BytesRef::utf8ToString) + "}";
}
}
/** Elevates certain docs to the top. */
private class ElevationComparatorSource extends FieldComparatorSource {
private final IntIntHashMap elevatedWithPriority;
private final boolean useConfiguredElevatedOrder;
private final int[] sortedElevatedDocIds;
private ElevationComparatorSource(IntIntHashMap elevatedWithPriority, boolean useConfiguredElevatedOrder) {
this.elevatedWithPriority = elevatedWithPriority;
this.useConfiguredElevatedOrder = useConfiguredElevatedOrder;
// copy elevatedWithPriority keys (doc IDs) into sortedElevatedDocIds, sorted
sortedElevatedDocIds = new int[elevatedWithPriority.size()];
final Iterator<IntIntCursor> iterator = elevatedWithPriority.iterator();
for (int i = 0; i < sortedElevatedDocIds.length; i++) {
IntIntCursor next = iterator.next();
sortedElevatedDocIds[i] = next.key;
}
assert iterator.hasNext() == false;
Arrays.sort(sortedElevatedDocIds);
}
@Override
public FieldComparator<Integer> newComparator(String fieldName, final int numHits, int sortPos, boolean reversed) {
return new SimpleFieldComparator<>() {
final int[] values = new int[numHits];
int bottomVal;
int topVal;
int docBase;
boolean hasElevatedDocsThisSegment;
@Override
protected void doSetNextReader(LeafReaderContext context) {
docBase = context.docBase;
// ascertain if hasElevatedDocsThisSegment
final int idx = Arrays.binarySearch(sortedElevatedDocIds, docBase);
if (idx < 0) {
//first doc in segment isn't elevated (typical). Maybe another is?
int nextIdx = -idx - 1;
if (nextIdx < sortedElevatedDocIds.length) {
int nextElevatedDocId = sortedElevatedDocIds[nextIdx];
if (nextElevatedDocId > docBase + context.reader().maxDoc()) {
hasElevatedDocsThisSegment = false;
return;
}
}
}
hasElevatedDocsThisSegment = true;
}
@Override
public int compare(int slot1, int slot2) {
return values[slot1] - values[slot2]; // values will be small enough that there is no overflow concern
}
@Override
public void setBottom(int slot) {
bottomVal = values[slot];
}
@Override
public void setTopValue(Integer value) {
topVal = value;
}
private int docVal(int doc) {
if (!hasElevatedDocsThisSegment) {
assert elevatedWithPriority.containsKey(docBase + doc) == false;
return -1;
} else if (useConfiguredElevatedOrder) {
return elevatedWithPriority.getOrDefault(docBase + doc, -1);
} else {
return elevatedWithPriority.containsKey(docBase + doc) ? 1 : -1;
}
}
@Override
public int compareBottom(int doc) {
return bottomVal - docVal(doc);
}
@Override
public void copy(int slot, int doc) {
values[slot] = docVal(doc);
}
@Override
public Integer value(int slot) {
return values[slot];
}
@Override
public int compareTop(int doc) {
final int docValue = docVal(doc);
return topVal - docValue; // values will be small enough that there is no overflow concern
}
};
}
}
/**
* Matches a potentially large collection of subsets with a trie implementation.
* <p>
* Given a collection of subsets <code>N</code>, finds all the subsets that are contained (ignoring duplicate elements)
* by a provided set <code>s</code>.
* That is, finds all subsets <code>n</code> in <code>N</code> for which <code>s.containsAll(n)</code>
* (<code>s</code> contains all the elements of <code>n</code>, in any order).
* <p>
* Associates a match value of type {@code M} with each subset and provides it each time the subset matches (i.e. is
* contained by the provided set).
* <p>
* This matcher requires that the elements be {@link Comparable}.
* It does not keep the subset insertion order.
* Duplicate subsets stack their match values.
* <p>
* The time complexity of adding a subset is <code>O(n.log(n))</code>, where <code>n</code> is the size of the subset.
* <p>
* The worst case time complexity of the subset matching is <code>O(2^s)</code>, however a more typical case time
* complexity is <code>O(s^3)</code> where s is the size of the set to partially match.
* Note it does not depend on <code>N</code>, the size of the collection of subsets, nor on <code>n</code>, the size of
* a subset.
*
* @param <E> Subset element type.
* @param <M> Subset match value type.
*/
protected static class TrieSubsetMatcher<E extends Comparable<? super E>, M> {
/*
Trie structure:
---------------
- A subset element on each edge.
- Each node may contain zero or more match values.
Sample construction:
--------------------
- given the subsets "B A C", "A B", "A B A", "B", "D B".
- remove duplicates and sort each subset => "A B C", "A B", "A B", "B", "B D".
- N() means a node with no match value.
- N(x, y) means a node with 2 match values x and y.
root
--A--> N()
--B--> N("A B", "A B A")
--C--> N("B A C")
--B--> N("B")
--D--> N("D B")
Subset matching algorithm:
--------------------------
- given a set s
In the above sample, with s="A B C B", the matching subsets are "B A C", "A B", "A B A", "B"
remove duplicates in s
sort s
keep a queue Q of current nodes
Add root node to Q
Another queue Q' will hold the child nodes (initially empty)
for each element e in s {
for each current node in Q {
if current node has a child for edge e {
add the child to Q'
record the child match values
}
if e is greater than or equal to the current node's greatest edge {
remove current node from Q (as we are sure this current node's children cannot match anymore)
}
}
Move all child nodes from Q' to Q
}
Time complexity:
----------------
s = size of the set to partially match
N = size of the collection of subsets
n = size of a subset
The time complexity depends on the number of current nodes in Q.
The worst case time complexity:
For a given set s:
- initially Q contains only 1 current node, the root
=> 1 node
- for first element e1 in s, at most 1 node is added to Q
=> 2 nodes
- for element e2 in s, at most 2 new nodes are added to Q
=> 4 nodes
- for element e3 in s, at most 4 new nodes are added to Q
=> 8 nodes
- for element ek in s, at most 2^(k-1) new nodes are added to Q
=> 2^k nodes
- however there are, in worst case, a maximum of N.n nodes
Sum[k=0 to s](2^k) = 2^(s+1)-1
So the worst case time complexity is: min(O(2^s), O(s.N.n))
A more typical case time complexity:
For a given set s:
- initially Q contains only 1 current node, the root
=> 1 node
- for first element e1 in s, 1 node is added to Q
=> 2 nodes
- for element e2 in s, 2 new nodes are added to Q
=> 4 nodes
- for element e3 in s, 3 new nodes are added to Q
=> 7 nodes
- for element ek in s, k new nodes are added to Q
=> previous nodes + k : q(k) = q(k-1) + k
Solution is q(k) = 1/2 (k^2+k+2)
Sum[k=0 to s](k^2+k+2)/2 = 1/6 (s+1) (s^2+2s+6)
So a more typical case time complexity is: min(O(s^3), O(s.N.n))
*/
public static class Builder<E extends Comparable<? super E>, M> {
private final TrieSubsetMatcher.Node<E, M> root = new TrieSubsetMatcher.Node<>();
private int subsetCount;
/**
* Adds a subset. If the subset is already registered, the new match value is added to the previous one(s).
*
* @param subset The subset of {@link Comparable} elements; it is copied. It is ignored if its size is <code>0</code>.
* Any subset added is guaranteed to be returned by {@link TrieSubsetMatcher#findSubsetsMatching}
* if it matches (i.e. is contained), even if two or more subsets are equal, or equal when ignoring
* duplicate elements.
* @param matchValue The match value provided each time the subset matches.
* @return This builder.
*/
public Builder<E, M> addSubset(Collection<E> subset, M matchValue) {
if (!subset.isEmpty()) {
TrieSubsetMatcher.Node<E, M> node = root;
for (E e : ImmutableSortedSet.copyOf(subset)) {
node = node.getOrCreateChild(e);
}
node.addMatchValue(matchValue);
subsetCount++;
}
return this;
}
public TrieSubsetMatcher<E, M> build() {
root.trimAndMakeImmutable();
return new TrieSubsetMatcher<>(root, subsetCount);
}
}
private final Node<E, M> root;
private final int subsetCount;
private TrieSubsetMatcher(Node<E, M> root, int subsetCount) {
this.root = root;
this.subsetCount = subsetCount;
}
/**
* Gets the number of subsets in this matcher.
*/
public int getSubsetCount() {
return subsetCount;
}
/**
* Returns an iterator over all the subsets that are contained by the provided set.
* The returned iterator does not support removal.
*
* @param set This set is copied to a new {@link ImmutableSortedSet} with natural ordering.
*/
public Iterator<M> findSubsetsMatching(Collection<E> set) {
return new MatchIterator(ImmutableSortedSet.copyOf(set));
}
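    /*
     * Editor's illustration (not part of the original source): a minimal usage sketch of this
     * matcher. The set {a, b, c} contains the subsets {a, b, c} and {a, b}, but not {b, d}.
     */
    @VisibleForTesting
    static List<String> usageSketch() {
      TrieSubsetMatcher<String, String> matcher = new Builder<String, String>()
          .addSubset(Arrays.asList("b", "a", "c"), "rule B A C") // stored deduplicated and sorted
          .addSubset(Arrays.asList("a", "b"), "rule A B")
          .addSubset(Arrays.asList("b", "d"), "rule D B")
          .build();
      List<String> matches = new ArrayList<>();
      matcher.findSubsetsMatching(Arrays.asList("a", "b", "c")).forEachRemaining(matches::add);
      return matches; // ["rule A B", "rule B A C"]
    }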
/**
* Trie node.
*/
private static class Node<E extends Comparable<? super E>, M> {
private Map<E, Node<E, M>> children;
private E greatestEdge;
private List<M> matchValues;
/**
* Gets the child node for the provided element; or <code>null</code> if none.
*/
Node<E, M> getChild(E e) {
return (children == null ? null : children.get(e));
}
/**
* Gets the child node for the provided element, or creates it if it does not exist.
*/
Node<E, M> getOrCreateChild(E e) {
if (children == null) {
children = new HashMap<>(4);
}
Node<E, M> child = children.get(e);
if (child == null) {
child = new Node<>();
children.put(e, child);
if (greatestEdge == null || e.compareTo(greatestEdge) > 0) {
greatestEdge = e;
}
}
return child;
}
/**
* Indicates whether this node has more children for edges greater than the given element.
*
* @return <code>true</code> if this node has more children for edges greater than the given element;
* <code>false</code> otherwise.
*/
boolean hasMorePotentialChildren(E e) {
return greatestEdge != null && e.compareTo(greatestEdge) < 0;
}
/**
* Decorates this node with an additional match value.
*/
void addMatchValue(M matchValue) {
if (matchValues == null) {
matchValues = new ArrayList<>(1);
}
matchValues.add(matchValue);
}
/**
* Gets the match values decorating this node.
*/
List<M> getMatchValues() {
return (matchValues == null ? Collections.emptyList() : matchValues);
}
/**
* Trims and makes this node, as well as all descendant nodes, immutable.
* This may reduce its memory usage and make it more efficient.
*/
void trimAndMakeImmutable() {
if (children != null && !(children instanceof ImmutableMap)) {
for (Node<E, M> child : children.values())
child.trimAndMakeImmutable();
children = ImmutableMap.copyOf(children);
}
if (matchValues != null && !(matchValues instanceof ImmutableList)) {
matchValues = ImmutableList.copyOf(matchValues);
}
}
}
private class MatchIterator implements Iterator<M> {
private final Iterator<E> sortedSetIterator;
private final Queue<TrieSubsetMatcher.Node<E, M>> currentNodes;
private final Queue<M> nextMatchValues;
MatchIterator(SortedSet<E> set) {
sortedSetIterator = set.iterator();
currentNodes = new ArrayDeque<>();
currentNodes.offer(root);
nextMatchValues = new ArrayDeque<>();
}
@Override
public boolean hasNext() {
return !nextMatchValues.isEmpty() || nextSubsetMatch();
}
@Override
public M next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
assert !nextMatchValues.isEmpty();
return nextMatchValues.poll();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
private boolean nextSubsetMatch() {
while (sortedSetIterator.hasNext()) {
E e = sortedSetIterator.next();
int currentNodeCount = currentNodes.size();
for (int i = 0; i < currentNodeCount; i++) {
TrieSubsetMatcher.Node<E, M> currentNode = currentNodes.remove();
TrieSubsetMatcher.Node<E, M> child = currentNode.getChild(e);
if (child != null) {
currentNodes.offer(child);
nextMatchValues.addAll(child.getMatchValues());
}
if (currentNode.hasMorePotentialChildren(e)) {
currentNodes.offer(currentNode);
}
}
if (!nextMatchValues.isEmpty()) {
return true;
}
}
return false;
}
}
}
} | 1 | 38,868 | We should not use a static import to be consistent with other existing use of the QueryElevationParams. | apache-lucene-solr | java |
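Editor's note: a small Java illustration of the reviewer's suggestion (the default value below is made up for the example). Referencing the constant through its class keeps the style consistent with the file's existing uses such as QueryElevationParams.ENABLE:

// Consistent with existing usage in this file:
boolean elevateAll = params.getBool(QueryElevationParams.ELEVATE_DOCS_WITHOUT_MATCHING_Q, false);

// Rather than the static import introduced by the patch:
//   import static org.apache.solr.common.params.QueryElevationParams.ELEVATE_DOCS_WITHOUT_MATCHING_Q;
//   boolean elevateAll = params.getBool(ELEVATE_DOCS_WITHOUT_MATCHING_Q, false);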
@@ -277,7 +277,7 @@ void *ap6_thread(void *thread_context)
uint32_t num_matches = 0;
struct bitstream_info null_gbs_info ;
- memset(&null_gbs_info, 0, sizeof(null_gbs_info));
+ memset_s(&null_gbs_info, sizeof(null_gbs_info), 0);
ON_GOTO(c->config->num_null_gbs == 0, out_exit, "no NULL bitstreams registered.");
| 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/*
* ap6.c : handles NULL bitstream programming on AP6
*/
#include <opae/fpga.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include "ap6.h"
#include "config_int.h"
#include "log.h"
#include "bitstream_int.h"
#include "safe_string/safe_string.h"
#include "bitstream-tools.h"
/*
* macro to check FPGA return codes, print error message, and goto cleanup label
* NOTE: this changes the program flow (uses goto)!
*/
#define ON_GOTO(cond, label, desc, ...) \
do { \
if (cond) { \
dlog("ap6[%i]: " desc "\n", c->socket, ## __VA_ARGS__); \
goto label; \
} \
} while (0)
sem_t ap6_sem[MAX_SOCKETS];
struct bitstream_info {
const char *filename;
uint8_t *data;
size_t data_len;
uint8_t *rbf_data;
size_t rbf_len;
fpga_guid interface_id;
};
/*
* Check for bitstream header and fill out bitstream_info fields
*/
#define MAGIC 0x1d1f8680
#define MAGIC_SIZE 4
#define HEADER_SIZE 20
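/*
 * Editor's note (layout as implied by parse_metadata() below): the legacy GBS header is
 * HEADER_SIZE (20) bytes:
 *   bytes  0..3   MAGIC (0x1d1f8680, read as a native uint32_t)
 *   bytes  4..19  interface id (fpga_guid, stored byte-reversed)
 *   bytes 20..    raw RBF payload (rbf_data / rbf_len)
 */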
int parse_metadata(struct bitstream_info *info)
{
unsigned i;
if (!info)
return -EINVAL;
if (info->data_len < HEADER_SIZE) {
fprintf(stderr, "File too small to be GBS\n");
return -1;
}
if (((uint32_t *)info->data)[0] != MAGIC) {
fprintf(stderr, "No valid GBS header\n");
return -1;
}
/* reverse byte order when reading GBS */
for (i = 0; i < sizeof(info->interface_id); i++)
info->interface_id[i] =
info->data[MAGIC_SIZE+sizeof(info->interface_id)-1-i];
info->rbf_data = &info->data[HEADER_SIZE];
info->rbf_len = info->data_len - HEADER_SIZE;
return 0;
}
/*
* Read interface id from bitstream
*/
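/*
 * Editor's note (layout as implied by the code below): a metadata-carrying bitstream starts
 * with a METADATA_GUID_LEN-byte GUID, followed by a uint32_t JSON length and the JSON text
 * itself, expected to look like { "<GBS_AFU_IMAGE>": { "<BBS_INTERFACE_ID>": "<uuid>" } }
 * (the literal key names come from the macros in bitstream-tools.h).
 */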
static fpga_result get_bitstream_ifc_id(const uint8_t *bitstream, fpga_guid *guid)
{
fpga_result result = FPGA_EXCEPTION;
char *json_metadata = NULL;
uint32_t json_len = 0;
const uint8_t *json_metadata_ptr = NULL;
json_object *root = NULL;
json_object *afu_image = NULL;
json_object *interface_id = NULL;
errno_t e;
if (check_bitstream_guid(bitstream) != FPGA_OK)
goto out_free;
json_len = read_int_from_bitstream(bitstream + METADATA_GUID_LEN, sizeof(uint32_t));
if (json_len == 0) {
PRINT_MSG("Bitstream has no metadata");
result = FPGA_OK;
goto out_free;
}
json_metadata_ptr = bitstream + METADATA_GUID_LEN + sizeof(uint32_t);
json_metadata = (char *) malloc(json_len + 1);
if (json_metadata == NULL) {
PRINT_ERR("Could not allocate memory for metadata!");
return FPGA_NO_MEMORY;
}
e = memcpy_s(json_metadata, json_len+1,
json_metadata_ptr, json_len);
if (EOK != e) {
PRINT_ERR("memcpy_s failed");
result = FPGA_EXCEPTION;
goto out_free;
}
json_metadata[json_len] = '\0';
root = json_tokener_parse(json_metadata);
if (root != NULL) {
if (json_object_object_get_ex(root, GBS_AFU_IMAGE, &afu_image)) {
json_object_object_get_ex(afu_image, BBS_INTERFACE_ID, &interface_id);
if (interface_id == NULL) {
PRINT_ERR("Invalid metadata");
result = FPGA_INVALID_PARAM;
goto out_free;
}
result = string_to_guid(json_object_get_string(interface_id), guid);
if (result != FPGA_OK) {
PRINT_ERR("Invalid BBS interface id ");
goto out_free;
}
} else {
PRINT_ERR("Invalid metadata");
result = FPGA_INVALID_PARAM;
goto out_free;
}
}
out_free:
if (root)
json_object_put(root);
if (json_metadata)
free(json_metadata);
return result;
}
/*
* Read bitstream from file and populate bitstream_info structure
*/
//TODO: remove this check when all bitstreams conform to JSON
//metadata spec.
static bool skip_header_checks;
int read_bitstream(const char *filename, struct bitstream_info *info)
{
FILE *f;
long len;
int ret;
if (!filename || !info)
return -EINVAL;
info->filename = filename;
/* open file */
f = fopen(filename, "rb");
if (!f) {
perror(filename);
return -1;
}
/* get filesize */
ret = fseek(f, 0, SEEK_END);
if (ret < 0) {
perror(filename);
goto out_close;
}
len = ftell(f);
if (len < 0) {
perror(filename);
goto out_close;
}
/* allocate memory */
info->data = (uint8_t *)malloc(len);
if (!info->data) {
perror("malloc");
goto out_close;
}
	/* read bitstream data */
ret = fseek(f, 0, SEEK_SET);
if (ret < 0) {
perror(filename);
goto out_free;
}
info->data_len = fread(info->data, 1, len, f);
if (ferror(f)) {
perror(filename);
goto out_free;
}
if (info->data_len != (size_t)len) {
fprintf(stderr,
"Filesize and number of bytes read don't match\n");
goto out_free;
}
if (check_bitstream_guid(info->data) == FPGA_OK) {
skip_header_checks = true;
printf(" skip_header_checks = true;\n");
if (get_bitstream_ifc_id(info->data, &(info->interface_id))
!= FPGA_OK) {
fprintf(stderr, "Invalid metadata in the bitstream\n");
goto out_free;
}
}
if (!skip_header_checks) {
/* populate remaining bitstream_info fields */
ret = parse_metadata(info);
if (ret < 0)
goto out_free;
}
fclose(f);
return 0;
out_free:
if (info->data)
free((void *)info->data);
info->data = NULL;
out_close:
fclose(f);
return -1;
}
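/*
 * Minimal usage sketch for the helper above (hypothetical caller and
 * filename, not part of this file's call graph):
 *
 *   struct bitstream_info info;
 *   if (read_bitstream("null.gbs", &info) == 0) {
 *           // use info.interface_id / info.rbf_data ...
 *           free(info.data);
 *   }
 */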
void *ap6_thread(void *thread_context)
{
struct ap6_context *c = (struct ap6_context *)thread_context;
unsigned i;
int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100000 }; /* 100000ns = 100us */
fpga_token fme_token;
fpga_handle fme_handle;
fpga_properties filter;
fpga_result res;
uint32_t num_matches = 0;
	struct bitstream_info null_gbs_info;
memset(&null_gbs_info, 0, sizeof(null_gbs_info));
ON_GOTO(c->config->num_null_gbs == 0, out_exit, "no NULL bitstreams registered.");
res = fpgaGetProperties(NULL, &filter);
ON_GOTO(res != FPGA_OK, out_exit, "enumeration failed");
for (i = 0; i < c->config->num_null_gbs; i++) {
ret = read_bitstream(c->config->null_gbs[i], &null_gbs_info);
if (ret < 0) {
dlog("ap6[%i]: \tfailed to read bitstream\n", c->socket);
if (null_gbs_info.data)
free((void *)null_gbs_info.data);
null_gbs_info.data = NULL;
continue;
}
res = fpgaClearProperties(filter);
ON_GOTO(res != FPGA_OK, out_destroy_filter, "enumeration failed");
res = fpgaPropertiesSetObjectType(filter, FPGA_DEVICE);
res += fpgaPropertiesSetSocketID(filter, c->socket);
res += fpgaPropertiesSetGUID(filter, null_gbs_info.interface_id);
ON_GOTO(res != FPGA_OK, out_destroy_filter, "enumeration failed");
res = fpgaEnumerate(&filter, 1, &fme_token, 1, &num_matches);
ON_GOTO(res != FPGA_OK, out_destroy_filter, "enumeration failed");
if (num_matches > 0)
break;
}
res = fpgaDestroyProperties(&filter);
ON_GOTO(res != FPGA_OK, out_exit, "enumeration failed");
/* if we didn't find a matching FPGA, bail out */
if (i == c->config->num_null_gbs)
goto out_exit;
/* now, fme_token holds the token for an FPGA on our socket matching the
* interface ID of the NULL GBS */
dlog("ap6[%i]: waiting for AP6, will write the following bitstream: \"%s\"\n", c->socket, c->config->null_gbs[i]);
while (c->config->running) {
/* wait for event */
ret = sem_timedwait(&ap6_sem[c->socket], &ts);
/* if AP6 */
if (ret == 0) {
/* program NULL bitstream */
dlog("ap6[%i]: writing NULL bitstreams.\n", c->socket);
res = fpgaOpen(fme_token, &fme_handle, 0);
if (res != FPGA_OK) {
dlog("ap6[%i]: failed to open FPGA.\n", c->socket);
/* TODO: retry? */
continue;
}
res = fpgaReconfigureSlot(fme_handle, 0, null_gbs_info.data, null_gbs_info.data_len, 0);
if (res != FPGA_OK) {
dlog("ap6[%i]: failed to write bitstream.\n", c->socket);
/* TODO: retry? */
}
res = fpgaClose(fme_handle);
if (res != FPGA_OK) {
dlog("ap6[%i]: failed to close FPGA.\n", c->socket);
}
}
}
out_exit:
if (null_gbs_info.data)
free(null_gbs_info.data);
return NULL;
out_destroy_filter:
fpgaDestroyProperties(&filter);
goto out_exit;
}
| 1 | 15,941 | No need to check return value? | OPAE-opae-sdk | c |
@@ -42,6 +42,11 @@ from typing import (Any, Callable, IO, Iterator, Optional, Sequence, Tuple, Type
from PyQt5.QtCore import QUrl, QVersionNumber
from PyQt5.QtGui import QClipboard, QDesktopServices
from PyQt5.QtWidgets import QApplication
+# We cannot use the stdlib version on 3.7-3.8 because we need the files() API.
+if sys.version_info >= (3, 9):
+ import importlib.resources as importlib_resources
+else:
+ import importlib_resources
import pkg_resources
import yaml
try: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Other utilities which don't fit anywhere else."""
import os
import os.path
import io
import re
import sys
import enum
import json
import datetime
import traceback
import functools
import contextlib
import posixpath
import shlex
import glob
import mimetypes
import ctypes
import ctypes.util
from typing import (Any, Callable, IO, Iterator, Optional, Sequence, Tuple, Type, Union,
TYPE_CHECKING, cast)
from PyQt5.QtCore import QUrl, QVersionNumber
from PyQt5.QtGui import QClipboard, QDesktopServices
from PyQt5.QtWidgets import QApplication
import pkg_resources
import yaml
try:
from yaml import (CSafeLoader as YamlLoader,
CSafeDumper as YamlDumper)
YAML_C_EXT = True
except ImportError: # pragma: no cover
from yaml import (SafeLoader as YamlLoader, # type: ignore[misc]
SafeDumper as YamlDumper)
YAML_C_EXT = False
import qutebrowser
from qutebrowser.utils import log
fake_clipboard = None
log_clipboard = False
_resource_cache = {}
is_mac = sys.platform.startswith('darwin')
is_linux = sys.platform.startswith('linux')
is_windows = sys.platform.startswith('win')
is_posix = os.name == 'posix'
try:
# Protocol was added in Python 3.8
from typing import Protocol
except ImportError: # pragma: no cover
if not TYPE_CHECKING:
class Protocol:
"""Empty stub at runtime."""
class SupportsLessThan(Protocol):
"""Protocol for a "comparable" object."""
def __lt__(self, other: Any) -> bool:
...
if TYPE_CHECKING:
class VersionNumber(SupportsLessThan, QVersionNumber):
"""WORKAROUND for incorrect PyQt stubs."""
else:
class VersionNumber:
"""We can't inherit from Protocol and QVersionNumber at runtime."""
class Unreachable(Exception):
"""Raised when there was unreachable code."""
class ClipboardError(Exception):
"""Raised if the clipboard contents are unavailable for some reason."""
class SelectionUnsupportedError(ClipboardError):
"""Raised if [gs]et_clipboard is used and selection=True is unsupported."""
def __init__(self) -> None:
super().__init__("Primary selection is not supported on this "
"platform!")
class ClipboardEmptyError(ClipboardError):
"""Raised if get_clipboard is used and the clipboard is empty."""
def elide(text: str, length: int) -> str:
"""Elide text so it uses a maximum of length chars."""
if length < 1:
raise ValueError("length must be >= 1!")
if len(text) <= length:
return text
else:
return text[:length - 1] + '\u2026'
def elide_filename(filename: str, length: int) -> str:
"""Elide a filename to the given length.
The difference to the elide() is that the text is removed from
the middle instead of from the end. This preserves file name extensions.
Additionally, standard ASCII dots are used ("...") instead of the unicode
"…" (U+2026) so it works regardless of the filesystem encoding.
This function does not handle path separators.
Args:
filename: The filename to elide.
length: The maximum length of the filename, must be at least 3.
Return:
The elided filename.
"""
elidestr = '...'
if length < len(elidestr):
        raise ValueError('length must be greater than or equal to 3')
if len(filename) <= length:
return filename
# Account for '...'
length -= len(elidestr)
left = length // 2
right = length - left
if right == 0:
return filename[:left] + elidestr
else:
return filename[:left] + elidestr + filename[-right:]
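# Illustrative example (verifiable by hand): the text is removed from the
# middle, so elide_filename("foobar.txt", 9) == "foo...txt".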
def compact_text(text: str, elidelength: int = None) -> str:
"""Remove leading whitespace and newlines from a text and maybe elide it.
Args:
text: The text to compact.
elidelength: To how many chars to elide.
"""
lines = []
for line in text.splitlines():
lines.append(line.strip())
out = ''.join(lines)
if elidelength is not None:
out = elide(out, elidelength)
return out
def preload_resources() -> None:
"""Load resource files into the cache."""
for subdir, pattern in [('html', '*.html'), ('javascript', '*.js')]:
path = resource_filename(subdir)
for full_path in glob.glob(os.path.join(path, pattern)):
sub_path = '/'.join([subdir, os.path.basename(full_path)])
_resource_cache[sub_path] = read_file(sub_path)
# FIXME:typing Return value should be bytes/str
def read_file(filename: str, binary: bool = False) -> Any:
"""Get the contents of a file contained with qutebrowser.
Args:
filename: The filename to open as string.
binary: Whether to return a binary string.
If False, the data is UTF-8-decoded.
Return:
The file contents as string.
"""
assert not posixpath.isabs(filename), filename
assert os.path.pardir not in filename.split(posixpath.sep), filename
if not binary and filename in _resource_cache:
return _resource_cache[filename]
if hasattr(sys, 'frozen'):
# PyInstaller doesn't support pkg_resources :(
# https://github.com/pyinstaller/pyinstaller/wiki/FAQ#misc
fn = os.path.join(os.path.dirname(sys.executable), filename)
if binary:
f: IO
with open(fn, 'rb') as f:
return f.read()
else:
with open(fn, 'r', encoding='utf-8') as f:
return f.read()
else:
data = pkg_resources.resource_string(
qutebrowser.__name__, filename)
if binary:
return data
return data.decode('UTF-8')
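# Illustrative usage (the resource paths are examples only):
#   read_file('html/error.html')             # str, from cache if preloaded
#   read_file('img/icon.png', binary=True)   # raw bytes, never cached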
def resource_filename(filename: str) -> str:
"""Get the absolute filename of a file contained with qutebrowser.
Args:
filename: The filename.
Return:
The absolute filename.
"""
if hasattr(sys, 'frozen'):
return os.path.join(os.path.dirname(sys.executable), filename)
return pkg_resources.resource_filename(qutebrowser.__name__, filename)
def parse_version(version: str) -> VersionNumber:
"""Parse a version string."""
v_q, _suffix = QVersionNumber.fromString(version)
return cast(VersionNumber, v_q.normalized())
def format_seconds(total_seconds: int) -> str:
"""Format a count of seconds to get a [H:]M:SS string."""
prefix = '-' if total_seconds < 0 else ''
hours, rem = divmod(abs(round(total_seconds)), 3600)
minutes, seconds = divmod(rem, 60)
chunks = []
if hours:
chunks.append(str(hours))
min_format = '{:02}'
else:
min_format = '{}'
chunks.append(min_format.format(minutes))
chunks.append('{:02}'.format(seconds))
return prefix + ':'.join(chunks)
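# Illustrative examples:
#   format_seconds(3661) == "1:01:01"
#   format_seconds(-75) == "-1:15"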
def format_size(size: Optional[float], base: int = 1024, suffix: str = '') -> str:
"""Format a byte size so it's human readable.
Inspired by http://stackoverflow.com/q/1094841
"""
prefixes = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
if size is None:
return '?.??' + suffix
for p in prefixes:
if -base < size < base:
return '{:.02f}{}{}'.format(size, p, suffix)
size /= base
return '{:.02f}{}{}'.format(size, prefixes[-1], suffix)
class FakeIOStream(io.TextIOBase):
"""A fake file-like stream which calls a function for write-calls."""
def __init__(self, write_func: Callable[[str], int]) -> None:
super().__init__()
self.write = write_func # type: ignore[assignment]
@contextlib.contextmanager
def fake_io(write_func: Callable[[str], int]) -> Iterator[None]:
"""Run code with stdout and stderr replaced by FakeIOStreams.
Args:
write_func: The function to call when write is called.
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
fake_stderr = FakeIOStream(write_func)
fake_stdout = FakeIOStream(write_func)
sys.stderr = fake_stderr # type: ignore[assignment]
sys.stdout = fake_stdout # type: ignore[assignment]
try:
yield
finally:
# If the code we did run did change sys.stdout/sys.stderr, we leave it
# unchanged. Otherwise, we reset it.
if sys.stdout is fake_stdout: # type: ignore[comparison-overlap]
sys.stdout = old_stdout
if sys.stderr is fake_stderr: # type: ignore[comparison-overlap]
sys.stderr = old_stderr
@contextlib.contextmanager
def disabled_excepthook() -> Iterator[None]:
"""Run code with the exception hook temporarily disabled."""
old_excepthook = sys.excepthook
sys.excepthook = sys.__excepthook__
try:
yield
finally:
# If the code we did run did change sys.excepthook, we leave it
# unchanged. Otherwise, we reset it.
if sys.excepthook is sys.__excepthook__:
sys.excepthook = old_excepthook
class prevent_exceptions: # noqa: N801,N806 pylint: disable=invalid-name
"""Decorator to ignore and log exceptions.
This needs to be used for some places where PyQt segfaults on exceptions or
silently ignores them.
We used to re-raise the exception with a single-shot QTimer in a similar
    case, but that led to a strange problem with a KeyError with some random
jinja template stuff as content. For now, we only log it, so it doesn't
pass 100% silently.
This could also be a function, but as a class (with a "wrong" name) it's
much cleaner to implement.
Attributes:
_retval: The value to return in case of an exception.
_predicate: The condition which needs to be True to prevent exceptions
"""
def __init__(self, retval: Any, predicate: bool = True) -> None:
"""Save decorator arguments.
Gets called on parse-time with the decorator arguments.
Args:
See class attributes.
"""
self._retval = retval
self._predicate = predicate
def __call__(self, func: Callable) -> Callable:
"""Called when a function should be decorated.
Args:
func: The function to be decorated.
Return:
The decorated function.
"""
if not self._predicate:
return func
retval = self._retval
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
"""Call the original function."""
try:
return func(*args, **kwargs)
except BaseException:
log.misc.exception("Error in {}".format(qualname(func)))
return retval
return wrapper
def is_enum(obj: Any) -> bool:
"""Check if a given object is an enum."""
try:
return issubclass(obj, enum.Enum)
except TypeError:
return False
def get_repr(obj: Any, constructor: bool = False, **attrs: Any) -> str:
"""Get a suitable __repr__ string for an object.
Args:
obj: The object to get a repr for.
constructor: If True, show the Foo(one=1, two=2) form instead of
<Foo one=1 two=2>.
attrs: The attributes to add.
"""
cls = qualname(obj.__class__)
parts = []
items = sorted(attrs.items())
for name, val in items:
parts.append('{}={!r}'.format(name, val))
if constructor:
return '{}({})'.format(cls, ', '.join(parts))
else:
if parts:
return '<{} {}>'.format(cls, ' '.join(parts))
else:
return '<{}>'.format(cls)
def qualname(obj: Any) -> str:
"""Get the fully qualified name of an object.
Based on twisted.python.reflect.fullyQualifiedName.
Should work with:
- functools.partial objects
- functions
- classes
- methods
- modules
"""
if isinstance(obj, functools.partial):
obj = obj.func
if hasattr(obj, '__module__'):
prefix = '{}.'.format(obj.__module__)
else:
prefix = ''
if hasattr(obj, '__qualname__'):
return '{}{}'.format(prefix, obj.__qualname__)
elif hasattr(obj, '__name__'):
return '{}{}'.format(prefix, obj.__name__)
else:
return repr(obj)
_ExceptionType = Union[Type[BaseException], Tuple[Type[BaseException]]]
def raises(exc: _ExceptionType, func: Callable, *args: Any) -> bool:
"""Check if a function raises a given exception.
Args:
exc: A single exception or an iterable of exceptions.
func: A function to call.
*args: The arguments to pass to the function.
Returns:
True if the exception was raised, False otherwise.
"""
try:
func(*args)
except exc:
return True
else:
return False
def force_encoding(text: str, encoding: str) -> str:
"""Make sure a given text is encodable with the given encoding.
This replaces all chars not encodable with question marks.
"""
return text.encode(encoding, errors='replace').decode(encoding)
def sanitize_filename(name: str,
replacement: Optional[str] = '_',
shorten: bool = False) -> str:
"""Replace invalid filename characters.
Note: This should be used for the basename, as it also removes the path
separator.
Args:
name: The filename.
replacement: The replacement character (or None).
shorten: Shorten the filename if it's too long for the filesystem.
"""
if replacement is None:
replacement = ''
# Remove chars which can't be encoded in the filename encoding.
# See https://github.com/qutebrowser/qutebrowser/issues/427
encoding = sys.getfilesystemencoding()
name = force_encoding(name, encoding)
# See also
# https://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
if is_windows:
bad_chars = '\\/:*?"<>|'
elif is_mac:
# Colons can be confusing in finder https://superuser.com/a/326627
bad_chars = '/:'
else:
bad_chars = '/'
for bad_char in bad_chars:
name = name.replace(bad_char, replacement)
if not shorten:
return name
# Truncate the filename if it's too long.
# Most filesystems have a maximum filename length of 255 bytes:
# https://en.wikipedia.org/wiki/Comparison_of_file_systems#Limits
# We also want to keep some space for QtWebEngine's ".download" suffix, as
# well as deduplication counters.
max_bytes = 255 - len("(123).download")
root, ext = os.path.splitext(name)
root = root[:max_bytes - len(ext)]
excess = len(os.fsencode(root + ext)) - max_bytes
while excess > 0 and root:
# Max 4 bytes per character is assumed.
# Integer division floors to -∞, not to 0.
root = root[:(-excess // 4)]
excess = len(os.fsencode(root + ext)) - max_bytes
if not root:
# Trimming the root is not enough. We must trim the extension.
# We leave one character in the root, so that the filename
# doesn't start with a dot, which makes the file hidden.
root = name[0]
excess = len(os.fsencode(root + ext)) - max_bytes
while excess > 0 and ext:
ext = ext[:(-excess // 4)]
excess = len(os.fsencode(root + ext)) - max_bytes
assert ext, name
name = root + ext
return name
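# Illustrative example: on Linux only "/" is replaced, so
# sanitize_filename("a/b:c") == "a_b:c", while macOS also replaces ":".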
def set_clipboard(data: str, selection: bool = False) -> None:
"""Set the clipboard to some given data."""
global fake_clipboard
if selection and not supports_selection():
raise SelectionUnsupportedError
if log_clipboard:
what = 'primary selection' if selection else 'clipboard'
log.misc.debug("Setting fake {}: {}".format(what, json.dumps(data)))
fake_clipboard = data
else:
mode = QClipboard.Selection if selection else QClipboard.Clipboard
QApplication.clipboard().setText(data, mode=mode)
def get_clipboard(selection: bool = False, fallback: bool = False) -> str:
"""Get data from the clipboard.
Args:
selection: Use the primary selection.
fallback: Fall back to the clipboard if primary selection is
unavailable.
"""
global fake_clipboard
if fallback and not selection:
raise ValueError("fallback given without selection!")
if selection and not supports_selection():
if fallback:
selection = False
else:
raise SelectionUnsupportedError
if fake_clipboard is not None:
data = fake_clipboard
fake_clipboard = None
else:
mode = QClipboard.Selection if selection else QClipboard.Clipboard
data = QApplication.clipboard().text(mode=mode)
target = "Primary selection" if selection else "Clipboard"
if not data.strip():
raise ClipboardEmptyError("{} is empty.".format(target))
log.misc.debug("{} contained: {!r}".format(target, data))
return data
def supports_selection() -> bool:
"""Check if the OS supports primary selection."""
return QApplication.clipboard().supportsSelection()
def open_file(filename: str, cmdline: str = None) -> None:
"""Open the given file.
If cmdline is not given, downloads.open_dispatcher is used.
If open_dispatcher is unset, the system's default application is used.
Args:
filename: The filename to open.
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default application
or `downloads.open_dispatcher` if set. If no `{}` is found,
the filename is appended to the cmdline.
"""
# Import late to avoid circular imports:
# - usertypes -> utils -> guiprocess -> message -> usertypes
# - usertypes -> utils -> config -> configdata -> configtypes ->
# cmdutils -> command -> message -> usertypes
from qutebrowser.config import config
from qutebrowser.misc import guiprocess
from qutebrowser.utils import version, message
# the default program to open downloads with - will be empty string
# if we want to use the default
override = config.val.downloads.open_dispatcher
if version.is_sandboxed():
if cmdline:
message.error("Cannot spawn download dispatcher from sandbox")
return
if override:
message.warning("Ignoring download dispatcher from config in "
"sandbox environment")
override = None
# precedence order: cmdline > downloads.open_dispatcher > openUrl
if cmdline is None and not override:
log.misc.debug("Opening {} with the system application"
.format(filename))
url = QUrl.fromLocalFile(filename)
QDesktopServices.openUrl(url)
return
if cmdline is None and override:
cmdline = override
assert cmdline is not None
cmd, *args = shlex.split(cmdline)
args = [arg.replace('{}', filename) for arg in args]
if '{}' not in cmdline:
args.append(filename)
log.misc.debug("Opening {} with {}"
.format(filename, [cmd] + args))
proc = guiprocess.GUIProcess(what='open-file')
proc.start_detached(cmd, args)
def unused(_arg: Any) -> None:
"""Function which does nothing to avoid pylint complaining."""
def expand_windows_drive(path: str) -> str:
r"""Expand a drive-path like E: into E:\.
Does nothing for other paths.
Args:
path: The path to expand.
"""
# Usually, "E:" on Windows refers to the current working directory on drive
    # E:\. The correct way to specify drive E: is "E:\", but most users
# probably don't use the "multiple working directories" feature and expect
# "E:" and "E:\" to be equal.
if re.fullmatch(r'[A-Z]:', path, re.IGNORECASE):
return path + "\\"
else:
return path
def yaml_load(f: Union[str, IO[str]]) -> Any:
"""Wrapper over yaml.load using the C loader if possible."""
start = datetime.datetime.now()
# WORKAROUND for https://github.com/yaml/pyyaml/pull/181
with log.py_warning_filter(
category=DeprecationWarning,
message=r"Using or importing the ABCs from 'collections' instead "
r"of from 'collections\.abc' is deprecated.*"):
try:
data = yaml.load(f, Loader=YamlLoader)
except ValueError as e:
if str(e).startswith('could not convert string to float'):
# WORKAROUND for https://github.com/yaml/pyyaml/issues/168
raise yaml.YAMLError(e)
raise # pragma: no cover
end = datetime.datetime.now()
delta = (end - start).total_seconds()
deadline = 10 if 'CI' in os.environ else 2
if delta > deadline: # pragma: no cover
log.misc.warning(
"YAML load took unusually long, please report this at "
"https://github.com/qutebrowser/qutebrowser/issues/2777\n"
"duration: {}s\n"
"PyYAML version: {}\n"
"C extension: {}\n"
"Stack:\n\n"
"{}".format(
delta, yaml.__version__, YAML_C_EXT,
''.join(traceback.format_stack())))
return data
def yaml_dump(data: Any, f: IO[str] = None) -> Optional[str]:
"""Wrapper over yaml.dump using the C dumper if possible.
Also returns a str instead of bytes.
"""
yaml_data = yaml.dump(data, f, Dumper=YamlDumper, default_flow_style=False,
encoding='utf-8', allow_unicode=True)
if yaml_data is None:
return None
else:
return yaml_data.decode('utf-8')
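# Illustrative round-trip (block style due to default_flow_style=False):
#   yaml_dump({'a': 1}) == 'a: 1\n'
#   yaml_load('a: 1') == {'a': 1}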
def chunk(elems: Sequence, n: int) -> Iterator[Sequence]:
"""Yield successive n-sized chunks from elems.
If elems % n != 0, the last chunk will be smaller.
"""
if n < 1:
raise ValueError("n needs to be at least 1!")
for i in range(0, len(elems), n):
yield elems[i:i + n]
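# Illustrative example:
#   list(chunk([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]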
def guess_mimetype(filename: str, fallback: bool = False) -> str:
"""Guess a mimetype based on a filename.
Args:
filename: The filename to check.
fallback: Fall back to application/octet-stream if unknown.
"""
mimetype, _encoding = mimetypes.guess_type(filename)
if mimetype is None:
if fallback:
return 'application/octet-stream'
else:
raise ValueError("Got None mimetype for {}".format(filename))
return mimetype
def ceil_log(number: int, base: int) -> int:
"""Compute max(1, ceil(log(number, base))).
Use only integer arithmetic in order to avoid numerical error.
"""
if number < 1 or base < 2:
raise ValueError("math domain error")
result = 1
accum = base
while accum < number:
result += 1
accum *= base
return result
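# Illustrative examples (pure integer arithmetic, no float log):
#   ceil_log(100, 10) == 2
#   ceil_log(101, 10) == 3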
def libgl_workaround() -> None:
"""Work around QOpenGLShaderProgram issues, especially for Nvidia.
See https://bugs.launchpad.net/ubuntu/+source/python-qt4/+bug/941826
"""
if os.environ.get('QUTE_SKIP_LIBGL_WORKAROUND'):
return
libgl = ctypes.util.find_library("GL")
if libgl is not None: # pragma: no branch
ctypes.CDLL(libgl, mode=ctypes.RTLD_GLOBAL)
| 1 | 25,445 | ~~Feel free to ignore that one, I'll fix things up when regenerating the `requirements.txt`.~~ As for the one below, this smells like a pylint bug... | qutebrowser-qutebrowser | py |
@@ -279,6 +279,7 @@ func (fa *flowAggregator) InitCollectingProcess() error {
IsEncrypted: false,
}
}
+ cpInput.NumExtraElements = len(antreaSourceStatsElementList) + len(antreaDestinationStatsElementList) + len(antreaLabelsElementList)
var err error
fa.collectingProcess, err = ipfix.NewIPFIXCollectingProcess(cpInput)
return err | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flowaggregator
import (
"bytes"
"encoding/json"
"fmt"
"time"
"github.com/vmware/go-ipfix/pkg/collector"
ipfixentities "github.com/vmware/go-ipfix/pkg/entities"
"github.com/vmware/go-ipfix/pkg/exporter"
ipfixintermediate "github.com/vmware/go-ipfix/pkg/intermediate"
ipfixregistry "github.com/vmware/go-ipfix/pkg/registry"
corev1 "k8s.io/api/core/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"antrea.io/antrea/pkg/ipfix"
)
var (
ianaInfoElementsCommon = []string{
"flowStartSeconds",
"flowEndSeconds",
"flowEndReason",
"sourceTransportPort",
"destinationTransportPort",
"protocolIdentifier",
"packetTotalCount",
"octetTotalCount",
"packetDeltaCount",
"octetDeltaCount",
}
ianaInfoElementsIPv4 = append(ianaInfoElementsCommon, []string{"sourceIPv4Address", "destinationIPv4Address"}...)
ianaInfoElementsIPv6 = append(ianaInfoElementsCommon, []string{"sourceIPv6Address", "destinationIPv6Address"}...)
ianaReverseInfoElements = []string{
"reversePacketTotalCount",
"reverseOctetTotalCount",
"reversePacketDeltaCount",
"reverseOctetDeltaCount",
}
antreaInfoElementsCommon = []string{
"sourcePodName",
"sourcePodNamespace",
"sourceNodeName",
"destinationPodName",
"destinationPodNamespace",
"destinationNodeName",
"destinationServicePort",
"destinationServicePortName",
"ingressNetworkPolicyName",
"ingressNetworkPolicyNamespace",
"ingressNetworkPolicyType",
"ingressNetworkPolicyRuleName",
"ingressNetworkPolicyRuleAction",
"egressNetworkPolicyName",
"egressNetworkPolicyNamespace",
"egressNetworkPolicyType",
"egressNetworkPolicyRuleName",
"egressNetworkPolicyRuleAction",
"tcpState",
"flowType",
}
antreaInfoElementsIPv4 = append(antreaInfoElementsCommon, []string{"destinationClusterIPv4"}...)
antreaInfoElementsIPv6 = append(antreaInfoElementsCommon, []string{"destinationClusterIPv6"}...)
nonStatsElementList = []string{
"flowEndSeconds",
"flowEndReason",
"tcpState",
}
statsElementList = []string{
"octetDeltaCount",
"octetTotalCount",
"packetDeltaCount",
"packetTotalCount",
"reverseOctetDeltaCount",
"reverseOctetTotalCount",
"reversePacketDeltaCount",
"reversePacketTotalCount",
}
antreaSourceStatsElementList = []string{
"octetDeltaCountFromSourceNode",
"octetTotalCountFromSourceNode",
"packetDeltaCountFromSourceNode",
"packetTotalCountFromSourceNode",
"reverseOctetDeltaCountFromSourceNode",
"reverseOctetTotalCountFromSourceNode",
"reversePacketDeltaCountFromSourceNode",
"reversePacketTotalCountFromSourceNode",
}
antreaDestinationStatsElementList = []string{
"octetDeltaCountFromDestinationNode",
"octetTotalCountFromDestinationNode",
"packetDeltaCountFromDestinationNode",
"packetTotalCountFromDestinationNode",
"reverseOctetDeltaCountFromDestinationNode",
"reverseOctetTotalCountFromDestinationNode",
"reversePacketDeltaCountFromDestinationNode",
"reversePacketTotalCountFromDestinationNode",
}
antreaLabelsElementList = []string{
"sourcePodLabels",
"destinationPodLabels",
}
aggregationElements = &ipfixintermediate.AggregationElements{
NonStatsElements: nonStatsElementList,
StatsElements: statsElementList,
AggregatedSourceStatsElements: antreaSourceStatsElementList,
AggregatedDestinationStatsElements: antreaDestinationStatsElementList,
}
correlateFields = []string{
"sourcePodName",
"sourcePodNamespace",
"sourceNodeName",
"destinationPodName",
"destinationPodNamespace",
"destinationNodeName",
"destinationClusterIPv4",
"destinationClusterIPv6",
"destinationServicePort",
"destinationServicePortName",
"ingressNetworkPolicyName",
"ingressNetworkPolicyNamespace",
"ingressNetworkPolicyRuleAction",
"ingressNetworkPolicyType",
"ingressNetworkPolicyRuleName",
"egressNetworkPolicyName",
"egressNetworkPolicyNamespace",
"egressNetworkPolicyRuleAction",
"egressNetworkPolicyType",
"egressNetworkPolicyRuleName",
}
)
const (
aggregationWorkerNum = 2
udpTransport = "udp"
tcpTransport = "tcp"
collectorAddress = "0.0.0.0:4739"
// PodInfo index name for Pod cache.
podInfoIndex = "podInfo"
)
type AggregatorTransportProtocol string
const (
AggregatorTransportProtocolTCP AggregatorTransportProtocol = "TCP"
AggregatorTransportProtocolTLS AggregatorTransportProtocol = "TLS"
AggregatorTransportProtocolUDP AggregatorTransportProtocol = "UDP"
)
type flowAggregator struct {
externalFlowCollectorAddr string
externalFlowCollectorProto string
aggregatorTransportProtocol AggregatorTransportProtocol
collectingProcess ipfix.IPFIXCollectingProcess
aggregationProcess ipfix.IPFIXAggregationProcess
activeFlowRecordTimeout time.Duration
inactiveFlowRecordTimeout time.Duration
exportingProcess ipfix.IPFIXExportingProcess
templateIDv4 uint16
templateIDv6 uint16
registry ipfix.IPFIXRegistry
set ipfixentities.Set
flowAggregatorAddress string
k8sClient kubernetes.Interface
observationDomainID uint32
podInformer coreinformers.PodInformer
}
func NewFlowAggregator(
externalFlowCollectorAddr string,
externalFlowCollectorProto string,
activeFlowRecTimeout time.Duration,
inactiveFlowRecTimeout time.Duration,
aggregatorTransportProtocol AggregatorTransportProtocol,
flowAggregatorAddress string,
k8sClient kubernetes.Interface,
observationDomainID uint32,
podInformer coreinformers.PodInformer,
) *flowAggregator {
registry := ipfix.NewIPFIXRegistry()
registry.LoadRegistry()
fa := &flowAggregator{
externalFlowCollectorAddr: externalFlowCollectorAddr,
externalFlowCollectorProto: externalFlowCollectorProto,
aggregatorTransportProtocol: aggregatorTransportProtocol,
activeFlowRecordTimeout: activeFlowRecTimeout,
inactiveFlowRecordTimeout: inactiveFlowRecTimeout,
registry: registry,
set: ipfixentities.NewSet(false),
flowAggregatorAddress: flowAggregatorAddress,
k8sClient: k8sClient,
observationDomainID: observationDomainID,
podInformer: podInformer,
}
podInformer.Informer().AddIndexers(cache.Indexers{podInfoIndex: podInfoIndexFunc})
return fa
}
func podInfoIndexFunc(obj interface{}) ([]string, error) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return nil, fmt.Errorf("obj is not pod: %+v", obj)
}
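	// Index only Pods that still hold at least one IP; terminated Pods are
	// skipped since their IPs may have been reused by newer Pods.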
if len(pod.Status.PodIPs) > 0 && pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed {
indexes := make([]string, len(pod.Status.PodIPs))
for i := range pod.Status.PodIPs {
indexes[i] = pod.Status.PodIPs[i].IP
}
return indexes, nil
}
return nil, nil
}
func (fa *flowAggregator) InitCollectingProcess() error {
var cpInput collector.CollectorInput
if fa.aggregatorTransportProtocol == AggregatorTransportProtocolTLS {
parentCert, privateKey, caCert, err := generateCACertKey()
if err != nil {
return fmt.Errorf("error when generating CA certificate: %v", err)
}
serverCert, serverKey, err := generateCertKey(parentCert, privateKey, true, fa.flowAggregatorAddress)
if err != nil {
return fmt.Errorf("error when creating server certificate: %v", err)
}
clientCert, clientKey, err := generateCertKey(parentCert, privateKey, false, "")
if err != nil {
return fmt.Errorf("error when creating client certificate: %v", err)
}
err = syncCAAndClientCert(caCert, clientCert, clientKey, fa.k8sClient)
if err != nil {
return fmt.Errorf("error when synchronizing client certificate: %v", err)
}
cpInput = collector.CollectorInput{
Address: collectorAddress,
Protocol: tcpTransport,
MaxBufferSize: 65535,
TemplateTTL: 0,
IsEncrypted: true,
CACert: caCert,
ServerKey: serverKey,
ServerCert: serverCert,
}
} else if fa.aggregatorTransportProtocol == AggregatorTransportProtocolTCP {
cpInput = collector.CollectorInput{
Address: collectorAddress,
Protocol: tcpTransport,
MaxBufferSize: 65535,
TemplateTTL: 0,
IsEncrypted: false,
}
} else {
cpInput = collector.CollectorInput{
Address: collectorAddress,
Protocol: udpTransport,
MaxBufferSize: 1024,
TemplateTTL: 0,
IsEncrypted: false,
}
}
var err error
fa.collectingProcess, err = ipfix.NewIPFIXCollectingProcess(cpInput)
return err
}
func (fa *flowAggregator) InitAggregationProcess() error {
var err error
apInput := ipfixintermediate.AggregationInput{
MessageChan: fa.collectingProcess.GetMsgChan(),
WorkerNum: aggregationWorkerNum,
CorrelateFields: correlateFields,
ActiveExpiryTimeout: fa.activeFlowRecordTimeout,
InactiveExpiryTimeout: fa.inactiveFlowRecordTimeout,
AggregateElements: aggregationElements,
}
fa.aggregationProcess, err = ipfix.NewIPFIXAggregationProcess(apInput)
return err
}
func (fa *flowAggregator) createAndSendTemplate(isRecordIPv6 bool) error {
templateID := fa.exportingProcess.NewTemplateID()
recordIPFamily := "IPv4"
if isRecordIPv6 {
recordIPFamily = "IPv6"
}
if isRecordIPv6 {
fa.templateIDv6 = templateID
} else {
fa.templateIDv4 = templateID
}
bytesSent, err := fa.sendTemplateSet(isRecordIPv6)
if err != nil {
fa.exportingProcess.CloseConnToCollector()
fa.exportingProcess = nil
fa.set.ResetSet()
return fmt.Errorf("sending %s template set failed, err: %v", recordIPFamily, err)
}
klog.V(2).InfoS("Exporting process initialized", "bytesSent", bytesSent, "templateSetIPFamily", recordIPFamily)
return nil
}
func (fa *flowAggregator) initExportingProcess() error {
// TODO: This code can be further simplified by changing the go-ipfix API to accept
// externalFlowCollectorAddr and externalFlowCollectorProto instead of net.Addr input.
var expInput exporter.ExporterInput
if fa.externalFlowCollectorProto == "tcp" {
// TCP transport does not need any tempRefTimeout, so sending 0.
expInput = exporter.ExporterInput{
CollectorAddress: fa.externalFlowCollectorAddr,
CollectorProtocol: fa.externalFlowCollectorProto,
ObservationDomainID: fa.observationDomainID,
TempRefTimeout: 0,
PathMTU: 0,
IsEncrypted: false,
}
} else {
// For UDP transport, hardcoding tempRefTimeout value as 1800s. So we will send out template every 30 minutes.
expInput = exporter.ExporterInput{
CollectorAddress: fa.externalFlowCollectorAddr,
CollectorProtocol: fa.externalFlowCollectorProto,
ObservationDomainID: fa.observationDomainID,
TempRefTimeout: 1800,
PathMTU: 0,
IsEncrypted: false,
}
}
ep, err := ipfix.NewIPFIXExportingProcess(expInput)
if err != nil {
return fmt.Errorf("got error when initializing IPFIX exporting process: %v", err)
}
fa.exportingProcess = ep
// Currently, we send two templates for IPv4 and IPv6 regardless of the IP families supported by cluster
if err = fa.createAndSendTemplate(false); err != nil {
return err
}
if err = fa.createAndSendTemplate(true); err != nil {
return err
}
return nil
}
func (fa *flowAggregator) Run(stopCh <-chan struct{}) {
go fa.collectingProcess.Start()
defer fa.collectingProcess.Stop()
go fa.aggregationProcess.Start()
defer fa.aggregationProcess.Stop()
go fa.flowRecordExpiryCheck(stopCh)
<-stopCh
}
func (fa *flowAggregator) flowRecordExpiryCheck(stopCh <-chan struct{}) {
expireTimer := time.NewTimer(fa.activeFlowRecordTimeout)
for {
select {
case <-stopCh:
if fa.exportingProcess != nil {
fa.exportingProcess.CloseConnToCollector()
}
expireTimer.Stop()
return
case <-expireTimer.C:
if fa.exportingProcess == nil {
err := fa.initExportingProcess()
if err != nil {
klog.Errorf("Error when initializing exporting process: %v, will retry in %s", err, fa.activeFlowRecordTimeout)
// Initializing exporting process fails, will retry in next cycle.
expireTimer.Reset(fa.activeFlowRecordTimeout)
continue
}
}
// Pop the flow record item from expire priority queue in the Aggregation
// Process and send the flow records.
if err := fa.aggregationProcess.ForAllExpiredFlowRecordsDo(fa.sendFlowKeyRecord); err != nil {
klog.Errorf("Error when sending expired flow records: %v", err)
// If there is an error when sending flow records because of intermittent connectivity, we reset the connection
// to IPFIX collector and retry in the next export cycle to reinitialize the connection and send flow records.
fa.exportingProcess.CloseConnToCollector()
fa.exportingProcess = nil
expireTimer.Reset(fa.activeFlowRecordTimeout)
continue
}
// Get the new expiry and reset the timer.
expireTimer.Reset(fa.aggregationProcess.GetExpiryFromExpirePriorityQueue())
}
}
}
func (fa *flowAggregator) sendFlowKeyRecord(key ipfixintermediate.FlowKey, record *ipfixintermediate.AggregationFlowRecord) error {
isRecordIPv4 := fa.aggregationProcess.IsAggregatedRecordIPv4(*record)
templateID := fa.templateIDv4
if !isRecordIPv4 {
templateID = fa.templateIDv6
}
// TODO: more records per data set will be supported when go-ipfix supports size check when adding records
fa.set.ResetSet()
if err := fa.set.PrepareSet(ipfixentities.Data, templateID); err != nil {
return err
}
if !fa.aggregationProcess.AreCorrelatedFieldsFilled(*record) {
fa.fillK8sMetadata(key, record.Record)
fa.aggregationProcess.SetCorrelatedFieldsFilled(record)
}
if !fa.aggregationProcess.AreExternalFieldsFilled(*record) {
fa.fillPodLabels(key, record.Record)
fa.aggregationProcess.SetExternalFieldsFilled(record)
}
err := fa.set.AddRecord(record.Record.GetOrderedElementList(), templateID)
if err != nil {
return err
}
sentBytes, err := fa.exportingProcess.SendSet(fa.set)
if err != nil {
return err
}
if err = fa.aggregationProcess.ResetStatElementsInRecord(record.Record); err != nil {
return err
}
klog.V(4).Infof("Data set sent successfully: %d Bytes sent", sentBytes)
return nil
}
func (fa *flowAggregator) sendTemplateSet(isIPv6 bool) (int, error) {
elements := make([]*ipfixentities.InfoElementWithValue, 0)
ianaInfoElements := ianaInfoElementsIPv4
antreaInfoElements := antreaInfoElementsIPv4
templateID := fa.templateIDv4
if isIPv6 {
ianaInfoElements = ianaInfoElementsIPv6
antreaInfoElements = antreaInfoElementsIPv6
templateID = fa.templateIDv6
}
for _, ie := range ianaInfoElements {
element, err := fa.registry.GetInfoElement(ie, ipfixregistry.IANAEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ie := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ie)
}
for _, ie := range ianaReverseInfoElements {
element, err := fa.registry.GetInfoElement(ie, ipfixregistry.IANAReversedEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ie := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ie)
}
for _, ie := range antreaInfoElements {
element, err := fa.registry.GetInfoElement(ie, ipfixregistry.AntreaEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ie := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ie)
}
for _, ie := range antreaSourceStatsElementList {
element, err := fa.registry.GetInfoElement(ie, ipfixregistry.AntreaEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ie := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ie)
}
for _, ie := range antreaDestinationStatsElementList {
element, err := fa.registry.GetInfoElement(ie, ipfixregistry.AntreaEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ie := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ie)
}
for _, ie := range antreaLabelsElementList {
element, err := fa.registry.GetInfoElement(ie, ipfixregistry.AntreaEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ie := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ie)
}
fa.set.ResetSet()
if err := fa.set.PrepareSet(ipfixentities.Template, templateID); err != nil {
return 0, err
}
err := fa.set.AddRecord(elements, templateID)
if err != nil {
return 0, fmt.Errorf("error when adding record to set, error: %v", err)
}
bytesSent, err := fa.exportingProcess.SendSet(fa.set)
return bytesSent, err
}
// fillK8sMetadata fills Pod name, Pod namespace and Node name for inter-Node flows
// that have incomplete info due to deny network policy.
func (fa *flowAggregator) fillK8sMetadata(key ipfixintermediate.FlowKey, record ipfixentities.Record) {
// fill source Pod info when sourcePodName is empty
if sourcePodName, exist := record.GetInfoElementWithValue("sourcePodName"); exist {
if sourcePodName.Value == "" {
pods, err := fa.podInformer.Informer().GetIndexer().ByIndex(podInfoIndex, key.SourceAddress)
if err == nil && len(pods) > 0 {
				pod, ok := pods[0].(*corev1.Pod)
				if !ok {
					// Guard against a nil dereference if the cache holds an unexpected type.
					klog.Warningf("Invalid Pod obj in cache")
				} else {
					sourcePodName.Value = pod.Name
					if sourcePodNamespace, exist := record.GetInfoElementWithValue("sourcePodNamespace"); exist {
						sourcePodNamespace.Value = pod.Namespace
					}
					if sourceNodeName, exist := record.GetInfoElementWithValue("sourceNodeName"); exist {
						sourceNodeName.Value = pod.Spec.NodeName
					}
				}
} else {
klog.Warning(err)
}
}
}
// fill destination Pod info when destinationPodName is empty
if destinationPodName, exist := record.GetInfoElementWithValue("destinationPodName"); exist {
if destinationPodName.Value == "" {
pods, err := fa.podInformer.Informer().GetIndexer().ByIndex(podInfoIndex, key.DestinationAddress)
if len(pods) > 0 && err == nil {
			pod, ok := pods[0].(*corev1.Pod)
			if !ok {
				// Guard against a nil dereference if the cache holds an unexpected type.
				klog.Warningf("Invalid Pod obj in cache")
			} else {
				destinationPodName.Value = pod.Name
				if destinationPodNamespace, exist := record.GetInfoElementWithValue("destinationPodNamespace"); exist {
					destinationPodNamespace.Value = pod.Namespace
				}
				if destinationNodeName, exist := record.GetInfoElementWithValue("destinationNodeName"); exist {
					destinationNodeName.Value = pod.Spec.NodeName
				}
			}
} else {
klog.Warning(err)
}
}
}
}
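// fetchPodLabels returns the Pod's labels JSON-encoded; for example, a Pod
// labeled app=nginx yields the string {"app":"nginx"}. An empty string is
// returned when the lookup fails.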
func (fa *flowAggregator) fetchPodLabels(podAddress string) string {
pods, err := fa.podInformer.Informer().GetIndexer().ByIndex(podInfoIndex, podAddress)
if err != nil {
klog.Warning(err)
return ""
} else if len(pods) == 0 {
return ""
}
	pod, ok := pods[0].(*corev1.Pod)
	if !ok {
		// Bail out instead of dereferencing a nil pod below.
		klog.Warningf("Invalid Pod obj in cache")
		return ""
	}
labelsJSON, err := json.Marshal(pod.GetLabels())
if err != nil {
klog.Warningf("JSON encoding of Pod labels failed: %v", err)
return ""
}
return string(labelsJSON)
}
func (fa *flowAggregator) fillPodLabels(key ipfixintermediate.FlowKey, record ipfixentities.Record) {
podLabelString := fa.fetchPodLabels(key.SourceAddress)
sourcePodLabelsElement, err := fa.registry.GetInfoElement("sourcePodLabels", ipfixregistry.AntreaEnterpriseID)
if err == nil {
sourcePodLabelsIE := ipfixentities.NewInfoElementWithValue(sourcePodLabelsElement, bytes.NewBufferString(podLabelString).Bytes())
err = record.AddInfoElement(sourcePodLabelsIE)
if err != nil {
klog.Warningf("Add sourcePodLabels InfoElementWithValue failed: %v", err)
}
} else {
klog.Warningf("Get sourcePodLabels InfoElement failed: %v", err)
}
podLabelString = fa.fetchPodLabels(key.DestinationAddress)
destinationPodLabelsElement, err := fa.registry.GetInfoElement("destinationPodLabels", ipfixregistry.AntreaEnterpriseID)
if err == nil {
destinationPodLabelsIE := ipfixentities.NewInfoElementWithValue(destinationPodLabelsElement, bytes.NewBufferString(podLabelString).Bytes())
err = record.AddInfoElement(destinationPodLabelsIE)
if err != nil {
klog.Warningf("Add destinationPodLabels InfoElementWithValue failed: %v", err)
}
} else {
klog.Warningf("Get destinationPodLabels InfoElement failed: %v", err)
}
}
| 1 | 42,528 | it's a bit strange that this doesn't match `aggregationElements` which is what I would expect. I guess I am not familiar enough with go-ipfix. | antrea-io-antrea | go |
@@ -35,11 +35,13 @@
#include "instr.h"
static int num_simd_saved;
+static int num_simd_saved_abs;
void
proc_init_arch(void)
{
num_simd_saved = MCXT_NUM_SIMD_SLOTS;
+ num_simd_saved_abs = MCXT_NUM_SIMD_SLOTS;
/* FIXME i#1569: NYI */
} | 1 | /* **********************************************************
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h"
#include "proc.h"
#include "instr.h"
static int num_simd_saved;
void
proc_init_arch(void)
{
num_simd_saved = MCXT_NUM_SIMD_SLOTS;
/* FIXME i#1569: NYI */
}
bool
proc_has_feature(feature_bit_t f)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
}
void
machine_cache_sync(void *pc_start, void *pc_end, bool flush_icache)
{
clear_icache(pc_start, pc_end);
}
DR_API
size_t
proc_fpstate_save_size(void)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
DR_API
int
proc_num_simd_saved(void)
{
return num_simd_saved;
}
DR_API
size_t
proc_save_fpstate(byte *buf)
{
/* All registers are saved by insert_push_all_registers so nothing extra
* needs to be saved here.
*/
return DR_FPSTATE_BUF_SIZE;
}
DR_API
void
proc_restore_fpstate(byte *buf)
{
/* Nothing to restore. */
}
void
dr_insert_save_fpstate(void *drcontext, instrlist_t *ilist, instr_t *where, opnd_t buf)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
void
dr_insert_restore_fpstate(void *drcontext, instrlist_t *ilist, instr_t *where, opnd_t buf)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
uint64
proc_get_timestamp(void)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
| 1 | 15,716 | Same thing: `num_simd_registers`. Ditto below. | DynamoRIO-dynamorio | c |
@@ -47,7 +47,7 @@ DEPENDENCY_LINKS = [
]
setup(name='kinto',
- version='2.2.0.dev0',
+ version='2.2.1',
description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
license='Apache License (2.0)', | 1 | import codecs
import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
def read_file(filename):
"""Open a related file and return its content."""
with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
content = f.read()
return content
README = read_file('README.rst')
CHANGELOG = read_file('CHANGELOG.rst')
CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
REQUIREMENTS = [
'waitress',
'cliquet>=3.1,<4',
'jsonschema',
]
POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
'cliquet[postgresql]>=3.1,<4'
]
MONITORING_REQUIREMENTS = REQUIREMENTS + [
'cliquet[monitoring]>=3.1,<4'
]
FXA_REQUIREMENTS = REQUIREMENTS + [
'cliquet-fxa<2'
]
ENTRY_POINTS = {
'paste.app_factory': [
'main = kinto:main',
],
'console_scripts': [
'kinto = kinto.__main__:main'
],
}
DEPENDENCY_LINKS = [
]
setup(name='kinto',
version='2.2.0.dev0',
description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
license='Apache License (2.0)',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"License :: OSI Approved :: Apache Software License"
],
keywords="web sync json storage",
author='Mozilla Services',
author_email='[email protected]',
url='https://github.com/Kinto/kinto',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=REQUIREMENTS,
extras_require={
'postgresql': POSTGRESQL_REQUIREMENTS,
'monitoring': MONITORING_REQUIREMENTS,
'fxa': FXA_REQUIREMENTS,
":python_version=='2.7'": ["functools32"],
},
test_suite="kinto.tests",
entry_points=ENTRY_POINTS,
dependency_links=DEPENDENCY_LINKS)
| 1 | 9,078 | Should be 2.1.1 | Kinto-kinto | py |
@@ -7,14 +7,16 @@ from localstack.utils.common import to_str, short_uid
TEST_BUCKET_NAME_WITH_NOTIFICATIONS = 'test_bucket_notif_1'
TEST_QUEUE_NAME_FOR_S3 = 'test_queue'
TEST_TOPIC_NAME = 'test_topic_name_for_sqs'
+TEST_S3_TOPIC_NAME = 'test_topic_name_for_s3_to_sns_to_sqs'
TEST_QUEUE_NAME_FOR_SNS = 'test_queue_for_sns'
-def receive_assert_delete(queue_url, assertions, sqs_client=None):
+def receive_assert_delete(queue_url, assertions, sqs_client=None, required_subject=None):
if not sqs_client:
sqs_client = aws_stack.connect_to_service('sqs')
response = sqs_client.receive_message(QueueUrl=queue_url)
+
messages = [json.loads(to_str(m['Body'])) for m in response['Messages']]
testutil.assert_objects(assertions, messages)
for message in response['Messages']: | 1 | import json
from io import BytesIO
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str, short_uid
TEST_BUCKET_NAME_WITH_NOTIFICATIONS = 'test_bucket_notif_1'
TEST_QUEUE_NAME_FOR_S3 = 'test_queue'
TEST_TOPIC_NAME = 'test_topic_name_for_sqs'
TEST_QUEUE_NAME_FOR_SNS = 'test_queue_for_sns'
def receive_assert_delete(queue_url, assertions, sqs_client=None):
if not sqs_client:
sqs_client = aws_stack.connect_to_service('sqs')
response = sqs_client.receive_message(QueueUrl=queue_url)
messages = [json.loads(to_str(m['Body'])) for m in response['Messages']]
testutil.assert_objects(assertions, messages)
for message in response['Messages']:
sqs_client.delete_message(QueueUrl=queue_url, ReceiptHandle=message['ReceiptHandle'])
def test_sqs_queue_names():
sqs_client = aws_stack.connect_to_service('sqs')
queue_name = '%s.fifo' % short_uid()
# make sure we can create *.fifo queues
queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
sqs_client.delete_queue(QueueUrl=queue_url)
def test_sns_to_sqs():
sqs_client = aws_stack.connect_to_service('sqs')
sns_client = aws_stack.connect_to_service('sns')
# create topic and queue
queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_SNS)
topic_info = sns_client.create_topic(Name=TEST_TOPIC_NAME)
# subscribe SQS to SNS, publish message
sns_client.subscribe(TopicArn=topic_info['TopicArn'], Protocol='sqs',
Endpoint=aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_SNS))
test_value = short_uid()
sns_client.publish(TopicArn=topic_info['TopicArn'], Message='test message for SQS',
MessageAttributes={'attr1': {'DataType': 'String', 'StringValue': test_value}})
# receive, assert, and delete message from SQS
queue_url = queue_info['QueueUrl']
assertions = []
# make sure we receive the correct topic ARN in notifications
assertions.append({'TopicArn': topic_info['TopicArn']})
# make sure the notification contains message attributes
assertions.append({'Value': test_value})
receive_assert_delete(queue_url, assertions, sqs_client)
def _delete_notification_config():
s3_client = aws_stack.connect_to_service('s3')
s3_client.put_bucket_notification_configuration(
Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS, NotificationConfiguration={})
config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
assert not config.get('QueueConfigurations')
def test_bucket_notifications():
s3_resource = aws_stack.connect_to_resource('s3')
s3_client = aws_stack.connect_to_service('s3')
sqs_client = aws_stack.connect_to_service('sqs')
# create test bucket and queue
s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_S3)
# create notification on bucket
queue_url = queue_info['QueueUrl']
queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_S3)
events = ['s3:ObjectCreated:*', 's3:ObjectRemoved:Delete']
filter_rules = {
'FilterRules': [{
'Name': 'prefix',
'Value': 'testupload/'
}, {
'Name': 'suffix',
'Value': 'testfile.txt'
}]
}
s3_client.put_bucket_notification_configuration(
Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
NotificationConfiguration={
'QueueConfigurations': [{
'Id': 'id123456',
'QueueArn': queue_arn,
'Events': events,
'Filter': {
'Key': filter_rules
}
}]
}
)
# retrieve and check notification config
config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
config = config['QueueConfigurations'][0]
assert events == config['Events']
assert filter_rules == config['Filter']['Key']
# upload file to S3 (this should NOT trigger a notification)
test_key1 = '/testdata'
test_data1 = b'{"test": "bucket_notification1"}'
s3_client.upload_fileobj(BytesIO(test_data1), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key1)
# upload file to S3 (this should trigger a notification)
test_key2 = 'testupload/dir1/testfile.txt'
test_data2 = b'{"test": "bucket_notification2"}'
s3_client.upload_fileobj(BytesIO(test_data2), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)
# receive, assert, and delete message from SQS
receive_assert_delete(queue_url, [{'key': test_key2}, {'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS}], sqs_client)
# delete notification config
_delete_notification_config()
# put notification config with single event type
event = 's3:ObjectCreated:*'
s3_client.put_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
NotificationConfiguration={
'QueueConfigurations': [{
'Id': 'id123456',
'QueueArn': queue_arn,
'Events': [event]
}]
}
)
config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
config = config['QueueConfigurations'][0]
assert config['Events'] == [event]
# put notification config with single event type and a prefix filter rule
event = 's3:ObjectCreated:*'
filter_rules = {
'FilterRules': [{
'Name': 'prefix',
'Value': 'testupload/'
}]
}
s3_client.put_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
NotificationConfiguration={
'QueueConfigurations': [{
'Id': 'id123456',
'QueueArn': queue_arn,
'Events': [event],
'Filter': {
'Key': filter_rules
}
}]
}
)
config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
config = config['QueueConfigurations'][0]
assert config['Events'] == [event]
assert filter_rules == config['Filter']['Key']
# upload file to S3 (this should trigger a notification)
test_key2 = 'testupload/dir1/testfile.txt'
test_data2 = b'{"test": "bucket_notification2"}'
s3_client.upload_fileobj(BytesIO(test_data2), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)
# receive, assert, and delete message from SQS
receive_assert_delete(queue_url, [{'key': test_key2}, {'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS}], sqs_client)
# delete notification config
_delete_notification_config()
| 1 | 9,264 | nitpick: `required_subject` doesn't seem to be used here | localstack-localstack | py |
@@ -19,6 +19,7 @@ package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
+
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
import java.net.URI;
public class GridData implements DataFetcher {
private final Distributor distributor;
private final URI publicUri;
public GridData(Distributor distributor, URI publicUri) {
this.distributor = Require.nonNull("Distributor", distributor);
this.publicUri = Require.nonNull("Grid's public URI", publicUri);
}
@Override
public Object get(DataFetchingEnvironment environment) {
return new Grid(distributor, publicUri);
}
}
| 1 | 17,791 | We can revert this to reduce the diff of the PR. | SeleniumHQ-selenium | rb |
@@ -871,6 +871,14 @@ class UIA(Window):
states.add(controlTypes.STATE_CHECKED)
return states
+ def _get_presentationType(self):
+ presentationType=super(UIA,self).presentationType
+ # UIA NVDAObjects can only be considered content if UI Automation considers them both a control and content.
+ if presentationType==self.presType_content and not (self.UIAElement.cachedIsContentElement and self.UIAElement.cachedIsControlElement):
+ presentationType=self.presType_layout
+ return presentationType
+
+
def correctAPIForRelation(self, obj, relation=None):
if obj and self.windowHandle != obj.windowHandle and not obj.UIAElement.cachedNativeWindowHandle:
# The target element is not the root element for the window, so don't change API class; i.e. always use UIA. | 1 | #NVDAObjects/UIA/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2009-2016 NV Access Limited, Joseph Lee, Mohammad Suliman
from ctypes import byref
from ctypes.wintypes import POINT, RECT
from comtypes import COMError
from comtypes.automation import VARIANT
import weakref
import sys
import numbers
import languageHandler
import UIAHandler
import globalVars
import eventHandler
import controlTypes
import config
import speech
import api
import textInfos
from logHandler import log
from UIAUtils import *
from NVDAObjects.window import Window
from NVDAObjects import NVDAObjectTextInfo, InvalidNVDAObject
from NVDAObjects.behaviors import ProgressBar, EditableTextWithoutAutoSelectDetection, Dialog, Notification
import braille
class UIATextInfo(textInfos.TextInfo):
#: The UI Automation text units (in order of resolution) that should be used when fetching formatting.
UIAFormatUnits=[
UIAHandler.TextUnit_Format,
UIAHandler.TextUnit_Word,
UIAHandler.TextUnit_Character
] if UIAHandler.isUIAAvailable else []
def find(self,text,caseSensitive=False,reverse=False):
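# Search the document for the given text.
# A forward search first steps one character past the current position so the
# match at the caret is not found again; a reverse search extends the range
# back to the start of the document.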
tempRange=self._rangeObj.clone()
documentRange=self.obj.UIATextPattern.documentRange
if reverse:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,documentRange,UIAHandler.TextPatternRangeEndpoint_Start)
else:
if tempRange.move(UIAHandler.TextUnit_Character,1)==0:
return False
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,documentRange,UIAHandler.TextPatternRangeEndpoint_End)
try:
r=tempRange.findText(text,reverse,not caseSensitive)
except COMError:
r=None
if r:
r.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,r,UIAHandler.TextPatternRangeEndpoint_Start)
self._rangeObj=r
return True
return False
def _getFormatFieldAtRange(self,range,formatConfig,ignoreMixedValues=False):
"""
Fetches formatting for the given UI Automation Text range.
@param range: the text range whose formatting should be fetched.
@type range: L{UIAutomation.IUIAutomationTextRange}
@param formatConfig: the types of formatting requested.
@type formatConfig: a dictionary of NVDA document formatting configuration keys with values set to true for those types that should be fetched.
@param ignoreMixedValues: If True, formatting that is mixed according to UI Automation will not be included. If False, L{UIAUtils.UIAMixedAttributeError} will be raised if UI Automation gives back a mixed attribute value, signifying that the caller may want to try again with a smaller range.
@type ignoreMixedValues: bool
@return: The formatting for the given text range.
@rtype: L{textInfos.FormatField}
"""
formatField=textInfos.FormatField()
if formatConfig["reportFontName"]:
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_FontNameAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val):
formatField["font-name"]=val
if formatConfig["reportFontSize"]:
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_FontSizeAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,numbers.Number):
formatField['font-size']="%g pt"%float(val)
if formatConfig["reportFontAttributes"]:
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_FontWeightAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,int):
formatField['bold']=(val>=700)
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_IsItalicAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val):
formatField['italic']=val
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_UnderlineStyleAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val):
formatField['underline']=bool(val)
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_StrikethroughStyleAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val):
formatField['strikethrough']=bool(val)
textPosition=None
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_IsSuperscriptAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val) and val:
textPosition='super'
else:
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_IsSubscriptAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val) and val:
textPosition="sub"
else:
textPosition="baseline"
if textPosition:
formatField['text-position']=textPosition
if formatConfig["reportAlignment"]:
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_HorizontalTextAlignmentAttributeId,ignoreMixedValues=ignoreMixedValues)
if val==UIAHandler.HorizontalTextAlignment_Left:
val="left"
elif val==UIAHandler.HorizontalTextAlignment_Centered:
val="center"
elif val==UIAHandler.HorizontalTextAlignment_Right:
val="right"
elif val==UIAHandler.HorizontalTextAlignment_Justified:
val="justify"
else:
val=None
if val:
formatField['text-align']=val
if formatConfig["reportColor"]:
import colors
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_BackgroundColorAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,int):
formatField['background-color']=colors.RGB.fromCOLORREF(val)
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_ForegroundColorAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,int):
formatField['color']=colors.RGB.fromCOLORREF(val)
if formatConfig['reportLinks']:
val=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_LinkAttributeId,ignoreMixedValues=ignoreMixedValues)
if not UIAHandler.handler.clientObject.checkNotSupported(val):
if val:
formatField['link']=True
if formatConfig["reportHeadings"]:
styleIDValue=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_StyleIdAttributeId,ignoreMixedValues=ignoreMixedValues)
if UIAHandler.StyleId_Heading1<=styleIDValue<=UIAHandler.StyleId_Heading9:
formatField["heading-level"]=(styleIDValue-UIAHandler.StyleId_Heading1)+1
if formatConfig["reportSpellingErrors"]:
annotationTypes=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_AnnotationTypesAttributeId,ignoreMixedValues=ignoreMixedValues)
if annotationTypes==UIAHandler.AnnotationType_SpellingError:
formatField["invalid-spelling"]=True
cultureVal=getUIATextAttributeValueFromRange(range,UIAHandler.UIA_CultureAttributeId,ignoreMixedValues=ignoreMixedValues)
if cultureVal and isinstance(cultureVal,int):
try:
formatField['language']=languageHandler.windowsLCIDToLocaleName(cultureVal)
except:
log.debugWarning("language error",exc_info=True)
pass
return textInfos.FieldCommand("formatChange",formatField)
def __init__(self,obj,position,_rangeObj=None):
super(UIATextInfo,self).__init__(obj,position)
if _rangeObj:
self._rangeObj=_rangeObj.clone()
elif position in (textInfos.POSITION_CARET,textInfos.POSITION_SELECTION):
try:
sel=self.obj.UIATextPattern.GetSelection()
except COMError:
raise RuntimeError("No selection available")
if sel.length>0:
self._rangeObj=sel.getElement(0).clone()
else:
raise NotImplementedError("UIAutomationTextRangeArray is empty")
if position==textInfos.POSITION_CARET:
self.collapse()
elif isinstance(position,UIATextInfo): #bookmark
self._rangeObj=position._rangeObj
elif position==textInfos.POSITION_FIRST:
self._rangeObj=self.obj.UIATextPattern.documentRange
self.collapse()
elif position==textInfos.POSITION_LAST:
self._rangeObj=self.obj.UIATextPattern.documentRange
self.collapse(True)
elif position==textInfos.POSITION_ALL:
self._rangeObj=self.obj.UIATextPattern.documentRange
elif isinstance(position,UIA):
try:
self._rangeObj=self.obj.UIATextPattern.rangeFromChild(position.UIAElement)
except COMError:
raise LookupError
# sometimes rangeFromChild can return a NULL range
if not self._rangeObj: raise LookupError
elif isinstance(position,textInfos.Point):
#rangeFromPoint causes a freeze in UIA client library!
#p=POINT(position.x,position.y)
#self._rangeObj=self.obj.UIATextPattern.RangeFromPoint(p)
raise NotImplementedError
elif isinstance(position,UIAHandler.IUIAutomationTextRange):
self._rangeObj=position.clone()
else:
raise ValueError("Unknown position %s"%position)
def __eq__(self,other):
if self is other: return True
if self.__class__ is not other.__class__: return False
return bool(self._rangeObj.compare(other._rangeObj))
def _get_NVDAObjectAtStart(self):
e=self.UIAElementAtStart
if e:
return UIA(UIAElement=e) or self.obj
return self.obj
def _get_UIAElementAtStart(self):
"""
Fetches the deepest UIA element at the start of the text range.
This may be via UIA's getChildren (in the case of embedded controls), or GetEnclosingElement.
"""
tempInfo=self.copy()
tempInfo.collapse()
# some implementations (Edge, Word) do not correctly classify embedded objects (graphics, checkboxes) as being the enclosing element, even when the range is completely within them. Rather, they still list the object in getChildren.
# Thus we must check getChildren before getEnclosingElement.
tempInfo.expand(textInfos.UNIT_CHARACTER)
tempRange=tempInfo._rangeObj
children=tempRange.getChildren()
if children.length==1:
child=children.getElement(0)
else:
child=tempRange.getEnclosingElement()
return child.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
def _get_bookmark(self):
return self.copy()
UIAControlTypesWhereNameIsContent={
UIAHandler.UIA_ButtonControlTypeId,
UIAHandler.UIA_HyperlinkControlTypeId,
UIAHandler.UIA_ImageControlTypeId,
UIAHandler.UIA_MenuItemControlTypeId,
UIAHandler.UIA_TabItemControlTypeId,
UIAHandler.UIA_TextControlTypeId,
UIAHandler.UIA_SplitButtonControlTypeId
} if UIAHandler.isUIAAvailable else None
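# For the control types above, the element's name is its content; e.g. a
# button's name ("OK") is the text the user actually reads, so it is rendered
# inline rather than reported as a separate label (see _getControlFieldForObject below).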
def _getControlFieldForObject(self, obj,isEmbedded=False,startOfNode=False,endOfNode=False):
"""
Fetch control field information for the given UIA NVDAObject.
@param obj: the NVDAObject the control field is for.
@type obj: L{UIA}
@param isEmbedded: True if this NVDAObject is for a leaf node (has no useful children).
@type isEmbedded: bool
@param startOfNode: True if the control field represents the very start of this object.
@type startOfNode: bool
@param endOfNode: True if the control field represents the very end of this object.
@type endOfNode: bool
@return: The control field for this object
@rtype: textInfos.ControlField containing NVDA control field data.
"""
role = obj.role
field = textInfos.ControlField()
# Ensure this controlField is unique to the object
runtimeID=field['runtimeID']=obj.UIAElement.getRuntimeId()
field['_startOfNode']=startOfNode
field['_endOfNode']=endOfNode
field["role"] = obj.role
states = obj.states
# The user doesn't care about certain states, as they are obvious.
states.discard(controlTypes.STATE_EDITABLE)
states.discard(controlTypes.STATE_MULTILINE)
states.discard(controlTypes.STATE_FOCUSED)
field["states"] = states
field['nameIsContent']=nameIsContent=obj.UIAElement.cachedControlType in self.UIAControlTypesWhereNameIsContent
if not nameIsContent:
field['name']=obj.name
field["description"] = obj.description
field["level"] = obj.positionInfo.get("level")
if role == controlTypes.ROLE_TABLE:
field["table-id"] = obj.UIAElement.getRuntimeId()
try:
field["table-rowcount"] = obj.rowCount
field["table-columncount"] = obj.columnCount
except NotImplementedError:
pass
if role in (controlTypes.ROLE_TABLECELL, controlTypes.ROLE_DATAITEM,controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_TABLEROWHEADER,controlTypes.ROLE_HEADERITEM):
try:
field["table-rownumber"] = obj.rowNumber
field["table-rowsspanned"] = obj.rowSpan
field["table-columnnumber"] = obj.columnNumber
field["table-columnsspanned"] = obj.columnSpan
field["table-id"] = obj.table.UIAElement.getRuntimeId()
field['role']=controlTypes.ROLE_TABLECELL
field['table-columnheadertext']=obj.columnHeaderText
field['table-rowheadertext']=obj.rowHeaderText
except NotImplementedError:
pass
return field
def _getTextFromUIARange(self,range):
"""
Fetches plain text from the given UI Automation text range.
Just calls getText(-1). This only exists to be overridden for filtering.
"""
return range.getText(-1)
def _getTextWithFields_text(self,textRange,formatConfig,UIAFormatUnits=None):
"""
Yields format fields and text for the given UI Automation text range, split up by the first available UI Automation text unit that does not result in mixed attribute values.
@param textRange: the UI Automation text range to walk.
@type textRange: L{UIAHandler.IUIAutomationTextRange}
@param formatConfig: the types of formatting requested.
@type formatConfig: a dictionary of NVDA document formatting configuration keys with values set to true for those types that should be fetched.
@param UIAFormatUnits: the UI Automation text units (in order of resolution) that should be used to split the text so as to avoid mixed attribute values. This is None by default.
If the parameter is a list of 1 or more units, The range will be split by the first unit in the list, and this method will be recursively run on each subrange, with the remaining units in this list given as the value of this parameter.
If this parameter is an empty list, then formatting and text is fetched for the entire range, but any mixed attribute values are ignored and no splitting occurs.
If this parameter is None, text and formatting is fetched for the entire range in one go, but if mixed attribute values are found, it will split by the first unit in self.UIAFormatUnits, and run this method recursively on each subrange, providing the remaining units from self.UIAFormatUnits as the value of this parameter.
@type UIAFormatUnits: List of UI Automation Text Units or None
@rtype: a Generator yielding L{textInfos.FieldCommand} objects containing L{textInfos.FormatField} objects, and text strings.
"""
log.debug("_getTextWithFields_text start")
if UIAFormatUnits:
unit=UIAFormatUnits[0]
furtherUIAFormatUnits=UIAFormatUnits[1:]
else:
# Fetching text and formatting from the entire range will be tried once before any possible splitting.
unit=None
furtherUIAFormatUnits=self.UIAFormatUnits if UIAFormatUnits is None else []
log.debug("Walking by unit %s"%unit)
log.debug("With further units of: %s"%furtherUIAFormatUnits)
rangeIter=iterUIARangeByUnit(textRange,unit) if unit is not None else [textRange]
for tempRange in rangeIter:
text=self._getTextFromUIARange(tempRange)
if text:
log.debug("Chunk has text. Fetching formatting")
try:
field=self._getFormatFieldAtRange(tempRange,formatConfig,ignoreMixedValues=len(furtherUIAFormatUnits)==0)
except UIAMixedAttributeError:
log.debug("Mixed formatting. Trying higher resolution unit")
for subfield in self._getTextWithFields_text(tempRange,formatConfig,UIAFormatUnits=furtherUIAFormatUnits):
yield subfield
log.debug("Done yielding higher resolution unit")
continue
log.debug("Yielding formatting and text")
yield field
yield text
log.debug("Done _getTextWithFields_text")
def _getTextWithFieldsForUIARange(self,rootElement,textRange,formatConfig,includeRoot=False,alwaysWalkAncestors=True,recurseChildren=True,_rootElementRange=None):
"""
Yields start and end control fields, and text, for the given UI Automation text range.
@param rootElement: the highest ancestor that encloses the given text range. This function will not walk higher than this point.
@type rootElement: L{UIAHandler.IUIAutomationElement}
@param textRange: the UI Automation text range whose content should be fetched.
@type textRange: L{UIAHandler.IUIAutomationTextRange}
@param formatConfig: the types of formatting requested.
@type formatConfig: a dictionary of NVDA document formatting configuration keys with values set to true for those types that should be fetched.
@param includeRoot: If true, then a control start and end will be yielded for the root element.
@type includeRoot: bool
@param alwaysWalkAncestors: If true then control fields will be yielded for any element enclosing the given text range, that is a descendant of the root element. If false then the root element may be assumed to be the only ancestor.
@type alwaysWalkAncestors: bool
@param recurseChildren: If true, this function will be recursively called for each child of the given text range, clipped to the bounds of this text range. Formatted text between the children will also be yielded. If false, only formatted text will be yielded.
@type recurseChildren: bool
@param _rootElementRange: Optimization argument: the actual text range for the root element, as it is usually already known when making recursive calls.
@type _rootElementRange: L{UIAHandler.IUIAutomationTextRange}
@rtype: A generator that yields L{textInfo.FieldCommand} objects and text strings.
"""
if log.isEnabledFor(log.DEBUG):
log.debug("_getTextWithFieldsForUIARange")
log.debug("rootElement: %s"%rootElement.currentLocalizedControlType if rootElement else None)
log.debug("full text: %s"%textRange.getText(-1))
if recurseChildren:
childElements=textRange.getChildren()
# Specific check for embedded elements (checkboxes etc)
# Calling getChildren on their childRange always gives back the same child.
if childElements.length==1:
childElement=childElements.getElement(0)
if childElement and UIAHandler.handler.clientObject.compareElements(childElement,rootElement):
log.debug("Detected embedded child")
childElement=childElement.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
recurseChildren=False
parentElements=[]
if alwaysWalkAncestors:
log.debug("Fetching parents starting from enclosingElement")
try:
parentElement=textRange.getEnclosingElement()
except COMError:
parentElement=None
if parentElement:
# #6450: IE 11 on Windows 7 raises COMError here
try:
parentElement=parentElement.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
except COMError:
parentElement=None
while parentElement:
isRoot=UIAHandler.handler.clientObject.compareElements(parentElement,rootElement)
if log.isEnabledFor(log.DEBUG):
log.debug("parentElement: %s"%parentElement.currentLocalizedControlType)
if isRoot and not includeRoot:
log.debug("Is root, and root not requested. Breaking")
break
try:
parentRange=self.obj.UIATextPattern.rangeFromChild(parentElement)
except COMError:
parentRange=None
if not parentRange:
log.debug("parentRange is NULL. Breaking")
break
parentElements.append((parentElement,parentRange))
if isRoot:
log.debug("Hit root. Breaking")
break
parentElement=UIAHandler.handler.baseTreeWalker.getParentElementBuildCache(parentElement,UIAHandler.handler.baseCacheRequest)
else: # not alwaysWalkAncestors
if includeRoot:
log.debug("Using rootElement as only parent")
rootElementRange=_rootElementRange if _rootElementRange else self.obj.UIATextPattern.rangeFromChild(rootElement)
parentElements.append((rootElement,rootElementRange))
log.debug("Done fetching parents")
enclosingElement=parentElements[0][0] if parentElements else rootElement
parentFields=[]
log.debug("Generating controlFields for parents")
for index,(parentElement,parentRange) in enumerate(parentElements):
if log.isEnabledFor(log.DEBUG):
log.debug("parentElement: %s"%parentElement.currentLocalizedControlType)
startOfNode=textRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,parentRange,UIAHandler.TextPatternRangeEndpoint_Start)<=0
endOfNode=textRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,parentRange,UIAHandler.TextPatternRangeEndpoint_End)>=0
try:
obj=UIA(windowHandle=self.obj.windowHandle,UIAElement=parentElement)
field=self._getControlFieldForObject(obj,isEmbedded=(index==0 and not recurseChildren),startOfNode=startOfNode,endOfNode=endOfNode)
except LookupError:
log.debug("Failed to fetch controlField data for parentElement. Breaking")
continue
if not field:
continue
parentFields.append(field)
log.debug("Done generating controlFields for parents")
log.debug("Yielding control starts for parents")
for field in reversed(parentFields):
yield textInfos.FieldCommand("controlStart",field)
log.debug("Done yielding control starts for parents")
del parentElements
log.debug("Yielding balanced fields for textRange")
# Move through the text range, collecting text and recursing into children
#: This variable is used to span lengths of plain text between child ranges as we iterate over getChildren
tempRange=textRange.clone()
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,tempRange,UIAHandler.TextPatternRangeEndpoint_Start)
if recurseChildren:
if log.isEnabledFor(log.DEBUG):
log.debug("Child count: %s"%childElements.length)
log.debug("Walking children")
for index in xrange(childElements.length):
childElement=childElements.getElement(index)
if not childElement or UIAHandler.handler.clientObject.compareElements(childElement,enclosingElement):
log.debug("NULL childElement. Skipping")
continue
childElement=childElement.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
if log.isEnabledFor(log.DEBUG):
log.debug("Fetched child %s (%s)"%(index,childElement.currentLocalizedControlType))
childRange=self.obj.UIATextPattern.rangeFromChild(childElement)
if not childRange:
log.debug("NULL childRange. Skipping")
continue
if childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,textRange,UIAHandler.TextPatternRangeEndpoint_End)>=0:
log.debug("Child at or past end of textRange. Breaking")
break
origChildRange=childRange.clone()
if childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)>0:
log.debug("textRange ended part way through the child. Crop end of childRange to fit")
childRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)
childStartDelta=childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,tempRange,UIAHandler.TextPatternRangeEndpoint_End)
if childStartDelta>0:
# plain text before this child
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,childRange,UIAHandler.TextPatternRangeEndpoint_Start)
log.debug("Plain text before child")
for field in self._getTextWithFields_text(tempRange,formatConfig):
yield field
elif childStartDelta<0:
log.debug("textRange started part way through child. Cropping Start of child range to fit" )
childRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,tempRange,UIAHandler.TextPatternRangeEndpoint_End)
if childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,childRange,UIAHandler.TextPatternRangeEndpoint_End)==0:
log.debug("childRange is degenerate. Skipping")
continue
log.debug("Recursing into child %s"%index)
for field in self._getTextWithFieldsForUIARange(childElement,childRange,formatConfig,_rootElementRange=origChildRange,includeRoot=True,alwaysWalkAncestors=False):
yield field
log.debug("Done recursing into child %s"%index)
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,childRange,UIAHandler.TextPatternRangeEndpoint_End)
log.debug("children done")
else: #isEmbeddedChild==True
log.debug("isEmbeddedChild, not recursing children.")
# Plain text after the final child
if tempRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)<0:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)
log.debug("Yielding final text")
for field in self._getTextWithFields_text(tempRange,formatConfig):
yield field
log.debug("Done yielding final text")
log.debug("Done yielding balanced fields for textRange")
for field in reversed(parentFields):
log.debug("Yielding controlEnd for parentElement")
yield textInfos.FieldCommand("controlEnd",field)
log.debug("_getTextWithFieldsForUIARange end")
def getTextWithFields(self,formatConfig=None):
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
fields=[]
for field in self._getTextWithFieldsForUIARange(self.obj.UIAElement,self._rangeObj,formatConfig):
if log.isEnabledFor(log.DEBUG):
log.debug("Field: %s"%field)
fields.append(field)
return fields
def _get_text(self):
return self._getTextFromUIARange(self._rangeObj)
def expand(self,unit):
UIAUnit=UIAHandler.NVDAUnitsToUIAUnits[unit]
self._rangeObj.ExpandToEnclosingUnit(UIAUnit)
def move(self,unit,direction,endPoint=None):
UIAUnit=UIAHandler.NVDAUnitsToUIAUnits[unit]
if endPoint=="start":
res=self._rangeObj.MoveEndpointByUnit(UIAHandler.TextPatternRangeEndpoint_Start,UIAUnit,direction)
elif endPoint=="end":
res=self._rangeObj.MoveEndpointByUnit(UIAHandler.TextPatternRangeEndpoint_End,UIAUnit,direction)
else:
res=self._rangeObj.Move(UIAUnit,direction)
# Some implementations of Move and MoveEndpointByUnit return a positive number even if the direction is negative.
if direction<0 and res>0:
res=0-res
return res
def copy(self):
return self.__class__(self.obj,None,_rangeObj=self._rangeObj)
def collapse(self,end=False):
if end:
self._rangeObj.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,self._rangeObj,UIAHandler.TextPatternRangeEndpoint_End)
else:
self._rangeObj.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,self._rangeObj,UIAHandler.TextPatternRangeEndpoint_Start)
def compareEndPoints(self,other,which):
if which.startswith('start'):
src=UIAHandler.TextPatternRangeEndpoint_Start
else:
src=UIAHandler.TextPatternRangeEndpoint_End
if which.endswith('Start'):
target=UIAHandler.TextPatternRangeEndpoint_Start
else:
target=UIAHandler.TextPatternRangeEndpoint_End
return self._rangeObj.CompareEndpoints(src,other._rangeObj,target)
def setEndPoint(self,other,which):
if which.startswith('start'):
src=UIAHandler.TextPatternRangeEndpoint_Start
else:
src=UIAHandler.TextPatternRangeEndpoint_End
if which.endswith('Start'):
target=UIAHandler.TextPatternRangeEndpoint_Start
else:
target=UIAHandler.TextPatternRangeEndpoint_End
self._rangeObj.MoveEndpointByRange(src,other._rangeObj,target)
def updateSelection(self):
self._rangeObj.Select()
updateCaret = updateSelection
class UIA(Window):
def findOverlayClasses(self,clsList):
if self.TextInfo==UIATextInfo:
clsList.append(EditableTextWithoutAutoSelectDetection)
UIAControlType=self.UIAElement.cachedControlType
UIAClassName=self.UIAElement.cachedClassName
if UIAClassName=="WpfTextView":
clsList.append(WpfTextView)
elif EditableTextWithoutAutoSelectDetection in clsList and (UIAClassName=='_WwG' or self.UIAElement.cachedAutomationID.startswith('UIA_AutomationId_Word_Content')):
from .wordDocument import WordDocument, WordDocumentNode
if self.role==controlTypes.ROLE_DOCUMENT:
clsList.append(WordDocument)
else:
clsList.append(WordDocumentNode)
# #5136: Windows 8.x and Windows 10 uses different window class and other attributes for toast notifications.
elif UIAClassName=="ToastContentHost" and UIAControlType==UIAHandler.UIA_ToolTipControlTypeId: #Windows 8.x
clsList.append(Toast_win8)
elif self.windowClassName=="Windows.UI.Core.CoreWindow" and UIAControlType==UIAHandler.UIA_WindowControlTypeId and "ToastView" in self.UIAElement.cachedAutomationId: # Windows 10
clsList.append(Toast_win10)
elif self.UIAElement.cachedFrameworkID in ("InternetExplorer","MicrosoftEdge"):
import edge
if UIAClassName in ("Internet Explorer_Server","WebView") and self.role==controlTypes.ROLE_PANE:
clsList.append(edge.EdgeHTMLRootContainer)
elif self.UIATextPattern and self.role==controlTypes.ROLE_PANE and self.parent and (isinstance(self.parent,edge.EdgeHTMLRootContainer) or not isinstance(self.parent,edge.EdgeNode)):
clsList.append(edge.EdgeHTMLRoot)
elif self.role==controlTypes.ROLE_LIST:
clsList.append(edge.EdgeList)
else:
clsList.append(edge.EdgeNode)
elif self.role==controlTypes.ROLE_DOCUMENT and self.UIAElement.cachedAutomationId=="Microsoft.Windows.PDF.DocumentView":
# PDFs
import edge
clsList.append(edge.EdgeHTMLRoot)
if UIAControlType==UIAHandler.UIA_ProgressBarControlTypeId:
clsList.append(ProgressBar)
if UIAClassName=="ControlPanelLink":
clsList.append(ControlPanelLink)
if UIAClassName=="UIColumnHeader":
clsList.append(UIColumnHeader)
elif UIAClassName=="UIItem":
clsList.append(UIItem)
elif UIAClassName=="SensitiveSlider":
clsList.append(SensitiveSlider)
if UIAControlType==UIAHandler.UIA_TreeItemControlTypeId:
clsList.append(TreeviewItem)
elif UIAControlType==UIAHandler.UIA_ComboBoxControlTypeId:
try:
if not self.UIAElement.getCurrentPropertyValue(UIAHandler.UIA_IsValuePatternAvailablePropertyId):
clsList.append(ComboBoxWithoutValuePattern)
except COMError:
pass
elif UIAControlType==UIAHandler.UIA_ListItemControlTypeId:
clsList.append(ListItem)
# #5942: In recent Windows 10 Redstone builds (14332 and later), Microsoft rewrote various dialog code including that of User Account Control.
if self.UIAIsWindowElement and UIAClassName in ("#32770","NUIDialog", "Credential Dialog Xaml Host"):
clsList.append(Dialog)
clsList.append(UIA)
if self.UIAIsWindowElement:
super(UIA,self).findOverlayClasses(clsList)
if self.UIATextPattern:
#Since there is a UIA text pattern, there is no need to use the win32 edit support at all
import NVDAObjects.window.edit
for x in list(clsList):
if issubclass(x,NVDAObjects.window.edit.Edit):
clsList.remove(x)
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
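# Resolve a UIA element for the requested relation: an (x, y) tuple is treated
# as a screen point, "focus" fetches the system focus, and anything else falls
# back to the element for the given window handle.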
UIAElement=None
windowHandle=kwargs.get('windowHandle')
if isinstance(relation,tuple):
UIAElement=UIAHandler.handler.clientObject.ElementFromPointBuildCache(POINT(relation[0],relation[1]),UIAHandler.handler.baseCacheRequest)
elif relation=="focus":
try:
UIAElement=UIAHandler.handler.clientObject.getFocusedElementBuildCache(UIAHandler.handler.baseCacheRequest)
# This object may be in a different window, so we need to recalculate the window handle.
kwargs['windowHandle']=None
except COMError:
log.debugWarning("getFocusedElement failed", exc_info=True)
else:
UIAElement=UIAHandler.handler.clientObject.ElementFromHandleBuildCache(windowHandle,UIAHandler.handler.baseCacheRequest)
if not UIAElement:
return False
kwargs['UIAElement']=UIAElement
return True
def getNormalizedUIATextRangeFromElement(self,UIAElement):
"""Simply fetches a UIA text range for the given UIAElement, allowing subclasses to process the range first."""
return UIATextRangeFromElement(self.UIATextPattern,UIAElement)
def __init__(self,windowHandle=None,UIAElement=None):
if not UIAElement:
raise ValueError("needs a UIA element")
self.UIAElement=UIAElement
UIACachedWindowHandle=UIAElement.cachedNativeWindowHandle
self.UIAIsWindowElement=bool(UIACachedWindowHandle)
if UIACachedWindowHandle:
windowHandle=UIACachedWindowHandle
if not windowHandle:
windowHandle=UIAHandler.handler.getNearestWindowHandle(UIAElement)
if not windowHandle:
raise InvalidNVDAObject("no windowHandle")
super(UIA,self).__init__(windowHandle=windowHandle)
def _isEqual(self,other):
if not isinstance(other,UIA):
return False
try:
return UIAHandler.handler.clientObject.CompareElements(self.UIAElement,other.UIAElement)
except:
return False
def _get_shouldAllowUIAFocusEvent(self):
try:
return bool(self.UIAElement.currentHasKeyboardFocus)
except COMError:
return True
def _getUIAPattern(self,ID,interface,cache=False):
punk=self.UIAElement.GetCachedPattern(ID) if cache else self.UIAElement.GetCurrentPattern(ID)
if punk:
return punk.QueryInterface(interface)
def _get_UIAInvokePattern(self):
self.UIAInvokePattern=self._getUIAPattern(UIAHandler.UIA_InvokePatternId,UIAHandler.IUIAutomationInvokePattern)
return self.UIAInvokePattern
def _get_UIAGridPattern(self):
self.UIAGridPattern=self._getUIAPattern(UIAHandler.UIA_GridPatternId,UIAHandler.IUIAutomationGridPattern)
return self.UIAGridPattern
def _get_UIATogglePattern(self):
self.UIATogglePattern=self._getUIAPattern(UIAHandler.UIA_TogglePatternId,UIAHandler.IUIAutomationTogglePattern)
return self.UIATogglePattern
def _get_UIASelectionItemPattern(self):
self.UIASelectionItemPattern=self._getUIAPattern(UIAHandler.UIA_SelectionItemPatternId,UIAHandler.IUIAutomationSelectionItemPattern)
return self.UIASelectionItemPattern
def _get_UIATextPattern(self):
self.UIATextPattern=self._getUIAPattern(UIAHandler.UIA_TextPatternId,UIAHandler.IUIAutomationTextPattern,cache=True)
return self.UIATextPattern
def _get_UIATextEditPattern(self):
if not isinstance(UIAHandler.handler.clientObject,UIAHandler.IUIAutomation3):
return None
self.UIATextEditPattern=self._getUIAPattern(UIAHandler.UIA_TextEditPatternId,UIAHandler.IUIAutomationTextEditPattern,cache=False)
return self.UIATextEditPattern
def _get_UIALegacyIAccessiblePattern(self):
self.UIALegacyIAccessiblePattern=self._getUIAPattern(UIAHandler.UIA_LegacyIAccessiblePatternId,UIAHandler.IUIAutomationLegacyIAccessiblePattern)
return self.UIALegacyIAccessiblePattern
_TextInfo=UIATextInfo
def _get_TextInfo(self):
if self.UIATextPattern: return self._TextInfo
textInfo=super(UIA,self).TextInfo
if textInfo is NVDAObjectTextInfo and self.UIAIsWindowElement and self.role==controlTypes.ROLE_WINDOW:
import displayModel
return displayModel.DisplayModelTextInfo
return textInfo
def setFocus(self):
self.UIAElement.setFocus()
def _get_devInfo(self):
info=super(UIA,self).devInfo
info.append("UIAElement: %r"%self.UIAElement)
try:
ret=self.UIAElement.currentAutomationID
except Exception as e:
ret="Exception: %s"%e
info.append("UIA automationID: %s"%ret)
try:
ret=self.UIAElement.cachedFrameworkID
except Exception as e:
ret="Exception: %s"%e
info.append("UIA frameworkID: %s"%ret)
try:
ret=str(self.UIAElement.getRuntimeID())
except Exception as e:
ret="Exception: %s"%e
info.append("UIA runtimeID: %s"%ret)
try:
ret=self.UIAElement.cachedProviderDescription
except Exception as e:
ret="Exception: %s"%e
info.append("UIA providerDescription: %s"%ret)
try:
ret=self.UIAElement.currentClassName
except Exception as e:
ret="Exception: %s"%e
info.append("UIA className: %s"%ret)
return info
def _get_name(self):
try:
return self.UIAElement.currentName
except COMError:
return ""
def _get_role(self):
role=UIAHandler.UIAControlTypesToNVDARoles.get(self.UIAElement.cachedControlType,controlTypes.ROLE_UNKNOWN)
if role==controlTypes.ROLE_BUTTON:
try:
s=self.UIACachedStatesElement.getCachedPropertyValueEx(UIAHandler.UIA_ToggleToggleStatePropertyId,True)
except COMError:
s=UIAHandler.handler.reservedNotSupportedValue
if s!=UIAHandler.handler.reservedNotSupportedValue:
role=controlTypes.ROLE_TOGGLEBUTTON
elif role in (controlTypes.ROLE_UNKNOWN,controlTypes.ROLE_PANE,controlTypes.ROLE_WINDOW) and self.windowHandle:
superRole=super(UIA,self).role
if superRole!=controlTypes.ROLE_WINDOW:
role=superRole
return role
def _get_description(self):
try:
return self.UIAElement.currentHelpText or ""
except COMError:
return ""
def _get_keyboardShortcut(self):
# Build the keyboard shortcuts list early for readability.
shortcuts = []
try:
accessKey = self.UIAElement.currentAccessKey
# #6779: Don't add access key to the shortcut list if UIA says access key is None, resolves concatenation error in focus events, object navigation and so on.
# In rare cases, access key itself is None.
if accessKey:
shortcuts.append(accessKey)
except (COMError, AttributeError):
pass
try:
acceleratorKey = self.UIAElement.currentAcceleratorKey
# Same case as access key.
if acceleratorKey:
shortcuts.append(acceleratorKey)
except (COMError, AttributeError):
pass
# #6790: Do not add two spaces unless both access key and accelerator are present in order to not waste string real estate.
return " ".join(shortcuts) if shortcuts else ""
def _get_UIACachedStatesElement(self):
statesCacheRequest=UIAHandler.handler.clientObject.createCacheRequest()
for prop in (UIAHandler.UIA_HasKeyboardFocusPropertyId,UIAHandler.UIA_SelectionItemIsSelectedPropertyId,UIAHandler.UIA_IsDataValidForFormPropertyId,UIAHandler.UIA_IsRequiredForFormPropertyId,UIAHandler.UIA_ValueIsReadOnlyPropertyId,UIAHandler.UIA_ExpandCollapseExpandCollapseStatePropertyId,UIAHandler.UIA_ToggleToggleStatePropertyId,UIAHandler.UIA_IsKeyboardFocusablePropertyId,UIAHandler.UIA_IsPasswordPropertyId,UIAHandler.UIA_IsSelectionItemPatternAvailablePropertyId,UIAHandler.UIA_IsEnabledPropertyId):
statesCacheRequest.addProperty(prop)
return self.UIAElement.buildUpdatedCache(statesCacheRequest)
def _get_states(self):
states=set()
e=self.UIACachedStatesElement
try:
hasKeyboardFocus=e.cachedHasKeyboardFocus
except COMError:
hasKeyboardFocus=False
if hasKeyboardFocus:
states.add(controlTypes.STATE_FOCUSED)
if e.cachedIsKeyboardFocusable:
states.add(controlTypes.STATE_FOCUSABLE)
if e.cachedIsPassword:
states.add(controlTypes.STATE_PROTECTED)
# Don't fetch the role unless we must, but never fetch it more than once.
role=None
if e.getCachedPropertyValue(UIAHandler.UIA_IsSelectionItemPatternAvailablePropertyId):
role=self.role
states.add(controlTypes.STATE_CHECKABLE if role==controlTypes.ROLE_RADIOBUTTON else controlTypes.STATE_SELECTABLE)
if e.getCachedPropertyValue(UIAHandler.UIA_SelectionItemIsSelectedPropertyId):
states.add(controlTypes.STATE_CHECKED if role==controlTypes.ROLE_RADIOBUTTON else controlTypes.STATE_SELECTED)
if not e.getCachedPropertyValueEx(UIAHandler.UIA_IsEnabledPropertyId,True):
states.add(controlTypes.STATE_UNAVAILABLE)
try:
isDataValid=e.getCachedPropertyValueEx(UIAHandler.UIA_IsDataValidForFormPropertyId,True)
except COMError:
isDataValid=UIAHandler.handler.reservedNotSupportedValue
if not isDataValid:
states.add(controlTypes.STATE_INVALID_ENTRY)
if e.getCachedPropertyValue(UIAHandler.UIA_IsRequiredForFormPropertyId):
states.add(controlTypes.STATE_REQUIRED)
try:
isReadOnly=e.getCachedPropertyValueEx(UIAHandler.UIA_ValueIsReadOnlyPropertyId,True)
except COMError:
isReadOnly=UIAHandler.handler.reservedNotSupportedValue
if isReadOnly and isReadOnly!=UIAHandler.handler.reservedNotSupportedValue:
states.add(controlTypes.STATE_READONLY)
try:
s=e.getCachedPropertyValueEx(UIAHandler.UIA_ExpandCollapseExpandCollapseStatePropertyId,True)
except COMError:
s=UIAHandler.handler.reservedNotSupportedValue
if s!=UIAHandler.handler.reservedNotSupportedValue:
if s==UIAHandler.ExpandCollapseState_Collapsed:
states.add(controlTypes.STATE_COLLAPSED)
elif s==UIAHandler.ExpandCollapseState_Expanded:
states.add(controlTypes.STATE_EXPANDED)
try:
s=e.getCachedPropertyValueEx(UIAHandler.UIA_ToggleToggleStatePropertyId,True)
except COMError:
s=UIAHandler.handler.reservedNotSupportedValue
if s!=UIAHandler.handler.reservedNotSupportedValue:
if not role:
role=self.role
if role==controlTypes.ROLE_TOGGLEBUTTON:
if s==UIAHandler.ToggleState_On:
states.add(controlTypes.STATE_PRESSED)
else:
states.add(controlTypes.STATE_CHECKABLE)
if s==UIAHandler.ToggleState_On:
states.add(controlTypes.STATE_CHECKED)
return states
def correctAPIForRelation(self, obj, relation=None):
if obj and self.windowHandle != obj.windowHandle and not obj.UIAElement.cachedNativeWindowHandle:
# The target element is not the root element for the window, so don't change API class; i.e. always use UIA.
return obj
return super(UIA, self).correctAPIForRelation(obj, relation)
def _get_parent(self):
try:
parentElement=UIAHandler.handler.baseTreeWalker.GetParentElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
parentElement=None
if not parentElement:
return super(UIA,self).parent
if not parentElement.CachedNativeWindowHandle and not self.UIAElement.CachedNativeWindowHandle:
# Neither self or parent have a window handle themselves, so their nearest window handle will be the same.
# Cache this on the parent if cached on self, to avoid fetching it later.
try:
parentElement._nearestWindowHandle=self.UIAElement._nearestWindowHandle
except AttributeError:
# _nearestWindowHandle may not exist on self if self was instantiated given a windowHandle.
pass
return self.correctAPIForRelation(UIA(UIAElement=parentElement),relation="parent")
def _get_previous(self):
try:
previousElement=UIAHandler.handler.baseTreeWalker.GetPreviousSiblingElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not previousElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=previousElement))
def _get_next(self):
try:
nextElement=UIAHandler.handler.baseTreeWalker.GetNextSiblingElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not nextElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=nextElement))
def _get_firstChild(self):
try:
firstChildElement=UIAHandler.handler.baseTreeWalker.GetFirstChildElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not firstChildElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=firstChildElement))
def _get_lastChild(self):
try:
lastChildElement=UIAHandler.handler.baseTreeWalker.GetLastChildElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not lastChildElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=lastChildElement))
def _get_children(self):
childrenCacheRequest=UIAHandler.handler.baseCacheRequest.clone()
childrenCacheRequest.TreeScope=UIAHandler.TreeScope_Children
try:
cachedChildren=self.UIAElement.buildUpdatedCache(childrenCacheRequest).getCachedChildren()
except COMError as e:
log.debugWarning("Could not fetch cached children from UIA element: %s"%e)
return super(UIA,self).children
children=[]
if not cachedChildren:
# GetCachedChildren returns null if there are no children.
return children
for index in xrange(cachedChildren.length):
e=cachedChildren.getElement(index)
windowHandle=e.cachedNativeWindowHandle or self.windowHandle
children.append(self.correctAPIForRelation(UIA(windowHandle=windowHandle,UIAElement=e)))
return children
def _get_rowNumber(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridItemRowPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val+1
raise NotImplementedError
def _get_rowSpan(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridItemRowSpanPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
return 1
def _get_rowHeaderText(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_TableItemRowHeaderItemsPropertyId ,True)
if val==UIAHandler.handler.reservedNotSupportedValue:
raise NotImplementedError
val=val.QueryInterface(UIAHandler.IUIAutomationElementArray)
textList=[]
for i in xrange(val.length):
e=val.getElement(i)
obj=UIA(windowHandle=self.windowHandle,UIAElement=e.buildUpdatedCache(UIAHandler.handler.baseCacheRequest))
if not obj: continue
text=obj.makeTextInfo(textInfos.POSITION_ALL).text
textList.append(text)
return " ".join(textList)
def _get_columnNumber(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridItemColumnPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val+1
raise NotImplementedError
def _get_columnSpan(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridItemColumnSpanPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
return 1
def _get_columnHeaderText(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_TableItemColumnHeaderItemsPropertyId ,True)
if val==UIAHandler.handler.reservedNotSupportedValue:
raise NotImplementedError
val=val.QueryInterface(UIAHandler.IUIAutomationElementArray)
textList=[]
for i in xrange(val.length):
e=val.getElement(i)
obj=UIA(windowHandle=self.windowHandle,UIAElement=e.buildUpdatedCache(UIAHandler.handler.baseCacheRequest))
if not obj: continue
text=obj.makeTextInfo(textInfos.POSITION_ALL).text
textList.append(text)
return " ".join(textList)
def _get_rowCount(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridRowCountPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
raise NotImplementedError
def _get_columnCount(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridColumnCountPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
raise NotImplementedError
def _get_table(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_GridItemContainingGridPropertyId ,True)
if val and val!=UIAHandler.handler.reservedNotSupportedValue:
e=val.QueryInterface(UIAHandler.IUIAutomationElement).buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
return UIA(UIAElement=e)
raise NotImplementedError
def _get_processID(self):
return self.UIAElement.cachedProcessId
def _get_location(self):
try:
r=self.UIAElement.currentBoundingRectangle
except COMError:
return None
left=r.left
top=r.top
width=r.right-left
height=r.bottom-top
return left,top,width,height
def _get_value(self):
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_RangeValueValuePropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
minVal=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_RangeValueMinimumPropertyId,False)
maxVal=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_RangeValueMaximumPropertyId,False)
if minVal==maxVal:
# There is no range.
return "0"
val=((val-minVal)/(maxVal-minVal))*100.0
return "%d"%round(val,4)
val=self.UIAElement.getCurrentPropertyValueEx(UIAHandler.UIA_ValueValuePropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
def _get_actionCount(self):
if self.UIAInvokePattern:
return 1
return 0
def getActionName(self,index=None):
if not index:
index=self.defaultActionIndex
if index==0 and self.UIAInvokePattern:
return _("invoke")
raise NotImplementedError
def doAction(self,index=None):
if not index:
index=self.defaultActionIndex
if index==0:
if self.UIAInvokePattern:
self.UIAInvokePattern.Invoke()
elif self.UIATogglePattern:
self.UIATogglePattern.toggle()
elif self.UIASelectionItemPattern:
self.UIASelectionItemPattern.select()
return
raise NotImplementedError
def _get_hasFocus(self):
try:
return self.UIAElement.currentHasKeyboardFocus
except COMError:
return False
def _get_positionInfo(self):
info=super(UIA,self).positionInfo or {}
itemIndex=0
try:
itemIndex=self.UIAElement.getCurrentPropertyValue(UIAHandler.UIA_PositionInSetPropertyId)
except COMError:
pass
if itemIndex>0:
info['indexInGroup']=itemIndex
itemCount=0
try:
itemCount=self.UIAElement.getCurrentPropertyValue(UIAHandler.UIA_SizeOfSetPropertyId)
except COMError:
pass
if itemCount>0:
info['similarItemsInGroup']=itemCount
try:
level=self.UIAElement.getCurrentPropertyValue(UIAHandler.UIA_LevelPropertyId)
except COMError:
level=None
if level is not None and level>0:
info["level"]=level
return info
def scrollIntoView(self):
pass
def _get_controllerFor(self):
e=self.UIAElement.getCurrentPropertyValue(UIAHandler.UIA_ControllerForPropertyId)
if UIAHandler.handler.clientObject.checkNotSupported(e):
return None
a=e.QueryInterface(UIAHandler.IUIAutomationElementArray)
objList=[]
for index in xrange(a.length):
e=a.getElement(index)
e=e.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
obj=UIA(UIAElement=e)
if obj:
objList.append(obj)
return objList
def event_UIA_elementSelected(self):
self.event_stateChange()
def event_valueChange(self):
if isinstance(self, EditableTextWithoutAutoSelectDetection):
return
return super(UIA, self).event_valueChange()
class TreeviewItem(UIA):
def _get_value(self):
return ""
def _get__level(self):
level=0
obj=self
while obj:
level+=1
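# obj.parent is assigned back to itself so the fetched parent is cached on
# the object and walking the ancestor chain doesn't refetch it.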
parent=obj.parent=obj.parent
if not parent or parent==obj or parent.role!=controlTypes.ROLE_TREEVIEWITEM:
return level
obj=parent
return level
def _get_positionInfo(self):
info=super(TreeviewItem,self).positionInfo or {}
info['level']=self._level
return info
class UIColumnHeader(UIA):
def _get_description(self):
description=super(UIColumnHeader,self).description
try:
itemStatus=self.UIAElement.currentItemStatus
except COMError:
itemStatus=""
return " ".join([x for x in (description,itemStatus) if x and not x.isspace()])
class UIItem(UIA):
"""UIA list items in an Items View repeate the name as the value"""
def _get_positionInfo(self):
info={}
itemIndex=0
try:
itemIndex=self.UIAElement.getCurrentPropertyValue(UIAHandler.handler.ItemIndex_PropertyId)
except COMError:
pass
if itemIndex>0:
info['indexInGroup']=itemIndex
try:
e=self.UIAElement.getCurrentPropertyValue(UIAHandler.UIA_SelectionItemSelectionContainerPropertyId)
if e: e=e.QueryInterface(UIAHandler.IUIAutomationElement)
except COMError:
e=None
if e:
try:
itemCount=e.getCurrentPropertyValue(UIAHandler.handler.ItemCount_PropertyId)
except COMError:
itemCount=0
if itemCount>0:
info['similarItemsInGroup']=itemCount
return info
def _get_value(self):
return ""
class SensitiveSlider(UIA):
"""A slider that tends to give focus to its thumb control"""
def event_focusEntered(self):
self.reportFocus()
def event_valueChange(self):
focusParent=api.getFocusObject().parent
if self==focusParent:
speech.speakObjectProperties(self,value=True,reason=controlTypes.REASON_CHANGE)
else:
super(SensitiveSlider,self).event_valueChange()
class ControlPanelLink(UIA):
def _get_description(self):
desc=super(ControlPanelLink,self).description
try:
i=desc.find('\n')
except:
i=None
if i:
desc=desc[i+1:]
return desc
class ComboBoxWithoutValuePattern(UIA):
"""A combo box without the Value pattern.
UIA combo boxes don't necessarily support the Value pattern unless they take arbitrary text values.
However, NVDA expects combo boxes to have a value and to fire valueChange events.
The value is obtained by retrieving the selected item's name.
The valueChange event is fired on this object by L{ListItem.event_stateChange}.
"""
def _get_UIASelectionPattern(self):
punk = self.UIAElement.GetCurrentPattern(UIAHandler.UIA_SelectionPatternId)
if punk:
self.UIASelectionPattern = punk.QueryInterface(UIAHandler.IUIAutomationSelectionPattern)
else:
self.UIASelectionPattern = None
return self.UIASelectionPattern
def _get_value(self):
try:
return self.UIASelectionPattern.GetCurrentSelection().GetElement(0).CurrentName
except COMError:
return None
class ListItem(UIA):
def event_stateChange(self):
if not self.hasFocus:
parent = self.parent
focus=api.getFocusObject()
if parent and isinstance(parent, ComboBoxWithoutValuePattern) and parent==focus:
# This is an item in a combo box without the Value pattern.
# This item has been selected, so notify the combo box that its value has changed.
focus.event_valueChange()
super(ListItem, self).event_stateChange()
class Dialog(Dialog):
role=controlTypes.ROLE_DIALOG
class Toast_win8(Notification, UIA):
event_UIA_toolTipOpened=Notification.event_alert
class Toast_win10(Notification, UIA):
# #6096: Windows 10 Redstone build 14366 and later does not fire tooltip event when toasts appear.
if sys.getwindowsversion().build > 10586:
event_UIA_window_windowOpen=Notification.event_alert
else:
event_UIA_toolTipOpened=Notification.event_alert
#WpfTextView fires name state changes once a second, plus when IUIAutomationTextRange::GetAttributeValue is called.
#This causes major lags when using this control with Braille in NVDA. (#2759)
#For now just ignore the events.
class WpfTextView(UIA):
def event_nameChange(self):
return
def event_stateChange(self):
return
| 1 | 19,368 | Extraneous blank line. | nvaccess-nvda | py |
@@ -20,7 +20,8 @@ from codechecker_lib.analyzers import analyzer_clangsa
from codechecker_lib.analyzers import config_handler_clang_tidy
from codechecker_lib.analyzers import config_handler_clangsa
from codechecker_lib.analyzers import result_handler_clang_tidy
-from codechecker_lib.analyzers import result_handler_clangsa
+from codechecker_lib.analyzers import result_handler_plist_to_db
+from codechecker_lib.analyzers import result_handler_plist_to_stdout
LOG = LoggerFactory.get_new_logger('ANALYZER TYPES')
| 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Supported analyzer types.
"""
import os
import re
import sys
from codechecker_lib import analyzer_env
from codechecker_lib import client
from codechecker_lib import host_check
from codechecker_lib.logger import LoggerFactory
from codechecker_lib.analyzers import analyzer_clang_tidy
from codechecker_lib.analyzers import analyzer_clangsa
from codechecker_lib.analyzers import config_handler_clang_tidy
from codechecker_lib.analyzers import config_handler_clangsa
from codechecker_lib.analyzers import result_handler_clang_tidy
from codechecker_lib.analyzers import result_handler_clangsa
LOG = LoggerFactory.get_new_logger('ANALYZER TYPES')
CLANG_SA = 'clangsa'
CLANG_TIDY = 'clang-tidy'
supported_analyzers = {CLANG_SA, CLANG_TIDY}
def is_sa_checker_name(checker_name):
"""
Match for Clang Static analyzer names like:
- unix
- unix.Malloc
- security.insecureAPI
- security.insecureAPI.gets
"""
# No '-' is allowed in the checker name.
sa_checker_name = r'^[^-]+$'
ptn = re.compile(sa_checker_name)
if ptn.match(checker_name):
return True
return False
def is_tidy_checker_name(checker_name):
"""
Match for Clang Tidy analyzer names like:
-*
modernize-*
clang-diagnostic-*
cert-fio38-c
google-global-names-in-headers
"""
# Must contain at least one '-'.
tidy_checker_name = r'^(?=.*[\-]).+$'
ptn = re.compile(tidy_checker_name)
if ptn.match(checker_name):
return True
return False
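
# Illustrative sketch (not part of the original module; sample checker names
# are assumptions): the two matchers above partition checker names by the
# presence of a '-' character.
def _demo_checker_name_matching():
    assert is_sa_checker_name('unix.Malloc')
    assert not is_sa_checker_name('modernize-use-nullptr')
    assert is_tidy_checker_name('modernize-use-nullptr')
    assert not is_tidy_checker_name('unix.Malloc')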
def check_supported_analyzers(analyzers, context):
"""
Check if the selected analyzers are supported.
"""
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_binaries = context.analyzer_binaries
enabled_analyzers = set()
for analyzer_name in analyzers:
if analyzer_name not in supported_analyzers:
LOG.error('Unsupported analyzer ' + analyzer_name + ' !')
sys.exit(1)
# Get the compiler binary to check if it can run.
available_analyzer = True
analyzer_bin = analyzer_binaries.get(analyzer_name)
if not analyzer_bin:
LOG.debug_analyzer('Failed to detect analyzer binary ' +
analyzer_name)
available_analyzer = False
if not host_check.check_clang(analyzer_bin, check_env):
LOG.warning('Failed to run analyzer ' + analyzer_name + ' !')
available_analyzer = False
if available_analyzer:
enabled_analyzers.add(analyzer_name)
return enabled_analyzers
def construct_analyzer_type(analyzer_type, config_handler, buildaction):
"""
Construct a specific analyzer based on the type.
"""
if analyzer_type == CLANG_SA:
LOG.debug_analyzer('Constructing clangSA analyzer')
analyzer = analyzer_clangsa.ClangSA(config_handler,
buildaction)
return analyzer
elif analyzer_type == CLANG_TIDY:
LOG.debug_analyzer("Constructing clang-tidy analyzer")
analyzer = analyzer_clang_tidy.ClangTidy(config_handler,
buildaction)
return analyzer
else:
LOG.error('Not supported analyzer type')
return None
def construct_analyzer(buildaction,
analyzer_config_map):
"""
Construct an analyzer.
"""
try:
LOG.debug_analyzer('Constructing analyzer')
analyzer_type = buildaction.analyzer_type
# Get the proper config handler for this analyzer type.
config_handler = analyzer_config_map.get(analyzer_type)
analyzer = construct_analyzer_type(analyzer_type,
config_handler,
buildaction)
return analyzer
except Exception as ex:
LOG.debug_analyzer(ex)
return None
def initialize_checkers(config_handler,
checkers,
default_checkers=None,
cmdline_checkers=None):
# By default disable all checkers.
for checker_name, description in checkers:
config_handler.add_checker(checker_name, False, description)
# Set default enabled or disabled checkers.
if default_checkers:
for checker in default_checkers:
for checker_name, enabled in checker.items():
if enabled:
config_handler.enable_checker(checker_name)
else:
config_handler.disable_checker(checker_name)
# Set user defined enabled or disabled checkers from the command line.
if cmdline_checkers:
for checker_name, enabled in cmdline_checkers:
if enabled:
config_handler.enable_checker(checker_name)
else:
config_handler.disable_checker(checker_name)
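
# Illustrative sketch only (checker names are assumptions): the two checker
# sources above use different shapes -- default_checkers is a list of
# single-entry dicts from the config file, while cmdline_checkers is a list
# of (name, enabled) tuples.
#
#   initialize_checkers(config_handler,
#                       checkers,
#                       default_checkers=[{'unix.Malloc': True}],
#                       cmdline_checkers=[('core.DivideZero', False)])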
def __replace_env_var(cfg_file):
def replacer(matchobj):
env_var = matchobj.group(1)
if matchobj.group(1) not in os.environ:
LOG.error(env_var + ' environment variable not set in ' + cfg_file)
return ''
return os.environ[env_var]
return replacer
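
# Illustrative sketch only (file name and variable are assumptions): the
# factory above returns a replacer for re.sub, expanding '$(VAR)' tokens in
# an analyzer arguments file to environment variable values.
#
#   os.environ['MY_INCLUDE'] = '/opt/include'
#   re.sub(r'\$\((.*?)\)', __replace_env_var('sa_args.cfg'),
#          '-I$(MY_INCLUDE)')  # -> '-I/opt/include'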
def __build_clangsa_config_handler(args, context):
"""
Build the config handler for clang static analyzer.
Handle config options from the command line and config files.
"""
config_handler = config_handler_clangsa.ClangSAConfigHandler()
config_handler.analyzer_plugins_dir = context.checker_plugin
config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_SA)
config_handler.compiler_resource_dir = context.compiler_resource_dir
config_handler.compiler_sysroot = context.compiler_sysroot
config_handler.system_includes = context.extra_system_includes
config_handler.includes = context.extra_includes
try:
with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
config_handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
__replace_env_var(args.clangsa_args_cfg_file),
sa_cfg.read().strip())
except IOError as ioerr:
LOG.debug_analyzer(ioerr)
except AttributeError as aerr:
# No clangsa arguments file was given in the command line.
LOG.debug_analyzer(aerr)
analyzer = construct_analyzer_type(CLANG_SA, config_handler, None)
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
checkers = analyzer.get_analyzer_checkers(config_handler, check_env)
    # Read clang static analyzer checkers from the config file.
clang_sa_checkers = context.default_checkers_config.get(CLANG_SA +
'_checkers')
try:
cmdline_checkers = args.ordered_checkers
except AttributeError:
        LOG.debug_analyzer('No checkers were defined in the command line for ' +
                           CLANG_SA)
cmdline_checkers = None
initialize_checkers(config_handler,
checkers,
clang_sa_checkers,
cmdline_checkers)
return config_handler
def __build_clang_tidy_config_handler(args, context):
"""
Build the config handler for clang tidy analyzer.
Handle config options from the command line and config files.
"""
config_handler = config_handler_clang_tidy.ClangTidyConfigHandler()
config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_TIDY)
config_handler.compiler_resource_dir = context.compiler_resource_dir
config_handler.compiler_sysroot = context.compiler_sysroot
config_handler.system_includes = context.extra_system_includes
config_handler.includes = context.extra_includes
try:
with open(args.tidy_args_cfg_file, 'rb') as tidy_cfg:
config_handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       __replace_env_var(args.tidy_args_cfg_file),
                       tidy_cfg.read().strip())
except IOError as ioerr:
LOG.debug_analyzer(ioerr)
except AttributeError as aerr:
# No clang tidy arguments file was given in the command line.
LOG.debug_analyzer(aerr)
analyzer = construct_analyzer_type(CLANG_TIDY, config_handler, None)
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
checkers = analyzer.get_analyzer_checkers(config_handler, check_env)
# Read clang-tidy checkers from the config file.
clang_tidy_checkers = context.default_checkers_config.get(CLANG_TIDY +
'_checkers')
try:
cmdline_checkers = args.ordered_checkers
except AttributeError:
LOG.debug_analyzer('No checkers were defined in '
'the command line for ' +
CLANG_TIDY)
cmdline_checkers = None
initialize_checkers(config_handler,
checkers,
clang_tidy_checkers,
cmdline_checkers)
return config_handler
def build_config_handlers(args, context, enabled_analyzers, connection=None):
"""
    Construct multiple config handlers and, if there is a connection,
    store the configs in the database.
    Handle config from the command line, or from a config file if no
    command line config is given.
    The supported command line config format is JSON. Tidy also supports
    YAML, but no standard library for YAML parsing is available in Python.
"""
run_id = context.run_id
analyzer_config_map = {}
for ea in enabled_analyzers:
if ea == CLANG_SA:
config_handler = __build_clangsa_config_handler(args, context)
analyzer_config_map[ea] = config_handler
elif ea == CLANG_TIDY:
config_handler = __build_clang_tidy_config_handler(args, context)
analyzer_config_map[ea] = config_handler
else:
LOG.debug_analyzer('Not supported analyzer type. '
'No configuration handler will be created.')
if connection:
# Collect all configuration options and store them together.
configs = []
for _, config_handler in analyzer_config_map.items():
configs.extend(config_handler.get_checker_configs())
client.replace_config_in_db(run_id, connection, configs)
return analyzer_config_map
def construct_result_handler(args,
buildaction,
run_id,
report_output,
severity_map,
skiplist_handler,
lock,
store_to_db=False):
"""
Construct a result handler.
"""
if store_to_db:
# Create a result handler which stores the results into a database.
if buildaction.analyzer_type == CLANG_SA:
csa_res_handler = result_handler_clangsa.ClangSAPlistToDB(
buildaction,
report_output,
run_id)
csa_res_handler.severity_map = severity_map
csa_res_handler.skiplist_handler = skiplist_handler
return csa_res_handler
elif buildaction.analyzer_type == CLANG_TIDY:
ct_res_handler = result_handler_clang_tidy.ClangTidyPlistToDB(
buildaction,
report_output,
run_id)
ct_res_handler.severity_map = severity_map
ct_res_handler.skiplist_handler = skiplist_handler
return ct_res_handler
else:
LOG.error('Not supported analyzer type.')
return None
else:
if buildaction.analyzer_type == CLANG_SA:
csa_res_handler = result_handler_clangsa.ClangSAPlistToStdout(
buildaction,
report_output,
lock)
csa_res_handler.print_steps = args.print_steps
csa_res_handler.skiplist_handler = skiplist_handler
return csa_res_handler
elif buildaction.analyzer_type == CLANG_TIDY:
ct_res_handler = result_handler_clang_tidy.ClangTidyPlistToStdout(
buildaction,
report_output,
lock)
ct_res_handler.severity_map = severity_map
ct_res_handler.skiplist_handler = skiplist_handler
return ct_res_handler
else:
LOG.error('Not supported analyzer type.')
return None
| 1 | 6,275 | Is this the same import as in the line 20? | Ericsson-codechecker | c |
@@ -5,6 +5,7 @@ class Template < ActiveRecord::Base
validates_with TemplateLinksValidator
before_validation :set_defaults
+ after_update :reconcile_published, if: Proc.new { |template| template.published? && template.version.present? && template.version > 0 }
# Stores links as a JSON object: { funder: [{"link":"www.example.com","text":"foo"}, ...], sample_plan: [{"link":"www.example.com","text":"foo"}, ...]}
# The links are validated by the custom validator located at validators/template_links_validator.rb | 1 | class Template < ActiveRecord::Base
include GlobalHelpers
include ActiveModel::Validations
include TemplateScope
validates_with TemplateLinksValidator
before_validation :set_defaults
  # Stores links as a JSON object: { funder: [{"link":"www.example.com","text":"foo"}, ...], sample_plan: [{"link":"www.example.com","text":"foo"}, ...]}
  # The links are validated by the custom validator located at validators/template_links_validator.rb
serialize :links, JSON
##
# Associations
belongs_to :org
has_many :plans
has_many :phases, dependent: :destroy
has_many :sections, through: :phases
has_many :questions, through: :sections
##
# Possibly needed for active_admin
  # -relies on protected_attributes gem as the syntax is deprecated in rails 4.2
attr_accessible :id, :org_id, :description, :published, :title, :locale, :customization_of,
:is_default, :guidance_group_ids, :org, :plans, :phases, :family_id,
:archived, :version, :visibility, :published, :links, :as => [:default, :admin]
# A standard template should be organisationally visible. Funder templates that are
# meant for external use will be publicly visible. This allows a funder to create 'funder' as
# well as organisational templates. The default template should also always be publicly_visible
enum visibility: [:organisationally_visible, :publicly_visible]
# defines the export setting for a template object
has_settings :export, class_name: 'Settings::Template' do |s|
s.key :export, defaults: Settings::Template::DEFAULT_SETTINGS
end
validates :org, :title, presence: {message: _("can't be blank")}
# Class methods gets defined within this
class << self
def current(family_id)
unarchived.where(family_id: family_id).order(version: :desc).first
end
def live(family_id)
if family_id.respond_to?(:each)
unarchived.where(family_id: family_id, published: true)
else
unarchived.where(family_id: family_id, published: true).first
end
end
def default
unarchived.where(is_default: true, published: true).order(:version).last
end
def find_or_generate_version!(template)
if template.latest?
if template.generate_version?
return template.generate_version!
end
return template
end
raise _('A historical template cannot be retrieved for being modified')
end
end
# Creates a copy of the current template
# raises ActiveRecord::RecordInvalid when save option is true and validations fails
def deep_copy(attributes: {}, **options)
copy = self.dup
if attributes.respond_to?(:each_pair)
attributes.each_pair{ |attribute, value| copy.send("#{attribute}=".to_sym, value) if copy.respond_to?("#{attribute}=".to_sym) }
end
copy.save! if options.fetch(:save, false)
options[:template_id] = copy.id
self.phases.each{ |phase| copy.phases << phase.deep_copy(options) }
return copy
end
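
  # Illustrative sketch only (attribute values are assumptions): deep_copy
  # overrides the given attributes on the duplicate, forwards the remaining
  # options to each copied phase, and with save: true persists the copy,
  # raising ActiveRecord::RecordInvalid if validations fail.
  #
  #   copy = template.deep_copy(
  #     attributes: { title: 'Draft copy', published: false },
  #     save: true)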
# Retrieves the template's org or the org of the template this one is derived
  # from if it is a customization
def base_org
if self.customization_of.present?
return Template.where(family_id: self.customization_of).first.org
else
return self.org
end
end
# Returns whether or not this is the latest version of the current template's family
def latest?
return (self.id == Template.latest_version(self.family_id).pluck(:id).first)
end
# Determines whether or not a new version should be generated
def generate_version?
return self.published
end
# Determines whether or not a customization for the customizing_org passed should be generated
def customize?(customizing_org)
if customizing_org.is_a?(Org)
return !Template.unarchived.where(customization_of: self.family_id, org: customizing_org).exists?
end
return false
end
# Determines whether or not a customized template should be upgraded
def upgrade_customization?
if customization_of.present?
funder_template = Template.published(self.customization_of).select(:created_at).first
if funder_template.present?
return funder_template.created_at > self.created_at
end
end
return false
end
# Returns a new unpublished copy of self with a new family_id, version = zero for the specified org
def generate_copy!(org)
raise _('generate_copy! requires an organisation target') unless org.is_a?(Org) # Assume customizing_org is persisted
template = deep_copy(
attributes: {
version: 0,
published: false,
family_id: new_family_id,
org: org,
is_default: false,
title: _('Copy of %{template}') % { template: self.title }
}, modifiable: true, save: true)
return template
end
# Generates a new copy of self with an incremented version number
def generate_version!
raise _('generate_version! requires a published template') unless published
template = deep_copy(
attributes: {
version: self.version+1,
published: false,
org: self.org
}, save: true)
return template
end
# Generates a new copy of self for the specified customizing_org
def customize!(customizing_org)
raise _('customize! requires an organisation target') unless customizing_org.is_a?(Org) # Assume customizing_org is persisted
raise _('customize! requires a template from a funder') unless org.funder_only? || self.is_default # Assume self has org associated
customization = deep_copy(
attributes: {
version: 0,
published: false,
family_id: new_family_id,
customization_of: self.family_id,
org: customizing_org,
visibility: Template.visibilities[:organisationally_visible],
is_default: false
}, modifiable: false, save: true)
return customization
end
# Generates a new copy of self including latest changes from the funder this template is customized_of
def upgrade_customization!
raise _('upgrade_customization! requires a customised template') unless customization_of.present?
funder_template = Template.published(self.customization_of).first
raise _('upgrade_customization! cannot be carried out since there is no published template of its current funder') unless funder_template.present?
source = deep_copy(attributes: { version: self.version+1, published: false }) # preserves modifiable flags from the self template copied
# Creates a new customisation for the published template whose family_id is self.customization_of
customization = funder_template.deep_copy(
attributes: {
version: source.version,
published: source.published,
family_id: source.family_id,
customization_of: source.customization_of,
org: source.org,
visibility: Template.visibilities[:organisationally_visible],
is_default: false
}, modifiable: false, save: true)
# Sorts the phases from the source template, i.e. self
sorted_phases = source.phases.sort{ |phase1,phase2| phase1.number <=> phase2.number }
# Merges modifiable sections or questions from source into customization template object
customization.phases.each do |customization_phase|
# Search for the phase in the source template whose number matches the customization_phase
candidate_phase = sorted_phases.bsearch{ |phase| customization_phase.number <=> phase.number }
if candidate_phase.present? # The funder could have added this new phase after the customisation took place
# Selects modifiable sections from the candidate_phase
modifiable_sections = candidate_phase.sections.select{ |section| section.modifiable }
# Attaches modifiable sections into the customization_phase
modifiable_sections.each{ |modifiable_section| customization_phase.sections << modifiable_section }
# Sorts the sections for the customization_phase
sorted_sections = customization_phase.sections.sort{ |section1, section2| section1.number <=> section2.number }
# Selects unmodifiable sections from the candidate_phase
unmodifiable_sections = candidate_phase.sections.select{ |section| !section.modifiable }
unmodifiable_sections.each do |unmodifiable_section|
# Search for modifiable questions within the unmodifiable_section from candidate_phase
modifiable_questions = unmodifiable_section.questions.select{ |question| question.modifiable }
          customization_section = sorted_sections.bsearch{ |section| unmodifiable_section.number <=> section.number }
          if customization_section.present? # The funder could have deleted the section
            modifiable_questions.each{ |modifiable_question| customization_section.questions << modifiable_question }
            # Search for unmodifiable questions within the unmodifiable_section in case the source template added annotations
            unmodifiable_questions = unmodifiable_section.questions.select{ |question| !question.modifiable }
            sorted_questions = customization_section.questions.sort{ |question1, question2| question1.number <=> question2.number }
            unmodifiable_questions.each do |unmodifiable_question|
              customization_question = sorted_questions.bsearch{ |question| unmodifiable_question.number <=> question.number }
              if customization_question.present? # The funder could have deleted the question
                annotations_added_by_customiser = unmodifiable_question.annotations.select{ |annotation| annotation.org_id == source.org_id }
                annotations_added_by_customiser.each{ |annotation| customization_question.annotations << annotation }
              end
            end
          end
end
end
end
# Appends the modifiable phases from source
source.phases.select{ |phase| phase.modifiable }.each{ |modifiable_phase| customization.phases << modifiable_phase }
return customization
end
##
# convert the given template to a hash and return with all it's associations
# to use, please pre-fetch org, phases, section, questions, annotations,
# question_options, question_formats,
# TODO: Themes & guidance?
#
# @return [hash] hash of template, phases, sections, questions, question_options, annotations
# TODO: If there is time to update the UI to stop using hashes, remove this method
def to_hash
hash = {}
hash[:template] = {}
hash[:template][:data] = self
hash[:template][:org] = self.org
phases = {}
hash[:template][:phases] = phases
self.phases.each do |phase|
phases[phase.number] = {}
phases[phase.number][:data] = phase
phases[phase.number][:sections] = {}
phase.sections.each do |section|
phases[phase.number][:sections][section.number] = {}
phases[phase.number][:sections][section.number][:data] = section
phases[phase.number][:sections][section.number][:questions] = {}
section.questions.each do |question|
phases[phase.number][:sections][section.number][:questions][question.number] = {}
phases[phase.number][:sections][section.number][:questions][question.number][:data] = question
phases[phase.number][:sections][section.number][:questions][question.number][:annotations] = {}
question.annotations.each do |annotation|
phases[phase.number][:sections][section.number][:questions][question.number][:annotations][annotation.id] = {}
phases[phase.number][:sections][section.number][:questions][question.number][:annotations][annotation.id][:data] = annotation
end
phases[phase.number][:sections][section.number][:questions][question.number][:question_options] = {}
question.question_options.each do |question_option|
phases[phase.number][:sections][section.number][:questions][question.number][:question_options][:data] = question_option
phases[phase.number][:sections][section.number][:questions][question.number][:question_format] = question.question_format
end
end
end
end
return hash
end
private
# Generate a new random family identifier
def new_family_id
family_id = loop do
random = rand 2147483647
break random unless Template.exists?(family_id: random)
end
family_id
end
# Default values to set before running any validation
def set_defaults
self.published ||= false
self.archived ||= false
self.is_default ||= false
self.version ||= 0
self.visibility = ((self.org.present? && self.org.funder_only?) || self.is_default?) ? Template.visibilities[:publicly_visible] : Template.visibilities[:organisationally_visible]
self.customization_of ||= nil
self.family_id ||= new_family_id
self.archived ||= false
self.links ||= { funder: [], sample_plan: [] }
end
end
| 1 | 17,591 | why those additional checks after published? template.version should always be present and greater than zero | DMPRoadmap-roadmap | rb |
@@ -61,6 +61,16 @@ type ExternalEntityReference struct {
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
+// EntityReference represents a reference to either a Pod or an ExternalEntity.
+// TODO: replace Pod and ExternalEntity in GroupMember to embed EntityReference
+// when controlplane version is bumped?
+type EntityReference struct {
+ // Pod maintains the reference to the Pod.
+ Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
+ // ExternalEntity maintains the reference to the ExternalEntity.
+ ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,2,opt,name=externalEntity"`
+}
+
// GroupMember represents resource member to be populated in Groups.
// This supersedes GroupMemberPod, and will eventually replace it.
type GroupMember struct { | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
statsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/stats/v1alpha1"
)
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroup is the message format of antrea/pkg/controller/types.AppliedToGroup in an API response.
type AppliedToGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// GroupMembers is list of resources selected by this group.
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,2,rep,name=groupMembers"`
}
// PodReference represents a Pod Reference.
type PodReference struct {
// The name of this pod.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this pod.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// NamedPort represents a Port with a name on Pod.
type NamedPort struct {
// Port represents the Port number.
Port int32 `json:"port,omitempty" protobuf:"varint,1,opt,name=port"`
// Name represents the associated name with this Port number.
Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
// Protocol for port. Must be UDP, TCP, or SCTP.
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol"`
}
// ExternalEntityReference represents a ExternalEntity Reference.
type ExternalEntityReference struct {
// The name of this ExternalEntity.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this ExternalEntity.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// GroupMember represents resource member to be populated in Groups.
// This supersedes GroupMemberPod, and will eventually replace it.
type GroupMember struct {
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
// ExternalEntity maintains the reference to the ExternalEntity.
ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,2,opt,name=externalEntity"`
	// IPs is the list of IP addresses of the Endpoints associated with the GroupMember.
IPs []IPAddress `json:"ips,omitempty" protobuf:"bytes,3,rep,name=ips"`
	// Ports is the list of NamedPorts of the GroupMember.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
}
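
// Illustrative sketch only (names and addresses are assumptions): a
// GroupMember carries the Pod/ExternalEntity references directly today; the
// EntityReference type introduced in the patch above could eventually embed
// both, per its TODO.
//
//	member := GroupMember{
//		Pod:   &PodReference{Name: "nginx-0", Namespace: "default"},
//		IPs:   []IPAddress{IPAddress([]byte{10, 0, 0, 5})},
//		Ports: []NamedPort{{Port: 80, Name: "http", Protocol: ProtocolTCP}},
//	}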
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupPatch describes the incremental update of an AppliedToGroup.
type AppliedToGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,2,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,3,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupList is a list of AppliedToGroup objects.
type AppliedToGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AppliedToGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroup is the message format of antrea/pkg/controller/types.AddressGroup in an API response.
type AddressGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,2,rep,name=groupMembers"`
}
// IPAddress describes a single IP address. Either an IPv4 or IPv6 address must be set.
type IPAddress []byte
// IPNet describes an IP network.
type IPNet struct {
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
PrefixLength int32 `json:"prefixLength,omitempty" protobuf:"varint,2,opt,name=prefixLength"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupPatch describes the incremental update of an AddressGroup.
type AddressGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,2,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,3,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupList is a list of AddressGroup objects.
type AddressGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AddressGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
type NetworkPolicyType string
const (
K8sNetworkPolicy NetworkPolicyType = "K8sNetworkPolicy"
AntreaClusterNetworkPolicy NetworkPolicyType = "AntreaClusterNetworkPolicy"
AntreaNetworkPolicy NetworkPolicyType = "AntreaNetworkPolicy"
)
type NetworkPolicyReference struct {
// Type of the NetworkPolicy.
Type NetworkPolicyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=NetworkPolicyType"`
// Namespace of the NetworkPolicy. It's empty for Antrea ClusterNetworkPolicy.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
// Name of the NetworkPolicy.
Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
// UID of the NetworkPolicy.
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy is the message format of antrea/pkg/controller/types.NetworkPolicy in an API response.
type NetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Rules is a list of rules to be applied to the selected GroupMembers.
Rules []NetworkPolicyRule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
// AppliedToGroups is a list of names of AppliedToGroups to which this policy applies.
// Cannot be set in conjunction with any NetworkPolicyRule.AppliedToGroups in Rules.
AppliedToGroups []string `json:"appliedToGroups,omitempty" protobuf:"bytes,3,rep,name=appliedToGroups"`
// Priority represents the relative priority of this Network Policy as compared to
// other Network Policies. Priority will be unset (nil) for K8s NetworkPolicy.
Priority *float64 `json:"priority,omitempty" protobuf:"fixed64,4,opt,name=priority"`
// TierPriority represents the priority of the Tier associated with this Network
// Policy. The TierPriority will remain nil for K8s NetworkPolicy.
TierPriority *int32 `json:"tierPriority,omitempty" protobuf:"varint,5,opt,name=tierPriority"`
// Reference to the original NetworkPolicy that the internal NetworkPolicy is created for.
SourceRef *NetworkPolicyReference `json:"sourceRef,omitempty" protobuf:"bytes,6,opt,name=sourceRef"`
}
// Direction defines traffic direction of NetworkPolicyRule.
type Direction string
const (
DirectionIn Direction = "In"
DirectionOut Direction = "Out"
)
// NetworkPolicyRule describes a particular set of traffic that is allowed.
type NetworkPolicyRule struct {
// The direction of this rule.
// If it's set to In, From must be set and To must not be set.
// If it's set to Out, To must be set and From must not be set.
Direction Direction `json:"direction,omitempty" protobuf:"bytes,1,opt,name=direction"`
// From represents sources which should be able to access the GroupMembers selected by the policy.
From NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
// To represents destinations which should be able to be accessed by the GroupMembers selected by the policy.
To NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,3,opt,name=to"`
// Services is a list of services which should be matched.
Services []Service `json:"services,omitempty" protobuf:"bytes,4,rep,name=services"`
// Priority defines the priority of the Rule as compared to other rules in the
// NetworkPolicy.
Priority int32 `json:"priority,omitempty" protobuf:"varint,5,opt,name=priority"`
// Action specifies the action to be applied on the rule. i.e. Allow/Drop. An empty
// action “nil” defaults to Allow action, which would be the case for rules created for
// K8s Network Policy.
Action *secv1alpha1.RuleAction `json:"action,omitempty" protobuf:"bytes,6,opt,name=action,casttype=github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1.RuleAction"`
// EnableLogging indicates whether or not to generate logs when rules are matched. Default to false.
EnableLogging bool `json:"enableLogging" protobuf:"varint,7,opt,name=enableLogging"`
// AppliedToGroups is a list of names of AppliedToGroups to which this rule applies.
// Cannot be set in conjunction with NetworkPolicy.AppliedToGroups of the NetworkPolicy
// that this Rule is referred to.
AppliedToGroups []string `json:"appliedToGroups,omitempty" protobuf:"bytes,8,opt,name=appliedToGroups"`
}
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
// ProtocolSCTP is the SCTP protocol.
ProtocolSCTP Protocol = "SCTP"
)
// Service describes a port to allow traffic on.
type Service struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// +optional
Protocol *Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol"`
// The port name or number on the given protocol. If not specified, this matches all port numbers.
// +optional
Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
// EndPort defines the end of the port range, being the end included within the range.
// It can only be specified when a numerical `port` is specified.
// +optional
EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"`
}
// NetworkPolicyPeer describes a peer of NetworkPolicyRules.
// It could be a list of names of AddressGroups and/or a list of IPBlock.
type NetworkPolicyPeer struct {
// A list of names of AddressGroups.
AddressGroups []string `json:"addressGroups,omitempty" protobuf:"bytes,1,rep,name=addressGroups"`
// A list of IPBlock.
IPBlocks []IPBlock `json:"ipBlocks,omitempty" protobuf:"bytes,2,rep,name=ipBlocks"`
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24"). The except entry describes CIDRs that should
// not be included within this rule.
type IPBlock struct {
// CIDR is an IPNet represents the IP Block.
CIDR IPNet `json:"cidr" protobuf:"bytes,1,name=cidr"`
// Except is a slice of IPNets that should not be included within an IP Block.
// Except values will be rejected if they are outside the CIDR range.
// +optional
Except []IPNet `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=create
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeStatsSummary contains stats produced on a Node. It's used by the antrea-agents to report stats to the antrea-controller.
type NodeStatsSummary struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The TrafficStats of K8s NetworkPolicies collected from the Node.
NetworkPolicies []NetworkPolicyStats `json:"networkPolicies,omitempty" protobuf:"bytes,2,rep,name=networkPolicies"`
// The TrafficStats of Antrea ClusterNetworkPolicies collected from the Node.
AntreaClusterNetworkPolicies []NetworkPolicyStats `json:"antreaClusterNetworkPolicies,omitempty" protobuf:"bytes,3,rep,name=antreaClusterNetworkPolicies"`
// The TrafficStats of Antrea NetworkPolicies collected from the Node.
AntreaNetworkPolicies []NetworkPolicyStats `json:"antreaNetworkPolicies,omitempty" protobuf:"bytes,4,rep,name=antreaNetworkPolicies"`
}
// NetworkPolicyStats contains the information and traffic stats of a NetworkPolicy.
type NetworkPolicyStats struct {
// The reference of the NetworkPolicy.
NetworkPolicy NetworkPolicyReference `json:"networkPolicy,omitempty" protobuf:"bytes,1,opt,name=networkPolicy"`
// The stats of the NetworkPolicy.
TrafficStats statsv1alpha1.TrafficStats `json:"trafficStats,omitempty" protobuf:"bytes,2,opt,name=trafficStats"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyStatus is the status of a NetworkPolicy.
type NetworkPolicyStatus struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Nodes contains statuses produced on a list of Nodes.
Nodes []NetworkPolicyNodeStatus `json:"nodes,omitempty" protobuf:"bytes,2,rep,name=nodes"`
}
// NetworkPolicyNodeStatus is the status of a NetworkPolicy on a Node.
type NetworkPolicyNodeStatus struct {
// The name of the Node that produces the status.
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,1,opt,name=nodeName"`
// The generation realized by the Node.
Generation int64 `json:"generation,omitempty" protobuf:"varint,2,opt,name=generation"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Group is the message format of antrea/pkg/controller/types.Group in an API response.
// An internal Group is created corresponding to a ClusterGroup resource, i.e. it is a
// 1:1 mapping. The UID of this Group is the same as that of its corresponding ClusterGroup.
type Group struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// GroupMembers is a list of resources selected by this Group based on the selectors
// present in the corresponding ClusterGroup.
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,2,rep,name=groupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// GroupList is a list of Group objects.
type GroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"`
}
| 1 | 31,815 | I did not see this is used in the controlplane API? Is it for internal use only? Then no need to define it here? | antrea-io-antrea | go |
@@ -138,7 +138,7 @@ void printDeviceProp(int deviceId) {
cout << setw(w1) << "arch.hasSurfaceFuncs: " << props.arch.hasSurfaceFuncs << endl;
cout << setw(w1) << "arch.has3dGrid: " << props.arch.has3dGrid << endl;
cout << setw(w1) << "arch.hasDynamicParallelism: " << props.arch.hasDynamicParallelism << endl;
- cout << setw(w1) << "gcnArch: " << props.gcnArch << endl;
+ cout << setw(w1) << "gcnArchName: " << props.gcnArchName << endl;
cout << setw(w1) << "isIntegrated: " << props.integrated << endl;
cout << setw(w1) << "maxTexture1D: " << props.maxTexture1D << endl;
cout << setw(w1) << "maxTexture2D.width: " << props.maxTexture2D[0] << endl; | 1 | /*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <iostream>
#include <iomanip>
#include "hip/hip_runtime.h"
#define KNRM "\x1B[0m"
#define KRED "\x1B[31m"
#define KGRN "\x1B[32m"
#define KYEL "\x1B[33m"
#define KBLU "\x1B[34m"
#define KMAG "\x1B[35m"
#define KCYN "\x1B[36m"
#define KWHT "\x1B[37m"
#define failed(...) \
printf("%serror: ", KRED); \
printf(__VA_ARGS__); \
printf("\n"); \
printf("error: TEST FAILED\n%s", KNRM); \
exit(EXIT_FAILURE);
#define HIPCHECK(error) \
if (error != hipSuccess) { \
printf("%serror: '%s'(%d) at %s:%d%s\n", KRED, hipGetErrorString(error), error, __FILE__, \
__LINE__, KNRM); \
failed("API returned error code."); \
}
void printCompilerInfo() {
#ifdef __HCC__
printf("compiler: hcc version=%s, workweek (YYWWD) = %u\n", __hcc_version__, __hcc_workweek__);
#endif
#ifdef __NVCC__
printf("compiler: nvcc\n");
#endif
}
double bytesToKB(size_t s) { return (double)s / (1024.0); }
double bytesToGB(size_t s) { return (double)s / (1024.0 * 1024.0 * 1024.0); }
#define printLimit(w1, limit, units) \
{ \
size_t val; \
cudaDeviceGetLimit(&val, limit); \
std::cout << setw(w1) << #limit ": " << val << " " << units << std::endl; \
}
void printDeviceProp(int deviceId) {
using namespace std;
const int w1 = 34;
cout << left;
cout << setw(w1)
<< "--------------------------------------------------------------------------------"
<< endl;
cout << setw(w1) << "device#" << deviceId << endl;
hipDeviceProp_t props;
HIPCHECK(hipGetDeviceProperties(&props, deviceId));
cout << setw(w1) << "Name: " << props.name << endl;
cout << setw(w1) << "pciBusID: " << props.pciBusID << endl;
cout << setw(w1) << "pciDeviceID: " << props.pciDeviceID << endl;
cout << setw(w1) << "pciDomainID: " << props.pciDomainID << endl;
cout << setw(w1) << "multiProcessorCount: " << props.multiProcessorCount << endl;
cout << setw(w1) << "maxThreadsPerMultiProcessor: " << props.maxThreadsPerMultiProcessor
<< endl;
cout << setw(w1) << "isMultiGpuBoard: " << props.isMultiGpuBoard << endl;
cout << setw(w1) << "clockRate: " << (float)props.clockRate / 1000.0 << " Mhz" << endl;
cout << setw(w1) << "memoryClockRate: " << (float)props.memoryClockRate / 1000.0 << " Mhz"
<< endl;
cout << setw(w1) << "memoryBusWidth: " << props.memoryBusWidth << endl;
cout << setw(w1) << "clockInstructionRate: " << (float)props.clockInstructionRate / 1000.0
<< " Mhz" << endl;
cout << setw(w1) << "totalGlobalMem: " << fixed << setprecision(2)
<< bytesToGB(props.totalGlobalMem) << " GB" << endl;
cout << setw(w1) << "maxSharedMemoryPerMultiProcessor: " << fixed << setprecision(2)
<< bytesToKB(props.maxSharedMemoryPerMultiProcessor) << " KB" << endl;
cout << setw(w1) << "totalConstMem: " << props.totalConstMem << endl;
cout << setw(w1) << "sharedMemPerBlock: " << (float)props.sharedMemPerBlock / 1024.0 << " KB"
<< endl;
cout << setw(w1) << "canMapHostMemory: " << props.canMapHostMemory << endl;
cout << setw(w1) << "regsPerBlock: " << props.regsPerBlock << endl;
cout << setw(w1) << "warpSize: " << props.warpSize << endl;
cout << setw(w1) << "l2CacheSize: " << props.l2CacheSize << endl;
cout << setw(w1) << "computeMode: " << props.computeMode << endl;
cout << setw(w1) << "maxThreadsPerBlock: " << props.maxThreadsPerBlock << endl;
cout << setw(w1) << "maxThreadsDim.x: " << props.maxThreadsDim[0] << endl;
cout << setw(w1) << "maxThreadsDim.y: " << props.maxThreadsDim[1] << endl;
cout << setw(w1) << "maxThreadsDim.z: " << props.maxThreadsDim[2] << endl;
cout << setw(w1) << "maxGridSize.x: " << props.maxGridSize[0] << endl;
cout << setw(w1) << "maxGridSize.y: " << props.maxGridSize[1] << endl;
cout << setw(w1) << "maxGridSize.z: " << props.maxGridSize[2] << endl;
cout << setw(w1) << "major: " << props.major << endl;
cout << setw(w1) << "minor: " << props.minor << endl;
cout << setw(w1) << "concurrentKernels: " << props.concurrentKernels << endl;
cout << setw(w1) << "cooperativeLaunch: " << props.cooperativeLaunch << endl;
cout << setw(w1) << "cooperativeMultiDeviceLaunch: " << props.cooperativeMultiDeviceLaunch << endl;
cout << setw(w1) << "arch.hasGlobalInt32Atomics: " << props.arch.hasGlobalInt32Atomics << endl;
cout << setw(w1) << "arch.hasGlobalFloatAtomicExch: " << props.arch.hasGlobalFloatAtomicExch
<< endl;
cout << setw(w1) << "arch.hasSharedInt32Atomics: " << props.arch.hasSharedInt32Atomics << endl;
cout << setw(w1) << "arch.hasSharedFloatAtomicExch: " << props.arch.hasSharedFloatAtomicExch
<< endl;
cout << setw(w1) << "arch.hasFloatAtomicAdd: " << props.arch.hasFloatAtomicAdd << endl;
cout << setw(w1) << "arch.hasGlobalInt64Atomics: " << props.arch.hasGlobalInt64Atomics << endl;
cout << setw(w1) << "arch.hasSharedInt64Atomics: " << props.arch.hasSharedInt64Atomics << endl;
cout << setw(w1) << "arch.hasDoubles: " << props.arch.hasDoubles << endl;
cout << setw(w1) << "arch.hasWarpVote: " << props.arch.hasWarpVote << endl;
cout << setw(w1) << "arch.hasWarpBallot: " << props.arch.hasWarpBallot << endl;
cout << setw(w1) << "arch.hasWarpShuffle: " << props.arch.hasWarpShuffle << endl;
cout << setw(w1) << "arch.hasFunnelShift: " << props.arch.hasFunnelShift << endl;
cout << setw(w1) << "arch.hasThreadFenceSystem: " << props.arch.hasThreadFenceSystem << endl;
cout << setw(w1) << "arch.hasSyncThreadsExt: " << props.arch.hasSyncThreadsExt << endl;
cout << setw(w1) << "arch.hasSurfaceFuncs: " << props.arch.hasSurfaceFuncs << endl;
cout << setw(w1) << "arch.has3dGrid: " << props.arch.has3dGrid << endl;
cout << setw(w1) << "arch.hasDynamicParallelism: " << props.arch.hasDynamicParallelism << endl;
cout << setw(w1) << "gcnArch: " << props.gcnArch << endl;
cout << setw(w1) << "isIntegrated: " << props.integrated << endl;
cout << setw(w1) << "maxTexture1D: " << props.maxTexture1D << endl;
cout << setw(w1) << "maxTexture2D.width: " << props.maxTexture2D[0] << endl;
cout << setw(w1) << "maxTexture2D.height: " << props.maxTexture2D[1] << endl;
cout << setw(w1) << "maxTexture3D.width: " << props.maxTexture3D[0] << endl;
cout << setw(w1) << "maxTexture3D.height: " << props.maxTexture3D[1] << endl;
cout << setw(w1) << "maxTexture3D.depth: " << props.maxTexture3D[2] << endl;
int deviceCnt;
hipGetDeviceCount(&deviceCnt);
cout << setw(w1) << "peers: ";
for (int i = 0; i < deviceCnt; i++) {
int isPeer;
hipDeviceCanAccessPeer(&isPeer, i, deviceId);
if (isPeer) {
cout << "device#" << i << " ";
}
}
cout << endl;
cout << setw(w1) << "non-peers: ";
for (int i = 0; i < deviceCnt; i++) {
int isPeer;
hipDeviceCanAccessPeer(&isPeer, i, deviceId);
if (!isPeer) {
cout << "device#" << i << " ";
}
}
cout << endl;
#ifdef __HIP_PLATFORM_NVCC__
// Limits:
cout << endl;
printLimit(w1, cudaLimitStackSize, "bytes/thread");
printLimit(w1, cudaLimitPrintfFifoSize, "bytes/device");
printLimit(w1, cudaLimitMallocHeapSize, "bytes/device");
printLimit(w1, cudaLimitDevRuntimeSyncDepth, "grids");
printLimit(w1, cudaLimitDevRuntimePendingLaunchCount, "launches");
#endif
cout << endl;
size_t free, total;
hipMemGetInfo(&free, &total);
cout << fixed << setprecision(2);
cout << setw(w1) << "memInfo.total: " << bytesToGB(total) << " GB" << endl;
cout << setw(w1) << "memInfo.free: " << bytesToGB(free) << " GB (" << setprecision(0)
<< (float)free / total * 100.0 << "%)" << endl;
}
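
// Illustrative sketch only: if the targeted HIP version exposes both the
// numeric hipDeviceProp_t::gcnArch and the string gcnArchName (an assumption
// here; availability varies by HIP release), the two could be printed side
// by side:
//
//   cout << setw(w1) << "gcnArch: " << props.gcnArch << endl;
//   cout << setw(w1) << "gcnArchName: " << props.gcnArchName << endl;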
int main(int argc, char* argv[]) {
using namespace std;
cout << endl;
printCompilerInfo();
int deviceCnt;
HIPCHECK(hipGetDeviceCount(&deviceCnt));
for (int i = 0; i < deviceCnt; i++) {
hipSetDevice(i);
printDeviceProp(i);
}
std::cout << std::endl;
}
| 1 | 9,342 | Should we also add a line to print gcnArch? | ROCm-Developer-Tools-HIP | cpp |
@@ -87,6 +87,7 @@ module.exports.rebaseBraveStringFilesOnChromiumL10nFiles = (path) =>
.replace('<include name="IDR_MD_HISTORY_SIDE_BAR_HTML"', '<include name="IDR_MD_HISTORY_SIDE_BAR_HTML" flattenhtml="true"')
.replace(pageVisibility, bravePageVisibility + pageVisibility)
.replace(/settings_chromium_strings.grdp/g, 'settings_brave_strings.grdp')
+ .replace(/Automatically sends usage statistics and crash reports to Brave/g, 'Automatically sends crash reports to Brave')
.replace(/The Chromium Authors/g, 'Brave Software Inc')
.replace(/Google Chrome/g, 'Brave')
.replace(/Chromium/g, 'Brave') | 1 | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
const path = require('path')
const fs = require('fs')
const srcDir = path.resolve(path.join(__dirname, '..', 'src'))
// Brave string paths
const braveStringsPath = path.resolve(path.join(srcDir, 'brave', 'app', 'brave_strings.grd'))
const braveSettingsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'settings_brave_strings.grdp'))
const braveComponentsStringsPath = path.resolve(path.join(srcDir, 'brave', 'app', 'components_brave_strings.grd'))
const braveExtensionMessagesPath = path.resolve(path.join(srcDir, 'brave', 'vendor', 'brave-extension', 'app', '_locales', 'en_US', 'messages.json'))
const braveSpecificGeneratedResourcesPath = path.resolve(path.join(srcDir, 'brave', 'app', 'brave_generated_resources.grd'))
const braveComponentsResourcesPath = path.resolve(path.join(srcDir, 'brave', 'components', 'resources', 'brave_components_resources.grd'))
const braveGeneratedResourcesPath = path.resolve(path.join(srcDir, 'brave', 'app', 'generated_resources.grd'))
const braveBookmarksPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'bookmarks_strings.grdp'))
const braveMediaRouterPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'media_router_strings.grdp'))
const braveSettingsStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'settings_strings.grdp'))
const braveMdExtensionsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'md_extensions_strings.grdp'))
const bravePrintingStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'printing_strings.grdp'))
const braveExtensionsResourcesPath = path.resolve(path.join(srcDir, 'brave', 'browser', 'resources', 'md_extensions', 'extensions_resources.grd'))
const braveSettingsResourcesPath = path.resolve(path.join(srcDir, 'brave', 'browser', 'resources', 'settings', 'settings_resources.grd'))
const braveBrowserResourcesPath = path.resolve(path.join(srcDir, 'brave', 'browser', 'browser_resources.grd'))
// Chromium string paths
const chromiumStringsPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'chromium_strings.grd'))
const chromiumSettingsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'settings_chromium_strings.grdp'))
const chromiumComponentsStringsPath = path.resolve(path.join(srcDir, 'components', 'components_chromium_strings.grd'))
const chromiumGeneratedResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'generated_resources.grd'))
const chromiumBookmarksPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'bookmarks_strings.grdp'))
const chromiumMediaRouterPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'media_router_strings.grdp'))
const chromiumSettingsStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'settings_strings.grdp'))
const chromiumMdExtensionsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'md_extensions_strings.grdp'))
const chromePrintingStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'printing_strings.grdp'))
const chromiumExtensionsResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'browser', 'resources', 'md_extensions', 'extensions_resources.grd'))
const chromiumSettingsResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'browser', 'resources', 'settings', 'settings_resources.grd'))
const chromiumBrowserResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'browser', 'browser_resources.grd'))
const autoGeneratedWarning = '<!-- This file is created by l10nUtil.js. Do not edit manually. -->'
const pageVisibility = ' <structure name="IDR_SETTINGS_PAGE_VISIBILITY_JS"\n'
const bravePageVisibility = ' <structure name="IDR_SETTINGS_BRAVE_PAGE_VISIBILITY_JS"\n' +
' file="brave_page_visibility.js"\n' +
' type="chrome_html" />\n'
module.exports.getSourceStringPaths = () => {
return [
braveStringsPath,
braveComponentsStringsPath,
braveExtensionMessagesPath,
braveSpecificGeneratedResourcesPath,
braveComponentsResourcesPath,
braveGeneratedResourcesPath,
// No strings for now, uncomment if strings are added
// path.resolve(path.join(srcDir, 'brave', 'browser', 'resources', 'brave_extension.grd')),
// path.resolve(path.join(srcDir, 'brave', 'common', 'extensions', 'api', 'brave_api_resources.grd')),
]
}
module.exports.rebaseBraveStringFilesOnChromiumL10nFiles = (path) =>
Object.entries({
[chromiumStringsPath]: braveStringsPath,
    [chromiumSettingsPartPath]: braveSettingsPartPath,
[chromiumComponentsStringsPath]: braveComponentsStringsPath,
[chromiumGeneratedResourcesPath]: braveGeneratedResourcesPath,
[chromiumBookmarksPartPath]: braveBookmarksPartPath,
[chromiumMediaRouterPartPath]: braveMediaRouterPartPath,
[chromiumSettingsStringsPartPath]: braveSettingsStringsPartPath,
[chromiumMdExtensionsPartPath]: braveMdExtensionsPartPath,
[chromePrintingStringsPartPath]: bravePrintingStringsPartPath,
[chromiumExtensionsResourcesPath]: braveExtensionsResourcesPath,
[chromiumSettingsResourcesPath]: braveSettingsResourcesPath,
[chromiumBrowserResourcesPath]: braveBrowserResourcesPath
}).forEach(([sourcePath, destPath]) =>
fs.writeFileSync(destPath,
fs.readFileSync(sourcePath, 'utf8')
.replace(/<\?xml version="1.0" encoding="utf-8"\?>/i, '<?xml version="1.0" encoding="utf-8"?>\n' + autoGeneratedWarning)
.replace('<structure name="IDR_MD_EXTENSIONS_SIDEBAR_HTML"', '<structure name="IDR_MD_EXTENSIONS_SIDEBAR_HTML" preprocess="true"')
.replace('<structure name="IDR_SETTINGS_APPEARANCE_FONTS_PAGE_HTML"', '<structure name="IDR_SETTINGS_APPEARANCE_FONTS_PAGE_HTML" preprocess="true"')
.replace('<structure name="IDR_SETTINGS_PASSWORDS_SECTION_HTML"', '<structure name="IDR_SETTINGS_PASSWORDS_SECTION_HTML" preprocess="true"')
.replace('<include name="IDR_MD_HISTORY_SIDE_BAR_HTML"', '<include name="IDR_MD_HISTORY_SIDE_BAR_HTML" flattenhtml="true"')
.replace(pageVisibility, bravePageVisibility + pageVisibility)
.replace(/settings_chromium_strings.grdp/g, 'settings_brave_strings.grdp')
.replace(/The Chromium Authors/g, 'Brave Software Inc')
.replace(/Google Chrome/g, 'Brave')
.replace(/Chromium/g, 'Brave')
.replace(/Chrome/g, 'Brave')
.replace(/Google/g, 'Brave'), 'utf8'))
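
// Illustrative sketch only (sample strings are assumptions): the .replace
// chain above is order-sensitive, because later generic substitutions rewrite
// text that earlier specific patterns may depend on. For example:
//
//   'Chromium project'.replace(/Chromium project/g, 'X').replace(/Chromium/g, 'Brave')
//   // -> 'X'
//   'Chromium project'.replace(/Chromium/g, 'Brave').replace(/Chromium project/g, 'X')
//   // -> 'Brave project'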
| 1 | 5,361 | I merged already so need another PR, but I think this needs to be at the bottom. | brave-brave-browser | js |
@@ -150,6 +150,7 @@ bool ConfigManager::load()
boolean[ONLINE_OFFLINE_CHARLIST] = getGlobalBoolean(L, "showOnlineStatusInCharlist", false);
boolean[YELL_ALLOW_PREMIUM] = getGlobalBoolean(L, "yellAlwaysAllowPremium", false);
boolean[FORCE_MONSTERTYPE_LOAD] = getGlobalBoolean(L, "forceMonsterTypesOnLoad", true);
+ boolean[HOUSE_OWNED_BY_ACCOUNT] = getGlobalBoolean(L, "houseOwnedByAccount ", false);
string[DEFAULT_PRIORITY] = getGlobalString(L, "defaultPriority", "high");
string[SERVER_NAME] = getGlobalString(L, "serverName", ""); | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#if __has_include("luajit/lua.hpp")
#include <luajit/lua.hpp>
#else
#include <lua.hpp>
#endif
#include "configmanager.h"
#include "game.h"
#if LUA_VERSION_NUM >= 502
#undef lua_strlen
#define lua_strlen lua_rawlen
#endif
extern Game g_game;
namespace {
std::string getGlobalString(lua_State* L, const char* identifier, const char* defaultValue)
{
lua_getglobal(L, identifier);
if (!lua_isstring(L, -1)) {
lua_pop(L, 1);
return defaultValue;
}
size_t len = lua_strlen(L, -1);
std::string ret(lua_tostring(L, -1), len);
lua_pop(L, 1);
return ret;
}
int32_t getGlobalNumber(lua_State* L, const char* identifier, const int32_t defaultValue = 0)
{
lua_getglobal(L, identifier);
if (!lua_isnumber(L, -1)) {
lua_pop(L, 1);
return defaultValue;
}
int32_t val = lua_tonumber(L, -1);
lua_pop(L, 1);
return val;
}
bool getGlobalBoolean(lua_State* L, const char* identifier, const bool defaultValue)
{
lua_getglobal(L, identifier);
if (!lua_isboolean(L, -1)) {
if (!lua_isstring(L, -1)) {
lua_pop(L, 1);
return defaultValue;
}
size_t len = lua_strlen(L, -1);
std::string ret(lua_tostring(L, -1), len);
lua_pop(L, 1);
return booleanString(ret);
}
int val = lua_toboolean(L, -1);
lua_pop(L, 1);
return val != 0;
}
}
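// Illustrative config.lua snippet (hypothetical values, not shipped with the
// server) showing what the helpers above read:
//   serverName = "Forgotten"   -- getGlobalString
//   mysqlPort = 3306           -- getGlobalNumber
//   freePremium = true         -- getGlobalBoolean; a string value like "yes"
//                              -- is also accepted and parsed via booleanString()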
bool ConfigManager::load()
{
lua_State* L = luaL_newstate();
if (!L) {
throw std::runtime_error("Failed to allocate memory");
}
luaL_openlibs(L);
if (luaL_dofile(L, "config.lua")) {
std::cout << "[Error - ConfigManager::load] " << lua_tostring(L, -1) << std::endl;
lua_close(L);
return false;
}
//parse config
if (!loaded) { //info that must be loaded one time (unless we reset the modules involved)
boolean[BIND_ONLY_GLOBAL_ADDRESS] = getGlobalBoolean(L, "bindOnlyGlobalAddress", false);
boolean[OPTIMIZE_DATABASE] = getGlobalBoolean(L, "startupDatabaseOptimization", true);
string[IP] = getGlobalString(L, "ip", "127.0.0.1");
string[MAP_NAME] = getGlobalString(L, "mapName", "forgotten");
string[MAP_AUTHOR] = getGlobalString(L, "mapAuthor", "Unknown");
string[HOUSE_RENT_PERIOD] = getGlobalString(L, "houseRentPeriod", "never");
string[MYSQL_HOST] = getGlobalString(L, "mysqlHost", "127.0.0.1");
string[MYSQL_USER] = getGlobalString(L, "mysqlUser", "forgottenserver");
string[MYSQL_PASS] = getGlobalString(L, "mysqlPass", "");
string[MYSQL_DB] = getGlobalString(L, "mysqlDatabase", "forgottenserver");
string[MYSQL_SOCK] = getGlobalString(L, "mysqlSock", "");
integer[SQL_PORT] = getGlobalNumber(L, "mysqlPort", 3306);
integer[GAME_PORT] = getGlobalNumber(L, "gameProtocolPort", 7172);
integer[LOGIN_PORT] = getGlobalNumber(L, "loginProtocolPort", 7171);
integer[STATUS_PORT] = getGlobalNumber(L, "statusProtocolPort", 7171);
integer[MARKET_OFFER_DURATION] = getGlobalNumber(L, "marketOfferDuration", 30 * 24 * 60 * 60);
}
boolean[ALLOW_CHANGEOUTFIT] = getGlobalBoolean(L, "allowChangeOutfit", true);
boolean[ONE_PLAYER_ON_ACCOUNT] = getGlobalBoolean(L, "onePlayerOnlinePerAccount", true);
boolean[AIMBOT_HOTKEY_ENABLED] = getGlobalBoolean(L, "hotkeyAimbotEnabled", true);
boolean[REMOVE_RUNE_CHARGES] = getGlobalBoolean(L, "removeChargesFromRunes", true);
boolean[REMOVE_WEAPON_AMMO] = getGlobalBoolean(L, "removeWeaponAmmunition", true);
boolean[REMOVE_WEAPON_CHARGES] = getGlobalBoolean(L, "removeWeaponCharges", true);
boolean[REMOVE_POTION_CHARGES] = getGlobalBoolean(L, "removeChargesFromPotions", true);
boolean[EXPERIENCE_FROM_PLAYERS] = getGlobalBoolean(L, "experienceByKillingPlayers", false);
boolean[FREE_PREMIUM] = getGlobalBoolean(L, "freePremium", false);
boolean[REPLACE_KICK_ON_LOGIN] = getGlobalBoolean(L, "replaceKickOnLogin", true);
boolean[ALLOW_CLONES] = getGlobalBoolean(L, "allowClones", false);
boolean[MARKET_PREMIUM] = getGlobalBoolean(L, "premiumToCreateMarketOffer", true);
boolean[EMOTE_SPELLS] = getGlobalBoolean(L, "emoteSpells", false);
boolean[STAMINA_SYSTEM] = getGlobalBoolean(L, "staminaSystem", true);
boolean[WARN_UNSAFE_SCRIPTS] = getGlobalBoolean(L, "warnUnsafeScripts", true);
boolean[CONVERT_UNSAFE_SCRIPTS] = getGlobalBoolean(L, "convertUnsafeScripts", true);
boolean[CLASSIC_EQUIPMENT_SLOTS] = getGlobalBoolean(L, "classicEquipmentSlots", false);
boolean[CLASSIC_ATTACK_SPEED] = getGlobalBoolean(L, "classicAttackSpeed", false);
boolean[SCRIPTS_CONSOLE_LOGS] = getGlobalBoolean(L, "showScriptsLogInConsole", true);
boolean[SERVER_SAVE_NOTIFY_MESSAGE] = getGlobalBoolean(L, "serverSaveNotifyMessage", true);
boolean[SERVER_SAVE_CLEAN_MAP] = getGlobalBoolean(L, "serverSaveCleanMap", false);
boolean[SERVER_SAVE_CLOSE] = getGlobalBoolean(L, "serverSaveClose", false);
boolean[SERVER_SAVE_SHUTDOWN] = getGlobalBoolean(L, "serverSaveShutdown", true);
boolean[ONLINE_OFFLINE_CHARLIST] = getGlobalBoolean(L, "showOnlineStatusInCharlist", false);
boolean[YELL_ALLOW_PREMIUM] = getGlobalBoolean(L, "yellAlwaysAllowPremium", false);
boolean[FORCE_MONSTERTYPE_LOAD] = getGlobalBoolean(L, "forceMonsterTypesOnLoad", true);
string[DEFAULT_PRIORITY] = getGlobalString(L, "defaultPriority", "high");
string[SERVER_NAME] = getGlobalString(L, "serverName", "");
string[OWNER_NAME] = getGlobalString(L, "ownerName", "");
string[OWNER_EMAIL] = getGlobalString(L, "ownerEmail", "");
string[URL] = getGlobalString(L, "url", "");
string[LOCATION] = getGlobalString(L, "location", "");
string[MOTD] = getGlobalString(L, "motd", "");
string[WORLD_TYPE] = getGlobalString(L, "worldType", "pvp");
integer[MAX_PLAYERS] = getGlobalNumber(L, "maxPlayers");
integer[PZ_LOCKED] = getGlobalNumber(L, "pzLocked", 60000);
integer[DEFAULT_DESPAWNRANGE] = getGlobalNumber(L, "deSpawnRange", 2);
integer[DEFAULT_DESPAWNRADIUS] = getGlobalNumber(L, "deSpawnRadius", 50);
integer[RATE_EXPERIENCE] = getGlobalNumber(L, "rateExp", 5);
integer[RATE_SKILL] = getGlobalNumber(L, "rateSkill", 3);
integer[RATE_LOOT] = getGlobalNumber(L, "rateLoot", 2);
integer[RATE_MAGIC] = getGlobalNumber(L, "rateMagic", 3);
integer[RATE_SPAWN] = getGlobalNumber(L, "rateSpawn", 1);
integer[HOUSE_PRICE] = getGlobalNumber(L, "housePriceEachSQM", 1000);
integer[KILLS_TO_RED] = getGlobalNumber(L, "killsToRedSkull", 3);
integer[KILLS_TO_BLACK] = getGlobalNumber(L, "killsToBlackSkull", 6);
integer[ACTIONS_DELAY_INTERVAL] = getGlobalNumber(L, "timeBetweenActions", 200);
integer[EX_ACTIONS_DELAY_INTERVAL] = getGlobalNumber(L, "timeBetweenExActions", 1000);
integer[MAX_MESSAGEBUFFER] = getGlobalNumber(L, "maxMessageBuffer", 4);
integer[KICK_AFTER_MINUTES] = getGlobalNumber(L, "kickIdlePlayerAfterMinutes", 15);
integer[PROTECTION_LEVEL] = getGlobalNumber(L, "protectionLevel", 1);
integer[DEATH_LOSE_PERCENT] = getGlobalNumber(L, "deathLosePercent", -1);
integer[STATUSQUERY_TIMEOUT] = getGlobalNumber(L, "statusTimeout", 5000);
integer[FRAG_TIME] = getGlobalNumber(L, "timeToDecreaseFrags", 24 * 60 * 60);
integer[WHITE_SKULL_TIME] = getGlobalNumber(L, "whiteSkullTime", 15 * 60);
integer[STAIRHOP_DELAY] = getGlobalNumber(L, "stairJumpExhaustion", 2000);
integer[EXP_FROM_PLAYERS_LEVEL_RANGE] = getGlobalNumber(L, "expFromPlayersLevelRange", 75);
integer[CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES] = getGlobalNumber(L, "checkExpiredMarketOffersEachMinutes", 60);
integer[MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER] = getGlobalNumber(L, "maxMarketOffersAtATimePerPlayer", 100);
integer[MAX_PACKETS_PER_SECOND] = getGlobalNumber(L, "maxPacketsPerSecond", 25);
integer[SERVER_SAVE_NOTIFY_DURATION] = getGlobalNumber(L, "serverSaveNotifyDuration", 5);
integer[YELL_MINIMUM_LEVEL] = getGlobalNumber(L, "yellMinimumLevel", 2);
loaded = true;
lua_close(L);
return true;
}
bool ConfigManager::reload()
{
bool result = load();
if (transformToSHA1(getString(ConfigManager::MOTD)) != g_game.getMotdHash()) {
g_game.incrementMotdNum();
}
return result;
}
static std::string dummyStr;
const std::string& ConfigManager::getString(string_config_t what) const
{
if (what >= LAST_STRING_CONFIG) {
std::cout << "[Warning - ConfigManager::getString] Accessing invalid index: " << what << std::endl;
return dummyStr;
}
return string[what];
}
int32_t ConfigManager::getNumber(integer_config_t what) const
{
if (what >= LAST_INTEGER_CONFIG) {
std::cout << "[Warning - ConfigManager::getNumber] Accessing invalid index: " << what << std::endl;
return 0;
}
return integer[what];
}
bool ConfigManager::getBoolean(boolean_config_t what) const
{
if (what >= LAST_BOOLEAN_CONFIG) {
std::cout << "[Warning - ConfigManager::getBoolean] Accessing invalid index: " << what << std::endl;
return false;
}
return boolean[what];
}
| 1 | 16,899 | space in key string? | otland-forgottenserver | cpp |
@@ -48,9 +48,13 @@ func (b *ProcessBuilder) Build() *ManagedProcess {
if len(b.nsOptions) > 0 {
args = append([]string{"--", cmd}, args...)
for _, option := range b.nsOptions {
- args = append([]string{"-" + nsArgMap[option.Typ] + option.Path}, args...)
+ args = append([]string{"-" + nsArgMap[option.Typ], option.Path}, args...)
}
- cmd = "nsenter"
+
+ if b.localMnt {
+ args = append([]string{"-l"}, args...)
+ }
+ cmd = nsexecPath
}
 	if b.pause {
| 1 |
// Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bpm
import (
"context"
"os/exec"
"strings"
"syscall"
"github.com/chaos-mesh/chaos-mesh/pkg/mock"
)
// Build builds the process
func (b *ProcessBuilder) Build() *ManagedProcess {
	// The call routine is pause -> suicide -> nsenter --(fork)-> suicide -> process,
	// so that when chaos-daemon kills the suicide process, the sub-suicide process
	// receives a signal and exits.
	// For example:
	// If you call `nsenter -p/proc/.../ns/pid bash -c "while true; do sleep 1; date; done"`,
	// then even if you kill the nsenter process, its subprocess will continue running
	// until it gets killed. The suicide program makes sure that the subprocess is
	// terminated when its parent dies.
	// But `./bin/suicide nsenter -p/proc/.../ns/pid ./bin/suicide bash -c "while true; do sleep 1; date; done"`
	// fixes this problem: the first suicide ensures the process is killed when chaos-daemon dies.
	// I'm not sure this method is 100% reliable, but half a loaf is better than none.
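	// Illustrative result (hypothetical target PID, not taken from this repo's
	// tests): with suicide, a PID-namespace option and pause all enabled, the
	// code below composes roughly:
	//   <pausePath> <suicidePath> nsenter -p/proc/1234/ns/pid -- <cmd> <args...>
	// wait, more precisely the suicide wrap is applied first, so the chain is:
	//   <pausePath> nsenter -p/proc/1234/ns/pid -- <suicidePath> <cmd> <args...>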
args := b.args
cmd := b.cmd
if b.suicide {
args = append([]string{cmd}, args...)
cmd = suicidePath
}
if len(b.nsOptions) > 0 {
args = append([]string{"--", cmd}, args...)
for _, option := range b.nsOptions {
args = append([]string{"-" + nsArgMap[option.Typ] + option.Path}, args...)
}
cmd = "nsenter"
}
if b.pause {
args = append([]string{cmd}, args...)
cmd = pausePath
}
if c := mock.On("MockProcessBuild"); c != nil {
f := c.(func(context.Context, string, ...string) *exec.Cmd)
return &ManagedProcess{
Cmd: f(b.ctx, cmd, args...),
Identifier: b.identifier,
}
}
log.Info("build command", "command", cmd+" "+strings.Join(args, " "))
command := exec.CommandContext(b.ctx, cmd, args...)
command.SysProcAttr = &syscall.SysProcAttr{}
if b.suicide {
command.SysProcAttr.Pdeathsig = syscall.SIGTERM
}
return &ManagedProcess{
Cmd: command,
Identifier: b.identifier,
}
}
| 1 | 18,679 | The commends of this function should be updated | chaos-mesh-chaos-mesh | go |
@@ -48,6 +48,7 @@ class AnchorHead(nn.Module):
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)),
+ reg_decoded_bbox=False,
background_label=None,
loss_cls=dict(
                 type='CrossEntropyLoss',
| 1 |
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (AnchorGenerator, anchor_inside_flags, build_assigner,
build_bbox_coder, build_sampler, force_fp32,
images_to_levels, multi_apply, multiclass_nms, unmap)
from ..builder import build_loss
from ..registry import HEADS
@HEADS.register_module
class AnchorHead(nn.Module):
"""Anchor-based head (RPN, RetinaNet, SSD, etc.).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
anchor_scales (Iterable): Anchor scales.
anchor_ratios (Iterable): Anchor aspect ratios.
anchor_strides (Iterable): Anchor strides.
anchor_base_sizes (Iterable): Anchor base sizes.
        bbox_coder (dict): Config of the bounding box coder used to encode
            and decode regression targets.
background_label (int | None): Label ID of background, set as 0 for
RPN and num_classes for other heads. It will automatically set as
num_classes if None is given.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
anchor_scales=[8, 16, 32],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)),
background_label=None,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
train_cfg=None,
test_cfg=None):
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(
anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
# TODO better way to determine whether sample or not
self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
if self.cls_out_channels <= 0:
raise ValueError('num_classes={} is too small'.format(num_classes))
self.background_label = (
num_classes if background_label is None else background_label)
# background_label should be either 0 or num_classes
assert (self.background_label == 0
or self.background_label == num_classes)
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
AnchorGenerator(anchor_base, anchor_scales, anchor_ratios))
self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
self._init_layers()
def _init_layers(self):
self.conv_cls = nn.Conv2d(self.in_channels,
self.num_anchors * self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
def init_weights(self):
normal_init(self.conv_cls, std=0.01)
normal_init(self.conv_reg, std=0.01)
def forward_single(self, x):
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
device (torch.device | str): Device for returned tensors
Returns:
tuple:
anchor_list (list[Tensor]): Anchors of each image
valid_flag_list (list[Tensor]): Valid flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = []
for i in range(num_levels):
anchors = self.anchor_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i], device=device)
multi_level_anchors.append(anchors)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = img_meta['pad_shape'][:2]
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = self.anchor_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w),
device=device)
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
def _get_targets_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in
a single image.
Args:
            flat_anchors (Tensor): Multi-level anchors of the image, which are
                concatenated into a single tensor of shape (num_anchors, 4).
            valid_flags (Tensor): Multi-level valid flags of the image,
                which are concatenated into a single tensor of
                shape (num_anchors,).
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
        Returns:
            tuple:
                labels (Tensor): Labels of all anchors in the image.
                label_weights (Tensor): Label weights of all anchors.
                bbox_targets (Tensor): BBox regression targets of all anchors.
                bbox_weights (Tensor): BBox regression weights of all anchors.
                pos_inds (Tensor): Indices of positive anchors.
                neg_inds (Tensor): Indices of negative anchors.
        """
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 6
# assign gt and sample anchors
anchors = flat_anchors[inside_flags.type(torch.bool), :]
assign_result = self.assigner.assign(
anchors, gt_bboxes, gt_bboxes_ignore,
None if self.sampling else gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.background_label,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# only rpn gives gt_labels as None, this time FG is 1
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(labels, num_total_anchors, inside_flags)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds)
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in
multiple images.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple:
labels_list (list[Tensor]): Labels of each level
label_weights_list (list[Tensor]): Label weights of each level
bbox_targets_list (list[Tensor]): BBox targets of each level
bbox_weights_list (list[Tensor]): BBox weights of each level
num_total_pos (int): Number of positive samples in all images
num_total_neg (int): Number of negative samples in all images
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list) = multi_apply(
self._get_targets_single,
anchor_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg)
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generators)
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg=None,
rescale=False):
"""
Transform network output for a batch into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): Size / scale info for each image
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the class index of the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(num_classes=9, in_channels=1)
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
mlvl_anchors = [
self.anchor_generators[i].grid_anchors(
cls_scores[i].size()[-2:],
self.anchor_strides[i],
device=device) for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def _get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""
Transform outputs for a single batch item into labeled boxes.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, anchors in zip(cls_score_list,
bbox_pred_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
max_scores, _ = scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
| 1 | 18,618 | The docstring is outdated. | open-mmlab-mmdetection | py |
@@ -7,6 +7,9 @@ namespace Datadog.Trace
/// </summary>
public static class CorrelationIdentifier
{
+ internal static readonly string ServiceKey = "dd.service";
+ internal static readonly string VersionKey = "dd.version";
+ internal static readonly string EnvKey = "dd.env";
internal static readonly string TraceIdKey = "dd.trace_id";
internal static readonly string SpanIdKey = "dd.span_id";
| 1 | using System;
namespace Datadog.Trace
{
/// <summary>
/// An API to access the active trace and span ids.
/// </summary>
public static class CorrelationIdentifier
{
internal static readonly string TraceIdKey = "dd.trace_id";
internal static readonly string SpanIdKey = "dd.span_id";
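        // These keys are the conventional property names used when stamping log
        // events for trace/log correlation, e.g. a log enricher writing a
        // "dd.trace_id" property from CorrelationIdentifier.TraceId
        // (illustrative usage, not part of this file).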
/// <summary>
/// Gets the trace id
/// </summary>
public static ulong TraceId
{
get
{
return Tracer.Instance.ActiveScope?.Span?.TraceId ?? 0;
}
}
/// <summary>
/// Gets the span id
/// </summary>
public static ulong SpanId
{
get
{
return Tracer.Instance.ActiveScope?.Span?.SpanId ?? 0;
}
}
}
}
| 1 | 16,904 | Could rename to `ServiceVersionKey` for consistency with the suggestion to rename the TracerSetting. | DataDog-dd-trace-dotnet | .cs |
@@ -826,6 +826,12 @@ def install(package, hash=None, version=None, tag=None, force=False):
store = PackageStore()
existing_pkg = store.get_package(owner, pkg)
+ if existing_pkg is not None and not force:
+ print("{owner}/{pkg} already installed.".format(owner=owner, pkg=pkg))
+ overwrite = input("Overwrite? (y/n) ")
+ if overwrite.lower() != 'y':
+ return
+
if version is not None:
response = session.get(
"{url}/api/version/{owner}/{pkg}/{version}".format( | 1 | # -*- coding: utf-8 -*-
"""
Command line parsing and command dispatch
"""
from __future__ import print_function
from builtins import input # pylint:disable=W0622
from datetime import datetime
import gzip
import hashlib
import json
import os
import re
from shutil import copyfileobj, move, rmtree
import stat
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import yaml
from packaging.version import Version
import pandas as pd
import pkg_resources
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from six import iteritems, string_types
from six.moves.urllib.parse import urlparse, urlunparse
from tqdm import tqdm
from .build import (build_package, build_package_from_contents, generate_build_file,
generate_contents, BuildException)
from .const import DEFAULT_BUILDFILE, LATEST_TAG
from .core import (hash_contents, find_object_hashes, PackageFormat, TableNode, FileNode, GroupNode,
decode_node, encode_node, exec_yaml_python, CommandException, diff_dataframes,
load_yaml)
from .hashing import digest_file
from .store import PackageStore, parse_package, parse_package_extended
from .util import BASE_DIR, FileWithReadProgress, gzip_compress
from . import check_functions as qc
from .. import nodes
# pyOpenSSL and S3 don't play well together. pyOpenSSL is completely optional, but gets enabled by requests.
# So... We disable it. That's what boto does.
# https://github.com/boto/botocore/issues/760
# https://github.com/boto/botocore/pull/803
try:
from urllib3.contrib import pyopenssl
pyopenssl.extract_from_urllib3()
except ImportError:
pass
DEFAULT_REGISTRY_URL = 'https://pkg.quiltdata.com'
GIT_URL_RE = re.compile(r'(?P<url>http[s]?://[\w./~_-]+\.git)(?:@(?P<branch>[\w_-]+))?')
CHUNK_SIZE = 4096
PARALLEL_UPLOADS = 20
S3_CONNECT_TIMEOUT = 30
S3_READ_TIMEOUT = 30
S3_TIMEOUT_RETRIES = 3
CONTENT_RANGE_RE = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$')
LOG_TIMEOUT = 3 # 3 seconds
VERSION = pkg_resources.require('quilt')[0].version
_registry_url = None
def _load_config():
config_path = os.path.join(BASE_DIR, 'config.json')
if os.path.exists(config_path):
with open(config_path) as fd:
return json.load(fd)
return {}
def _save_config(cfg):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
config_path = os.path.join(BASE_DIR, 'config.json')
with open(config_path, 'w') as fd:
json.dump(cfg, fd)
def get_registry_url():
global _registry_url
if _registry_url is not None:
return _registry_url
# Env variable; overrides the config.
url = os.environ.get('QUILT_PKG_URL')
if url is None:
# Config file (generated by `quilt config`).
cfg = _load_config()
url = cfg.get('registry_url', '')
# '' means default URL.
_registry_url = url or DEFAULT_REGISTRY_URL
return _registry_url
def config():
answer = input("Please enter the URL for your custom Quilt registry (ask your administrator),\n" +
"or leave this line blank to use the default registry: ")
if answer:
url = urlparse(answer.rstrip('/'))
if (url.scheme not in ['http', 'https'] or not url.netloc or
url.path or url.params or url.query or url.fragment):
raise CommandException("Invalid URL: %s" % answer)
canonical_url = urlunparse(url)
else:
# When saving the config, store '' instead of the actual URL in case we ever change it.
canonical_url = ''
cfg = _load_config()
cfg['registry_url'] = canonical_url
_save_config(cfg)
# Clear the cached URL.
global _registry_url
_registry_url = None
def get_auth_path():
url = get_registry_url()
if url == DEFAULT_REGISTRY_URL:
suffix = ''
else:
# Store different servers' auth in different files.
suffix = "-%.8s" % hashlib.md5(url.encode('utf-8')).hexdigest()
return os.path.join(BASE_DIR, 'auth%s.json' % suffix)
def _update_auth(refresh_token):
response = requests.post("%s/api/token" % get_registry_url(), data=dict(
refresh_token=refresh_token
))
if response.status_code != requests.codes.ok:
raise CommandException("Authentication error: %s" % response.status_code)
data = response.json()
error = data.get('error')
if error is not None:
raise CommandException("Failed to log in: %s" % error)
return dict(
refresh_token=data['refresh_token'],
access_token=data['access_token'],
expires_at=data['expires_at']
)
def _save_auth(auth):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
file_path = get_auth_path()
with open(file_path, 'w') as fd:
os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR)
json.dump(auth, fd)
def _handle_response(resp, **kwargs):
_ = kwargs # unused pylint:disable=W0613
if resp.status_code == requests.codes.unauthorized:
raise CommandException("Authentication failed. Run `quilt login` again.")
elif not resp.ok:
try:
data = resp.json()
raise CommandException(data['message'])
except ValueError:
raise CommandException("Unexpected failure: error %s" % resp.status_code)
def _create_auth():
"""
Reads the credentials, updates the access token if necessary, and returns it.
"""
file_path = get_auth_path()
if os.path.exists(file_path):
with open(file_path) as fd:
auth = json.load(fd)
# If the access token expires within a minute, update it.
if auth['expires_at'] < time.time() + 60:
try:
auth = _update_auth(auth['refresh_token'])
except CommandException as ex:
raise CommandException(
"Failed to update the access token (%s). Run `quilt login` again." % ex
)
_save_auth(auth)
else:
# The auth file doesn't exist, probably because the
# user hasn't run quilt login yet.
auth = None
return auth
def _create_session(auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(dict(
response=_handle_response
))
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "quilt-cli/%s" % VERSION,
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session
_session = None # pylint:disable=C0103
def _get_session():
"""
Creates a session or returns an existing session.
"""
global _session # pylint:disable=C0103
if _session is None:
auth = _create_auth()
_session = _create_session(auth)
return _session
def _clear_session():
global _session # pylint:disable=C0103
if _session is not None:
_session.close()
_session = None
def _open_url(url):
try:
if sys.platform == 'win32':
os.startfile(url) # pylint:disable=E1101
elif sys.platform == 'darwin':
with open(os.devnull, 'r+') as null:
subprocess.check_call(['open', url], stdin=null, stdout=null, stderr=null)
else:
with open(os.devnull, 'r+') as null:
subprocess.check_call(['xdg-open', url], stdin=null, stdout=null, stderr=null)
except Exception as ex: # pylint:disable=W0703
print("Failed to launch the browser: %s" % ex)
def _match_hash(session, owner, pkg, hash, raise_exception=True):
# short-circuit for exact length
if len(hash) == 64:
return hash
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
for entry in reversed(response.json()['logs']):
# support short hashes
if entry['hash'].startswith(hash):
return entry['hash']
if raise_exception:
raise CommandException("Invalid hash for package {owner}/{pkg}: {hash}".format(
hash=hash, owner=owner, pkg=pkg))
return None
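# Example (illustrative): given a log entry with full hash
# "fb3a1c<...58 more hex chars>", both the full 64-character hash and a unique
# prefix such as "fb3a1c" resolve to the same entry; an exact 64-character hash
# short-circuits above without hitting the registry log at all.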
def login():
"""
Authenticate.
Launches a web browser and asks the user for a token.
"""
login_url = "%s/login" % get_registry_url()
print("Launching a web browser...")
print("If that didn't work, please visit the following URL: %s" % login_url)
_open_url(login_url)
print()
refresh_token = input("Enter the code from the webpage: ")
login_with_token(refresh_token)
def login_with_token(refresh_token):
"""
Authenticate using an existing token.
"""
# Get an access token and a new refresh token.
auth = _update_auth(refresh_token)
_save_auth(auth)
_clear_session()
def logout():
"""
Become anonymous. Useful for testing.
"""
auth_file = get_auth_path()
# TODO revoke refresh token (without logging out of web sessions)
if os.path.exists(auth_file):
os.remove(auth_file)
else:
print("Already logged out.")
_clear_session()
def generate(directory, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build-file for quilt build from a directory of
source files.
"""
try:
buildfilepath = generate_build_file(directory, outfilename=outfilename)
except BuildException as builderror:
raise CommandException(str(builderror))
print("Generated build-file %s." % (buildfilepath))
def diff_node_dataframe(package, nodename, dataframe):
"""
compare two dataframes and print the result
WIP: find_node_by_name() doesn't work yet.
TODO: higher level API: diff_two_files(filepath1, filepath2)
TODO: higher level API: diff_node_file(file, package, nodename, filepath)
"""
owner, pkg = parse_package(package)
pkgobj = PackageStore.find_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
node = pkgobj.find_node_by_name(nodename)
if node is None:
raise CommandException("Node path not found: {}".format(nodename))
quilt_dataframe = pkgobj.get_obj(node)
return diff_dataframes(quilt_dataframe, dataframe)
def check(path=None, env='default'):
"""
Execute the checks: rules for a given build.yml file.
"""
# TODO: add files=<list of files> to check only a subset...
# also useful for 'quilt build' to exclude certain files?
# (if not, then require dry_run=True if files!=None/all)
build("dry_run/dry_run", path=path, dry_run=True, env=env)
def _clone_git_repo(url, branch, dest):
cmd = ['git', 'clone', '-q', '--depth=1']
if branch:
cmd += ['-b', branch]
cmd += [url, dest]
subprocess.check_call(cmd)
def _log(**kwargs):
# TODO(dima): Save logs to a file, then send them when we get a chance.
cfg = _load_config()
if cfg.get('disable_analytics'):
return
session = _get_session()
# Disable error handling.
orig_response_hooks = session.hooks.get('response')
session.hooks.update(dict(
response=None
))
try:
session.post(
"{url}/api/log".format(
url=get_registry_url(),
),
data=json.dumps([kwargs]),
timeout=LOG_TIMEOUT,
)
except requests.exceptions.RequestException:
# Ignore logging errors.
pass
# restore disabled error-handling
session.hooks['response'] = orig_response_hooks
def build(package, path=None, dry_run=False, env='default'):
"""
Compile a Quilt data package, either from a build file or an existing package node.
"""
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env)
except Exception as ex:
_log(type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise
_log(type='build', package=package_hash, dry_run=dry_run, env=env)
def _build_internal(package, path, dry_run, env):
# we may have a path, git URL, PackageNode, or None
if isinstance(path, string_types):
# is this a git url?
is_git_url = GIT_URL_RE.match(path)
if is_git_url:
tmpdir = tempfile.mkdtemp()
url = is_git_url.group('url')
branch = is_git_url.group('branch')
try:
_clone_git_repo(url, branch, tmpdir)
build_from_path(package, tmpdir, dry_run=dry_run, env=env)
except Exception as exc:
msg = "attempting git clone raised exception: {exc}"
raise CommandException(msg.format(exc=exc))
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
else:
build_from_path(package, path, dry_run=dry_run, env=env)
elif isinstance(path, nodes.PackageNode):
assert not dry_run # TODO?
build_from_node(package, path)
elif path is None:
assert not dry_run # TODO?
build_empty(package)
else:
raise ValueError("Expected a PackageNode, path or git URL, but got %r" % path)
def build_empty(package):
"""
Create an empty package for convenient editing of de novo packages
"""
owner, pkg = parse_package(package)
store = PackageStore()
new = store.create_package(owner, pkg)
new.save_contents()
def build_from_node(package, node):
"""
Compile a Quilt data package from an existing package node.
"""
owner, pkg = parse_package(package)
# deliberate access of protected member
store = node._package.get_store()
package_obj = store.create_package(owner, pkg)
def _process_node(node, path=''):
if isinstance(node, nodes.GroupNode):
for key, child in node._items():
_process_node(child, path + '/' + key)
elif isinstance(node, nodes.DataNode):
core_node = node._node
metadata = core_node.metadata or {}
if isinstance(core_node, TableNode):
dataframe = node._data()
package_obj.save_df(dataframe, path, metadata.get('q_path'), metadata.get('q_ext'),
'pandas', PackageFormat.default)
elif isinstance(core_node, FileNode):
src_path = node._data()
package_obj.save_file(src_path, path, metadata.get('q_path'))
else:
assert False, "Unexpected core node type: %r" % core_node
else:
assert False, "Unexpected node type: %r" % node
_process_node(node)
package_obj.save_contents()
def build_from_path(package, path, dry_run=False, env='default', outfilename=DEFAULT_BUILDFILE):
"""
Compile a Quilt data package from a build file.
Path can be a directory, in which case the build file will be generated automatically.
"""
owner, pkg = parse_package(package)
if not os.path.exists(path):
raise CommandException("%s does not exist." % path)
try:
if os.path.isdir(path):
buildpath = os.path.join(path, outfilename)
if os.path.exists(buildpath):
raise CommandException(
"Build file already exists. Run `quilt build %r` instead." % buildpath
)
contents = generate_contents(path, outfilename)
build_package_from_contents(owner, pkg, path, contents, dry_run=dry_run, env=env)
else:
build_package(owner, pkg, path, dry_run=dry_run, env=env)
if not dry_run:
print("Built %s/%s successfully." % (owner, pkg))
except BuildException as ex:
raise CommandException("Failed to build the package: %s" % ex)
def log(package):
"""
List all of the changes to a package on the server.
"""
owner, pkg = parse_package(package)
session = _get_session()
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
format_str = "%-64s %-19s %s"
print(format_str % ("Hash", "Pushed", "Author"))
for entry in reversed(response.json()['logs']):
ugly = datetime.fromtimestamp(entry['created'])
nice = ugly.strftime("%Y-%m-%d %H:%M:%S")
print(format_str % (entry['hash'], nice, entry['author']))
def push(package, public=False, reupload=False):
"""
Push a Quilt data package to the server
"""
owner, pkg = parse_package(package)
session = _get_session()
pkgobj = PackageStore.find_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
pkghash = pkgobj.get_hash()
def _push_package(dry_run=False):
data = json.dumps(dict(
dry_run=dry_run,
public=public,
contents=pkgobj.get_contents(),
description="" # TODO
), default=encode_node)
compressed_data = gzip_compress(data.encode('utf-8'))
return session.put(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
hash=pkghash
),
data=compressed_data,
headers={
'Content-Encoding': 'gzip'
}
)
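    # Push happens in two phases (see below): first a dry_run PUT, which makes
    # the registry return pre-signed S3 upload URLs per fragment hash, then the
    # real PUT of the package metadata once every fragment has been uploaded.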
print("Fetching upload URLs from the registry...")
resp = _push_package(dry_run=True)
upload_urls = resp.json()['upload_urls']
obj_queue = sorted(set(find_object_hashes(pkgobj.get_contents())), reverse=True)
total = len(obj_queue)
total_bytes = 0
for obj_hash in obj_queue:
total_bytes += os.path.getsize(pkgobj.get_store().object_path(obj_hash))
uploaded = []
lock = Lock()
headers = {
'Content-Encoding': 'gzip'
}
print("Uploading %d fragments (%d bytes before compression)..." % (total, total_bytes))
with tqdm(total=total_bytes, unit='B', unit_scale=True) as progress:
def _worker_thread():
with requests.Session() as s3_session:
# Retry 500s.
retries = Retry(total=3,
backoff_factor=.5,
status_forcelist=[500, 502, 503, 504])
s3_session.mount('https://', HTTPAdapter(max_retries=retries))
while True:
with lock:
if not obj_queue:
break
obj_hash = obj_queue.pop()
try:
obj_urls = upload_urls[obj_hash]
original_size = os.path.getsize(pkgobj.get_store().object_path(obj_hash))
if reupload or not s3_session.head(obj_urls['head']).ok:
# Create a temporary gzip'ed file.
with pkgobj.tempfile(obj_hash) as temp_file:
temp_file.seek(0, 2)
compressed_size = temp_file.tell()
temp_file.seek(0)
# Workaround for non-local variables in Python 2.7
class Context:
compressed_read = 0
original_last_update = 0
def _progress_cb(count):
Context.compressed_read += count
original_read = Context.compressed_read * original_size // compressed_size
with lock:
progress.update(original_read - Context.original_last_update)
Context.original_last_update = original_read
with FileWithReadProgress(temp_file, _progress_cb) as fd:
url = obj_urls['put']
response = s3_session.put(url, data=fd, headers=headers)
response.raise_for_status()
else:
with lock:
tqdm.write("Fragment %s already uploaded; skipping." % obj_hash)
progress.update(original_size)
with lock:
uploaded.append(obj_hash)
except requests.exceptions.RequestException as ex:
message = "Upload failed for %s:\n" % obj_hash
if ex.response is not None:
message += "URL: %s\nStatus code: %s\nResponse: %r\n" % (
ex.request.url, ex.response.status_code, ex.response.text
)
else:
message += "%s\n" % ex
with lock:
tqdm.write(message)
threads = [
Thread(target=_worker_thread, name="upload-worker-%d" % i)
for i in range(PARALLEL_UPLOADS)
]
for thread in threads:
thread.daemon = True
thread.start()
for thread in threads:
thread.join()
if len(uploaded) != total:
raise CommandException("Failed to upload fragments")
print("Uploading package metadata...")
_push_package()
print("Updating the 'latest' tag...")
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=LATEST_TAG
),
data=json.dumps(dict(
hash=pkghash
))
)
url = "https://quiltdata.com/package/%s/%s" % (owner, pkg)
print("Push complete. %s/%s is live:\n%s" % (owner, pkg, url))
def version_list(package):
"""
List the versions of a package.
"""
owner, pkg = parse_package(package)
session = _get_session()
response = session.get(
"{url}/api/version/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
for version in response.json()['versions']:
print("%s: %s" % (version['version'], version['hash']))
def version_add(package, version, pkghash, force=False):
"""
Add a new version for a given package hash.
Version format needs to follow PEP 440.
Versions are permanent - once created, they cannot be modified or deleted.
"""
owner, pkg = parse_package(package)
session = _get_session()
try:
Version(version)
except ValueError:
url = "https://www.python.org/dev/peps/pep-0440/#examples-of-compliant-version-schemes"
raise CommandException(
"Invalid version format; see %s" % url
)
if not force:
answer = input("Versions cannot be modified or deleted; are you sure? (y/n) ")
if answer.lower() != 'y':
return
session.put(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
version=version
),
data=json.dumps(dict(
hash=_match_hash(session, owner, pkg, pkghash)
))
)
def tag_list(package):
"""
List the tags of a package.
"""
owner, pkg = parse_package(package)
session = _get_session()
response = session.get(
"{url}/api/tag/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
for tag in response.json()['tags']:
print("%s: %s" % (tag['tag'], tag['hash']))
def tag_add(package, tag, pkghash):
"""
Add a new tag for a given package hash.
Unlike versions, tags can have an arbitrary format, and can be modified
and deleted.
When a package is pushed, it gets the "latest" tag.
"""
owner, pkg = parse_package(package)
session = _get_session()
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=tag
),
data=json.dumps(dict(
hash=_match_hash(session, owner, pkg, pkghash)
))
)
def tag_remove(package, tag):
"""
Delete a tag.
"""
owner, pkg = parse_package(package)
session = _get_session()
session.delete(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=tag
)
)
def install_via_requirements(requirements_str, force=False):
"""
Download multiple Quilt data packages via quilt.xml requirements file.
"""
if requirements_str[0] == '@':
path = requirements_str[1:]
if os.path.isfile(path):
yaml_data = load_yaml(path)
else:
raise CommandException("Requirements file not found: {filename}".format(filename=path))
else:
yaml_data = yaml.load(requirements_str)
for pkginfo in yaml_data['packages']:
owner, pkg, subpath, hash, version, tag = parse_package_extended(pkginfo)
package = owner + '/' + pkg
if subpath is not None:
package += '/' + "/".join(subpath)
install(package, hash, version, tag, force=force)
def install(package, hash=None, version=None, tag=None, force=False):
"""
Download a Quilt data package from the server and install locally.
At most one of `hash`, `version`, or `tag` can be given. If none are
given, `tag` defaults to "latest".
"""
if hash is version is tag is None:
tag = LATEST_TAG
    # "@filename" ==> read requirements from a file
    # embedded newlines ==> multiple requirements
package = package.strip()
if len(package) == 0:
raise CommandException("package name is empty.")
if package[0] == '@' or '\n' in package:
return install_via_requirements(package, force=force)
assert [hash, version, tag].count(None) == 2
owner, pkg, subpath = parse_package(package, allow_subpath=True)
session = _get_session()
store = PackageStore()
existing_pkg = store.get_package(owner, pkg)
if version is not None:
response = session.get(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
version=version
)
)
pkghash = response.json()['hash']
elif tag is not None:
response = session.get(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=tag
)
)
pkghash = response.json()['hash']
else:
pkghash = _match_hash(session, owner, pkg, hash)
assert pkghash is not None
response = session.get(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
hash=pkghash
),
params=dict(
subpath='/'.join(subpath)
)
)
assert response.ok # other responses handled by _handle_response
if existing_pkg is not None and not force:
print("{owner}/{pkg} already installed.".format(owner=owner, pkg=pkg))
overwrite = input("Overwrite? (y/n) ")
if overwrite.lower() != 'y':
return
dataset = response.json(object_hook=decode_node)
response_urls = dataset['urls']
response_contents = dataset['contents']
# Verify contents hash
if pkghash != hash_contents(response_contents):
raise CommandException("Mismatched hash. Try again.")
pkgobj = store.install_package(owner, pkg, response_contents)
with requests.Session() as s3_session:
total = len(response_urls)
for idx, (download_hash, url) in enumerate(sorted(iteritems(response_urls))):
print("Downloading %s (%d/%d)..." % (download_hash, idx + 1, total))
local_filename = store.object_path(download_hash)
if os.path.exists(local_filename):
file_hash = digest_file(local_filename)
if file_hash == download_hash:
print("Fragment already installed; skipping.")
continue
else:
print("Fragment already installed, but has the wrong hash (%s); re-downloading." %
file_hash)
temp_path_gz = store.temporary_object_path(download_hash + '.gz')
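            # The fragment is opened in append mode so a partial download can be
            # resumed: the Range header below asks S3 for bytes from the current
            # file size onward, and RANGE_NOT_SATISFIABLE means the file is
            # already complete.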
with open(temp_path_gz, 'ab') as output_file:
for attempt in range(S3_TIMEOUT_RETRIES):
try:
starting_length = output_file.tell()
response = s3_session.get(
url,
headers={
'Range': 'bytes=%d-' % starting_length
},
stream=True,
timeout=(S3_CONNECT_TIMEOUT, S3_READ_TIMEOUT)
)
                        # RANGE_NOT_SATISFIABLE means we already have the whole file.
if response.status_code != requests.codes.RANGE_NOT_SATISFIABLE:
if not response.ok:
message = "Download failed for %s:\nURL: %s\nStatus code: %s\nResponse: %r\n" % (
download_hash, response.request.url, response.status_code, response.text
)
raise CommandException(message)
# Fragments have the 'Content-Encoding: gzip' header set to make requests ungzip
# them automatically - but that turned out to be a bad idea because it makes
# resuming downloads impossible.
# HACK: For now, just delete the header. Eventually, update the data in S3.
response.raw.headers.pop('Content-Encoding', None)
# Make sure we're getting the expected range.
content_range = response.headers.get('Content-Range', '')
match = CONTENT_RANGE_RE.match(content_range)
if not match or not int(match.group(1)) == starting_length:
raise CommandException("Unexpected Content-Range: %s" % content_range)
total_length = int(match.group(3))
with tqdm(initial=starting_length,
total=total_length,
unit='B',
unit_scale=True) as progress:
for chunk in response.iter_content(CHUNK_SIZE):
output_file.write(chunk)
progress.update(len(chunk))
break # Done!
except requests.exceptions.ConnectionError:
if attempt < S3_TIMEOUT_RETRIES - 1:
print("Timed out; retrying...")
else:
raise
# Ungzip the downloaded fragment.
temp_path = store.temporary_object_path(download_hash)
try:
with gzip.open(temp_path_gz, 'rb') as f_in, open(temp_path, 'wb') as f_out:
copyfileobj(f_in, f_out)
finally:
# Delete the file unconditionally - in case it's corrupted and cannot be ungzipped.
os.remove(temp_path_gz)
# Check the hash of the result.
file_hash = digest_file(temp_path)
if file_hash != download_hash:
os.remove(temp_path)
raise CommandException("Fragment hashes do not match: expected %s, got %s." %
(download_hash, file_hash))
move(temp_path, local_filename)
pkgobj.save_contents()
def _setup_env(env, files):
""" process data distribution. """
# TODO: build.yml is not saved in the package system, so re-load it here
with open('build.yml') as fd:
buildfile = next(yaml.load_all(fd), None)
environments = buildfile.get('environments', {})
if env != 'default' and (env not in environments):
raise CommandException(
"environment %s not found in environments: section of build.yml" % env)
if len(environments) == 0:
return files
if env == 'default' and 'default' not in environments:
return files
# TODO: this should be done during quilt push, not during install/import
# (requires server support)
# TODO: add a way to dry-run dataset checking
print('processing environment %s: checking data...' % (env))
environment = environments[env]
dataset = environment.get('dataset')
for key, val in files.items():
# TODO: debug mode, where we can see which files were skipped
if isinstance(val, pd.DataFrame):
before_len = len(val)
res = exec_yaml_python(dataset, val, key, '('+key+')')
if not res and res is not None:
raise BuildException("error creating dataset for environment: %s on file %s" % (
env, key))
print('%s: %s=>%s recs' % (key, before_len, len(qc.data)))
files[key] = qc.data
# TODO: should be done on the server during quilt install
# (requires server support)
print('processing environment %s: slicing data...' % (env))
instance_data = environment.get('instance_data')
for key, val in files.items():
# TODO: debug mode, where we can see which files were skipped
        if isinstance(val, pd.DataFrame):
            before_len = len(val)
            # TODO: pass instance identifier, e.g. instance number N of M
            val['.qchash'] = val.apply(lambda x: abs(hash(tuple(x))), axis=1)
res = exec_yaml_python(instance_data, val, key, '('+key+')')
if res == False:
raise BuildException("error assigning data to instance in environment: %s on file %s" % (
env, key))
print('%s: %s=>%s recs' % (key, before_len, len(qc.data)))
files[key] = qc.data
return files
def access_list(package):
"""
Print list of users who can access a package.
"""
owner, pkg = parse_package(package)
session = _get_session()
lookup_url = "{url}/api/access/{owner}/{pkg}".format(url=get_registry_url(), owner=owner, pkg=pkg)
response = session.get(lookup_url)
data = response.json()
users = data['users']
print('\n'.join(users))
def access_add(package, user):
"""
Add access
"""
owner, pkg = parse_package(package)
session = _get_session()
session.put("%s/api/access/%s/%s/%s" % (get_registry_url(), owner, pkg, user))
def access_remove(package, user):
"""
Remove access
"""
owner, pkg = parse_package(package)
session = _get_session()
session.delete("%s/api/access/%s/%s/%s" % (get_registry_url(), owner, pkg, user))
def delete(package):
"""
Delete a package from the server.
Irreversibly deletes the package along with its history, tags, versions, etc.
"""
owner, pkg = parse_package(package)
answer = input(
"Are you sure you want to delete this package and its entire history? " +
"Type '%s/%s' to confirm: " % (owner, pkg)
)
if answer != '%s/%s' % (owner, pkg):
print("Not deleting.")
return 1
session = _get_session()
session.delete("%s/api/package/%s/%s/" % (get_registry_url(), owner, pkg))
print("Deleted.")
def search(query):
"""
Search for packages
"""
session = _get_session()
response = session.get("%s/api/search/" % get_registry_url(), params=dict(q=query))
packages = response.json()['packages']
for pkg in packages:
print("%(owner)s/%(name)s" % pkg)
def ls(): # pylint:disable=C0103
"""
List all installed Quilt data packages
"""
for pkg_dir in PackageStore.find_store_dirs():
print("%s" % pkg_dir)
packages = PackageStore(pkg_dir).ls_packages()
for package, tag, pkghash in packages:
print("{0:30} {1:20} {2}".format(package, tag, pkghash))
def inspect(package):
"""
Inspect package details
"""
owner, pkg = parse_package(package)
pkgobj = PackageStore.find_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
def _print_children(children, prefix, path):
for idx, (name, child) in enumerate(children):
if idx == len(children) - 1:
new_prefix = u"└─"
new_child_prefix = u" "
else:
new_prefix = u"├─"
new_child_prefix = u"│ "
_print_node(child, prefix + new_prefix, prefix + new_child_prefix, name, path)
def _print_node(node, prefix, child_prefix, name, path):
name_prefix = u"─ "
if isinstance(node, GroupNode):
children = list(node.children.items())
if children:
name_prefix = u"┬ "
print(prefix + name_prefix + name)
_print_children(children, child_prefix, path + name)
elif isinstance(node, TableNode):
df = pkgobj.get_obj(node)
assert isinstance(df, pd.DataFrame)
info = "shape %s, type \"%s\"" % (df.shape, df.dtypes)
print(prefix + name_prefix + ": " + info)
elif isinstance(node, FileNode):
print(prefix + name_prefix + name)
else:
assert False, "node=%s type=%s" % (node, type(node))
print(pkgobj.get_path())
_print_children(children=pkgobj.get_contents().children.items(), prefix='', path='')
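# Example of the tree rendering produced by inspect() (illustrative names):
#
#   ├─┬ tables
#   │ ├── sales: shape (100, 3), type "..."
#   │ └── users: shape (42, 5), type "..."
#   └── README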
def rm(package, force=False):
"""
Remove a package (all instances) from the local store.
"""
owner, pkg = parse_package(package)
if not force:
confirmed = input("Remove {0}? (y/n)".format(package))
if confirmed.lower() != 'y':
return
store = PackageStore()
deleted = store.remove_package(owner, pkg)
for obj in deleted:
print("Removed: {0}".format(obj))
| 1 | 15,498 | I seem to recall a UI issue with this... @akarve I think you didn't like this for some reason... | quiltdata-quilt | py |
@@ -84,7 +84,7 @@ class MediaBlockService extends BaseBlockService
/**
* {@inheritdoc}
*/
- public function getDefaultSettings(OptionsResolverInterface $resolver)
+ public function setDefaultSettings(OptionsResolverInterface $resolver)
{
$resolver->setDefaults(array(
'media' => false, | 1 | <?php
/*
* This file is part of the Sonata project.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Block;
use Sonata\AdminBundle\Form\FormMapper;
use Sonata\AdminBundle\Admin\Admin;
use Sonata\AdminBundle\Validator\ErrorElement;
use Sonata\BlockBundle\Block\BlockContextInterface;
use Sonata\BlockBundle\Model\BlockInterface;
use Sonata\BlockBundle\Block\BaseBlockService;
use Sonata\MediaBundle\Model\MediaManagerInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Symfony\Component\OptionsResolver\OptionsResolver;
use Symfony\Component\OptionsResolver\OptionsResolverInterface;
use Symfony\Component\Templating\EngineInterface;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\Form\Form;
use Symfony\Component\DependencyInjection\ContainerInterface;
/**
 * MediaBlockService
*
* @author Thomas Rabaix <[email protected]>
*/
class MediaBlockService extends BaseBlockService
{
protected $mediaAdmin;
protected $mediaManager;
protected $container;
/**
* @param string $name
* @param \Symfony\Component\Templating\EngineInterface $templating
* @param \Symfony\Component\DependencyInjection\ContainerInterface $container
* @param \Sonata\MediaBundle\Model\MediaManagerInterface $mediaManager
*/
public function __construct($name, EngineInterface $templating, ContainerInterface $container, MediaManagerInterface $mediaManager)
{
parent::__construct($name, $templating);
$this->mediaManager = $mediaManager;
$this->container = $container;
}
/**
* {@inheritdoc}
*/
public function getName()
{
return 'Media';
}
/**
* @return mixed
*/
public function getMediaPool()
{
return $this->getMediaAdmin()->getPool();
}
/**
* @return mixed
*/
public function getMediaAdmin()
{
if (!$this->mediaAdmin) {
$this->mediaAdmin = $this->container->get('sonata.media.admin.media');
}
return $this->mediaAdmin;
}
/**
* {@inheritdoc}
*/
public function getDefaultSettings(OptionsResolverInterface $resolver)
{
$resolver->setDefaults(array(
'media' => false,
'title' => false,
'context' => false,
'mediaId' => false,
'format' => false,
'template' => 'SonataMediaBundle:Block:block_media.html.twig'
));
}
/**
* {@inheritdoc}
*/
public function buildEditForm(FormMapper $formMapper, BlockInterface $block)
{
$contextChoices = $this->getContextChoices();
if (!$block->getSetting('mediaId') instanceof MediaInterface) {
$this->load($block);
}
$formatChoices = $this->getFormatChoices($block->getSetting('mediaId'));
$formMapper->add('settings', 'sonata_type_immutable_array', array(
'keys' => array(
array('title', 'text', array('required' => false)),
array('context', 'choice', array('required' => true, 'choices' => $contextChoices)),
array('format', 'choice', array('required' => count($formatChoices) > 0, 'choices' => $formatChoices)),
array($this->getMediaBuilder($formMapper), null, array()),
)
));
}
/**
* @return array
*/
protected function getContextChoices()
{
$contextChoices = array();
foreach ($this->getMediaPool()->getContexts() as $name => $context) {
$contextChoices[$name] = $name;
}
return $contextChoices;
}
/**
* @param null|\Sonata\MediaBundle\Model\MediaInterface $media
*
* @return array
*/
protected function getFormatChoices(MediaInterface $media = null)
{
$formatChoices = array();
if ($media instanceof MediaInterface) {
$formats = $this->getMediaPool()->getFormatNamesByContext($media->getContext());
foreach ($formats as $code => $format) {
$formatChoices[$code] = $code;
}
}
return $formatChoices;
}
/**
* @param \Sonata\AdminBundle\Form\FormMapper $formMapper
*
* @return \Symfony\Component\Form\FormBuilder
*/
protected function getMediaBuilder(FormMapper $formMapper)
{
// simulate an association ...
$fieldDescription = $this->getMediaAdmin()->getModelManager()->getNewFieldDescriptionInstance($this->mediaAdmin->getClass(), 'media');
$fieldDescription->setAssociationAdmin($this->getMediaAdmin());
$fieldDescription->setAdmin($formMapper->getAdmin());
$fieldDescription->setOption('edit', 'list');
$fieldDescription->setAssociationMapping(array(
'fieldName' => 'media',
'type' => \Doctrine\ORM\Mapping\ClassMetadataInfo::MANY_TO_ONE
));
return $formMapper->create('mediaId', 'sonata_type_model', array(
'sonata_field_description' => $fieldDescription,
'class' => $this->getMediaAdmin()->getClass(),
'model_manager' => $this->getMediaAdmin()->getModelManager()
));
}
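// Note (illustrative, not from the original bundle docs): the simulated
// association above lets the admin "list" widget browse existing media while
// the block itself only stores a media id; load() swaps the id for the media
// object when editing, and prePersist()/preUpdate() swap it back before saving.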
/**
* {@inheritdoc}
*/
public function validateBlock(ErrorElement $errorElement, BlockInterface $block)
{
}
/**
* {@inheritdoc}
*/
public function execute(BlockContextInterface $blockContext, Response $response = null)
{
return $this->renderResponse($this->getTemplate(), array(
'media' => $blockContext->getSetting('mediaId'),
'block' => $blockContext->getBlock(),
'settings' => $blockContext->getSettings()
), $response);
}
/**
* {@inheritdoc}
*/
public function load(BlockInterface $block)
{
$media = $block->getSetting('mediaId', null);
if ($media) {
$media = $this->mediaManager->findOneBy(array('id' => $media));
}
$block->setSetting('mediaId', $media);
}
/**
* {@inheritdoc}
*/
public function prePersist(BlockInterface $block)
{
$block->setSetting('mediaId', is_object($block->getSetting('mediaId')) ? $block->getSetting('mediaId')->getId() : null);
}
/**
* {@inheritdoc}
*/
public function preUpdate(BlockInterface $block)
{
$block->setSetting('mediaId', is_object($block->getSetting('mediaId')) ? $block->getSetting('mediaId')->getId() : null);
}
}
| 1 | 6,075 | The `MediaBlockService` commit must be removed | sonata-project-SonataMediaBundle | php |
@@ -239,11 +239,17 @@ rules:
maximum_retention: 365
"""
+
+ def test_number_of_rules(self):
+ """The number of rules should be exactly the same as the length of SUPPORTED_RETENTION_RES_TYPES."""
+ rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
+ self.assertTrue(len(rre.SUPPORTED_RETENTION_RES_TYPES) == len(rules_engine.rule_book.resource_rules_map))
+
+
def test_only_max_normal_delete(self):
"""Test that a bucket's rule can guarantee the maximum_retention if its
action is 'Delete' and the only condition is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
- self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365) | 1 | # Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the BigqueryRulesEngine."""
import copy
import itertools
import json
import mock
import tempfile
import unittest
import yaml
from datetime import datetime, timedelta
from google.cloud.forseti.common.gcp_type import organization
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import errors as audit_errors
from google.cloud.forseti.scanner.audit import retention_rules_engine as rre
from google.cloud.forseti.scanner.audit import rules as scanner_rules
from tests.scanner.test_data import fake_retention_scanner_data as frsd
from tests.unittest_utils import get_datafile_path
from tests.unittest_utils import ForsetiTestCase
from collections import namedtuple
from google.cloud.forseti.scanner.scanners import retention_scanner
def get_rules_engine_with_rule(rule):
"""Create a rule engine based on a yaml file string"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(rule)
f.flush()
rules_engine = rre.RetentionRulesEngine(
rules_file_path=f.name)
rules_engine.build_rule_book()
return rules_engine
def get_expect_violation_item(res_map, bucket_id, rule_name, rule_index):
RuleViolation = namedtuple(
'RuleViolation',
['resource_name', 'resource_type', 'full_name', 'rule_name',
'rule_index', 'violation_type', 'violation_data', 'resource_data'])
lifecycle_str = json.dumps(res_map.get(bucket_id).get_lifecycle_rule())
return RuleViolation(
resource_name=bucket_id,
resource_type=res_map.get(bucket_id).type,
full_name=res_map.get(bucket_id).full_name,
rule_name=rule_name,
rule_index=rule_index,
violation_type=rre.VIOLATION_TYPE,
violation_data=lifecycle_str,
resource_data=res_map.get(bucket_id).data)
class RetentionRulesEngineTest(ForsetiTestCase):
"""Tests for the BigqueryRulesEngine."""
def setUp(self):
"""Set up."""
def test_invalid_rule_with_no_applies_to(self):
"""Test that a rule without applies_to cannot be created"""
yaml_str_no_applies_to="""
rules:
- name: No applies_to
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_applies_to)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_lack_of_min_max(self):
"""Test that a rule with neither minimum_retention nor maximum_retention
cannot be created"""
yaml_str_lack_min_max="""
rules:
- name: Lack of min and max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_lack_min_max)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_min_lgr_max(self):
"""Test that a rule whose minimum_retention is larger than
maximum_retention cannot be created"""
yaml_str_min_lgr_max="""
rules:
- name: min larger than max
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 366
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_min_lgr_max)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_duplicate_applies_to(self):
"""Test that a rule with duplicate applies_to cannot be created"""
yaml_str_duplicate_applies_to="""
rules:
- name: Duplicate applies_to
applies_to:
- bucket
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_duplicate_applies_to)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_resource(self):
"""Test that a rule without resource cannot be created"""
yaml_str_no_resource="""
rules:
- name: No resource
applies_to:
- bucket
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_resource)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_res_type(self):
"""Test that a rule without resource.type cannot be created"""
yaml_str_no_res_type="""
rules:
- name: No resource type
applies_to:
- bucket
resource:
- resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_res_type)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_res_id(self):
"""Test that a rule without resource.resource_ids cannot be created"""
yaml_str_no_res_id="""
rules:
- name: No resource ids
applies_to:
- bucket
resource:
- type: bucket
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_res_id)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
yaml_str_only_max_retention = """
rules:
- name: only max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
maximum_retention: 365
"""
def test_only_max_normal_delete(self):
"""Test that a bucket's rule can guarantee the maximum_retention if its
action is 'Delete' and the only condition is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_normal_nodelete(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its action is not 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_larger_delete(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its age condition is larger than maximum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_normal_del_anynormal_del(self):
"""Test that a bucket's rules can guarantee the maximum_retention
if they include a rule whose action is 'Delete' and the only condition
is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=365, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_lgr_del_anynormal_del(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its age comes along with any other conditions"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=365, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_lgr_del_normal_else(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its action is not 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
data_creater.AddLifecycleDict(action="SetStorageClass", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_normal_del_any_del(self):
"""Test that a bucket could have more than one rules. If one of them can
guarantee the maximum_retention, there is no violation."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
data_creater.AddLifecycleDict(action="Delete", is_live=False)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_normal_del_lgr_del(self):
"""Test that a bucket could have more than one rules. If one of them can
guarantee the maximum_retention, there is no violation."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
data_creater.AddLifecycleDict(action="Delete", age=366)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_no_condition(self):
"""Test that a rule with maximum_retention produces a violation,
if a bucket has no condition at all."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_anynormal_del(self):
"""Test that a rule with maximum_retention produces a violation.
A condition whose age comes along with any other conditions cannot
guarantee the maximum_retention."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365, num_newer_versions=5)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
yaml_str_only_min_retention = """
rules:
- name: only min retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
minimum_retention: 90
"""
def test_only_min_normal_del(self):
"""Test that a rule with minimum_retention does not produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_normal_else(self):
"""Test that a rule whose action is not 'Delete' should not break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_else(self):
"""Test that a rule whose action is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_no_condition(self):
"""Test that a rule with minimum_retention does not produce violations.
The minimum_retention is guaranteed when there is no condition at all"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessver1_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its number of newer versions
is larger than 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89, num_newer_versions=1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessver0_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its number of newer versions
is equal to 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89, num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_ver1_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its number of newer versions
is larger than 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", num_newer_versions=1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_ver0_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its number of newer versions
is equal to 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_ver0_else(self):
"""Test that a rule with minimum_retention does not produce violations.
An action that is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessold_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its created before time
is earlier than today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=89, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessnew_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its created before time
is later than today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=89, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_normalnew_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its age is larger
than or equal to minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=90, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_del_normal_del(self):
"""Test that a rule with minimum_retention produces violations.
A rule that does not produce violations cannot prevent another rule from
producing violations"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_less_else_normal_del(self):
"""Test that a rule with minimum_retention does not produce violations.
An action that is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule breaks minimum_retention, if its age is smaller than
minimum_retention and its action is 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_old_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its created before time
is earlier than the date that is today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_new_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its created before time
is later than today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
yaml_str_both_min_and_max_retention = """
rules:
- name: both min and max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
minimum_retention: 90
maximum_retention: 365
"""
def test_both_min_max_no_condition(self):
"""Test that a rule with both minimum_retention and maximum_retention
produces violations. A bucket's rule breaks it if the bucket breaks the
maximum_retention part."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'both min and max retention')
self.assertEqual(got_violations, expected_violations)
def test_both_min_max_normal_del_any_del(self):
"""Test that a rule with both minimum_retention and maximum_retention
produces violations. A bucket's rule breaks it if the bucket breaks the
minimum_retention part."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
data_creater.AddLifecycleDict(action="Delete", is_live=True)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'both min and max retention')
self.assertEqual(got_violations, expected_violations)
def test_both_min_max_normal_del(self):
"""Test that a rule with both minimum_retention and maximum_retention
does not produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_both_min_max_3_conditions(self):
"""Test that a rule with both minimum_retention and maximum_retention
does not produce violations when there is more than one condition."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
data_creater.AddLifecycleDict(action="Delete", age=500)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
yaml_str_bucket_retention_on_correct_project = """
rules:
- name: bucket retention on correct project
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-1
minimum_retention: 90
"""
def test_bucket_on_correct_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_correct_project)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_bucket_on_correct_project_has_vio(self):
"""Test that a rule with a resource.type equal to 'project' produces violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_correct_project)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on correct project')
self.assertEqual(got_violations, expected_violations)
yaml_str_bucket_retention_on_wrong_project = """
rules:
- name: bucket retention on wrong project
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-wrong
minimum_retention: 90
"""
def test_bucket_on_incorrect_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations because the project ID does not match"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_wrong_project)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
yaml_str_bucket_retention_on_multi_projects = """
rules:
- name: bucket retention on multi projects
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-1
- def-project-2
minimum_retention: 90
"""
def test_bucket_on_multi_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations when the resource_ids includes more than one projects"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_multi_projects)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket_1', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
data_creater = frsd.FakeBucketDataCreater('fake_bucket_2', frsd.PROJECT2)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_bucket_on_multi_project_has_vio(self):
"""Test that a rule with a resource.type equal to 'project' produces
violations when the resource_ids includes more than one projects"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_multi_projects)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket_1', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on multi projects')
self.assertEqual(got_violations, expected_violations)
data_creater = frsd.FakeBucketDataCreater('fake_bucket_2', frsd.PROJECT2)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on multi projects')
self.assertEqual(got_violations, expected_violations)
if __name__ == '__main__':
unittest.main()
| 1 | 32,976 | Use a literal constant here, and use `assertEqual`, e.g. `self.assertEqual(2, len(...))`. You should also check that the number of rules is correct, i.e. 1 rule for buckets, 0 rules for tables. | forseti-security-forseti-security | py |
@@ -15,8 +15,8 @@
* will expire before some files can be uploaded.
*
* The long-term solution to this problem is to change the upload pipeline so that files
- * can be sent to the next step individually. That requires a breakig change, so it is
- * planned for Uppy v2.
+ * can be sent to the next step individually. That requires a breaking change, so it is
+ * planned for Uppy v4.
*
* In the mean time, this plugin is stuck with a hackier approach: the necessary parts
* of the XHRUpload implementation were copied into this plugin, as the MiniXHRUpload | 1 | /**
* This plugin is currently A Big Hack™! The core reason for that is how this plugin
* interacts with Uppy's current pipeline design. The pipeline can handle files in steps,
* including preprocessing, uploading, and postprocessing steps. This plugin initially
* was designed to do its work in a preprocessing step, and let XHRUpload deal with the
* actual file upload as an uploading step. However, Uppy runs steps on all files at once,
* sequentially: first, all files go through a preprocessing step, then, once they are all
* done, they go through the uploading step.
*
* For S3, this causes severely broken behaviour when users upload many files. The
* preprocessing step will request S3 upload URLs that are valid for a short time only,
* but it has to do this for _all_ files, which can take a long time if there are hundreds
* or even thousands of files. By the time the uploader step starts, the first URLs may
* already have expired. If not, the uploading might take such a long time that later URLs
* will expire before some files can be uploaded.
*
* The long-term solution to this problem is to change the upload pipeline so that files
* can be sent to the next step individually. That requires a breakig change, so it is
* planned for Uppy v2.
*
* In the mean time, this plugin is stuck with a hackier approach: the necessary parts
* of the XHRUpload implementation were copied into this plugin, as the MiniXHRUpload
* class, and this plugin calls into it immediately once it receives an upload URL.
* This isn't as nicely modular as we'd like and requires us to maintain two copies of
* the XHRUpload code, but at least it's not horrifically broken :)
*/
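// Put differently (illustrative timeline): with the batch pipeline, all N
// presign requests complete before upload #1 starts, so presigned URL #1 has
// already aged by N presign round-trips by the time it is used. The
// MiniXHRUpload hack instead runs presign(file) -> upload(file) back to back
// for each file.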
// If global `URL` constructor is available, use it
const URL_ = typeof URL === 'function' ? URL : require('url-parse')
const { Plugin } = require('@uppy/core')
const Translator = require('@uppy/utils/lib/Translator')
const RateLimitedQueue = require('@uppy/utils/lib/RateLimitedQueue')
const settle = require('@uppy/utils/lib/settle')
const hasProperty = require('@uppy/utils/lib/hasProperty')
const { RequestClient } = require('@uppy/companion-client')
const qsStringify = require('qs-stringify')
const MiniXHRUpload = require('./MiniXHRUpload')
const isXml = require('./isXml')
function resolveUrl (origin, link) {
return origin
? new URL_(link, origin).toString()
: new URL_(link).toString()
}
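// e.g. resolveUrl('https://space.example', '/bucket/key') === 'https://space.example/bucket/key',
// which covers providers (such as DigitalOcean Spaces) that reply with
// bucket-relative Location values.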
/**
* Get the contents of a named tag in an XML source string.
*
* @param {string} source - The XML source string.
* @param {string} tagName - The name of the tag.
* @returns {string} The contents of the tag, or the empty string if the tag does not exist.
*/
function getXmlValue (source, tagName) {
const start = source.indexOf(`<${tagName}>`)
const end = source.indexOf(`</${tagName}>`, start)
return start !== -1 && end !== -1
? source.slice(start + tagName.length + 2, end)
: ''
}
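// Quick illustration of the helper above:
//   getXmlValue('<PostResponse><Key>photo.jpg</Key></PostResponse>', 'Key') === 'photo.jpg'
//   getXmlValue('<PostResponse></PostResponse>', 'Key') === ''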
function assertServerError (res) {
if (res && res.error) {
const error = new Error(res.message)
Object.assign(error, res.error)
throw error
}
return res
}
// warning deduplication flag: see `getResponseData()` XHRUpload option definition
let warnedSuccessActionStatus = false
module.exports = class AwsS3 extends Plugin {
static VERSION = require('../package.json').version
constructor (uppy, opts) {
super(uppy, opts)
this.type = 'uploader'
this.id = this.opts.id || 'AwsS3'
this.title = 'AWS S3'
this.defaultLocale = {
strings: {
timedOut: 'Upload stalled for %{seconds} seconds, aborting.',
},
}
const defaultOptions = {
timeout: 30 * 1000,
limit: 0,
metaFields: [], // have to opt in
getUploadParameters: this.getUploadParameters.bind(this),
}
this.opts = { ...defaultOptions, ...opts }
this.i18nInit()
this.client = new RequestClient(uppy, opts)
this.handleUpload = this.handleUpload.bind(this)
this.requests = new RateLimitedQueue(this.opts.limit)
}
setOptions (newOpts) {
super.setOptions(newOpts)
this.i18nInit()
}
i18nInit () {
this.translator = new Translator([this.defaultLocale, this.uppy.locale, this.opts.locale])
this.i18n = this.translator.translate.bind(this.translator)
this.setPluginState() // so that UI re-renders and we see the updated locale
}
getUploadParameters (file) {
if (!this.opts.companionUrl) {
throw new Error('Expected a `companionUrl` option containing a Companion address.')
}
const filename = file.meta.name
const type = file.meta.type
const metadata = {}
this.opts.metaFields.forEach((key) => {
if (file.meta[key] != null) {
metadata[key] = file.meta[key].toString()
}
})
const query = qsStringify({ filename, type, metadata })
return this.client.get(`s3/params?${query}`)
.then(assertServerError)
}
validateParameters (file, params) {
const valid = typeof params === 'object' && params
&& typeof params.url === 'string'
&& (typeof params.fields === 'object' || params.fields == null)
if (!valid) {
const err = new TypeError(`AwsS3: got incorrect result from 'getUploadParameters()' for file '${file.name}', expected an object '{ url, method, fields, headers }' but got '${JSON.stringify(params)}' instead.\nSee https://uppy.io/docs/aws-s3/#getUploadParameters-file for more on the expected format.`)
console.error(err)
throw err
}
const methodIsValid = params.method == null || /^(put|post)$/i.test(params.method)
if (!methodIsValid) {
const err = new TypeError(`AwsS3: got incorrect method from 'getUploadParameters()' for file '${file.name}', expected 'put' or 'post' but got '${params.method}' instead.\nSee https://uppy.io/docs/aws-s3/#getUploadParameters-file for more on the expected format.`)
console.error(err)
throw err
}
}
handleUpload (fileIDs) {
/**
* keep track of `getUploadParameters()` responses
* so we can cancel the calls individually using just a file ID
*
* @type {object.<string, Promise>}
*/
const paramsPromises = Object.create(null)
function onremove (file) {
const { id } = file
if (hasProperty(paramsPromises, id)) {
paramsPromises[id].abort()
}
}
this.uppy.on('file-removed', onremove)
fileIDs.forEach((id) => {
const file = this.uppy.getFile(id)
this.uppy.emit('upload-started', file)
})
const getUploadParameters = this.requests.wrapPromiseFunction((file) => {
return this.opts.getUploadParameters(file)
})
const numberOfFiles = fileIDs.length
return settle(fileIDs.map((id, index) => {
paramsPromises[id] = getUploadParameters(this.uppy.getFile(id))
return paramsPromises[id].then((params) => {
delete paramsPromises[id]
const file = this.uppy.getFile(id)
this.validateParameters(file, params)
const {
method = 'post',
url,
fields,
headers,
} = params
const xhrOpts = {
method,
formData: method.toLowerCase() === 'post',
endpoint: url,
metaFields: fields ? Object.keys(fields) : [],
}
if (headers) {
xhrOpts.headers = headers
}
this.uppy.setFileState(file.id, {
meta: { ...file.meta, ...fields },
xhrUpload: xhrOpts,
})
return this._uploader.uploadFile(file.id, index, numberOfFiles)
}).catch((error) => {
delete paramsPromises[id]
const file = this.uppy.getFile(id)
this.uppy.emit('upload-error', file, error)
})
})).then((settled) => {
// cleanup.
this.uppy.off('file-removed', onremove)
return settled
})
}
install () {
const uppy = this.uppy
this.uppy.addUploader(this.handleUpload)
// Get the response data from a successful XMLHttpRequest instance.
// `content` is the S3 response as a string.
// `xhr` is the XMLHttpRequest instance.
function defaultGetResponseData (content, xhr) {
const opts = this
// If no response, we've hopefully done a PUT request to the file
// in the bucket on its full URL.
if (!isXml(content, xhr)) {
if (opts.method.toUpperCase() === 'POST') {
if (!warnedSuccessActionStatus) {
uppy.log('[AwsS3] No response data found, make sure to set the success_action_status AWS SDK option to 201. See https://uppy.io/docs/aws-s3/#POST-Uploads', 'warning')
warnedSuccessActionStatus = true
}
// The responseURL won't contain the object key. Give up.
return { location: null }
}
// responseURL is not available in older browsers.
if (!xhr.responseURL) {
return { location: null }
}
// Trim the query string because it's going to be a bunch of presign
// parameters for a PUT request—doing a GET request with those will
// always result in an error
return { location: xhr.responseURL.replace(/\?.*$/, '') }
}
return {
// Some S3 alternatives do not reply with an absolute URL.
// Eg DigitalOcean Spaces uses /$bucketName/xyz
location: resolveUrl(xhr.responseURL, getXmlValue(content, 'Location')),
bucket: getXmlValue(content, 'Bucket'),
key: getXmlValue(content, 'Key'),
etag: getXmlValue(content, 'ETag'),
}
}
// Get the error data from a failed XMLHttpRequest instance.
// `content` is the S3 response as a string.
// `xhr` is the XMLHttpRequest instance.
function defaultGetResponseError (content, xhr) {
// If no response, we don't have a specific error message, use the default.
if (!isXml(content, xhr)) {
return
}
const error = getXmlValue(content, 'Message')
return new Error(error)
}
const xhrOptions = {
fieldName: 'file',
responseUrlFieldName: 'location',
timeout: this.opts.timeout,
// Share the rate limiting queue with XHRUpload.
__queue: this.requests,
responseType: 'text',
getResponseData: this.opts.getResponseData || defaultGetResponseData,
getResponseError: defaultGetResponseError,
}
// Only for MiniXHRUpload, remove once we can depend on XHRUpload directly again
xhrOptions.i18n = this.i18n
// Revert to `this.uppy.use(XHRUpload)` once the big comment block at the top of
// this file is solved
this._uploader = new MiniXHRUpload(this.uppy, xhrOptions)
}
uninstall () {
this.uppy.removeUploader(this.handleUpload)
}
}
| 1 | 14,195 | @Murderlon Should it just say `some future version`? | transloadit-uppy | js |
@@ -122,12 +122,14 @@ func (c *Controller) syncCSPC(cspcGot *apis.CStorPoolCluster) error {
return nil
}
- cspcGot, err := c.populateVersion(cspcGot)
+ cspcObj := cspcGot
+ cspcObj, err := c.populateVersion(cspcObj)
if err != nil {
klog.Errorf("failed to add versionDetails to CSPC %s:%s", cspcGot.Name, err.Error())
return nil
}
+ cspcGot = cspcObj
pc, err := c.NewPoolConfig(cspcGot, openebsNameSpace)
if err != nil {
message := fmt.Sprintf("Could not sync CSPC : failed to get pool config: {%s}", err.Error()) | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
bdc "github.com/openebs/maya/pkg/blockdeviceclaim/v1alpha1"
apiscspc "github.com/openebs/maya/pkg/cstor/poolcluster/v1alpha1"
"github.com/openebs/maya/pkg/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"time"
apiscsp "github.com/openebs/maya/pkg/cstor/poolinstance/v1alpha3"
nodeselect "github.com/openebs/maya/pkg/algorithm/nodeselect/v1alpha2"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
openebs "github.com/openebs/maya/pkg/client/generated/clientset/versioned"
env "github.com/openebs/maya/pkg/env/v1alpha1"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
type clientSet struct {
oecs openebs.Interface
}
// PoolConfig embeds nodeselect config from algorithm package and Controller object.
type PoolConfig struct {
AlgorithmConfig *nodeselect.Config
Controller *Controller
}
// NewPoolConfig returns a poolconfig object
func (c *Controller) NewPoolConfig(cspc *apis.CStorPoolCluster, namespace string) (*PoolConfig, error) {
pc, err := nodeselect.
NewBuilder().
WithCSPC(cspc).
WithNameSpace(namespace).
Build()
if err != nil {
return nil, errors.Wrap(err, "could not get algorithm config for provisioning")
}
return &PoolConfig{AlgorithmConfig: pc, Controller: c}, nil
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the CSPC resource
// with the current status of the resource.
func (c *Controller) syncHandler(key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing cstorpoolcluster %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing cstorpoolcluster %q (%v)", key, time.Since(startTime))
}()
// Convert the namespace/name string into a distinct namespace and name
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the cspc resource with this namespace/name
cspc, err := c.cspcLister.CStorPoolClusters(ns).Get(name)
if k8serror.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("cspc '%s' has been deleted", key))
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
cspcGot := cspc.DeepCopy()
err = c.syncCSPC(cspcGot)
return err
}
// enqueueCSPC takes a CSPC resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than CSPC.
func (c *Controller) enqueueCSPC(cspc interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(cspc); err != nil {
runtime.HandleError(err)
return
}
c.workqueue.Add(key)
}
// syncCSPC is the function which tries to converge to a desired state for the cspc.
func (c *Controller) syncCSPC(cspcGot *apis.CStorPoolCluster) error {
openebsNameSpace := env.Get(env.OpenEBSNamespace)
if openebsNameSpace == "" {
message := fmt.Sprint("Could not sync CSPC: got empty namespace for openebs from env variable")
c.recorder.Event(cspcGot, corev1.EventTypeWarning, "Getting Namespace", message)
klog.Errorf("Could not sync CSPC {%s}: got empty namespace for openebs from env variable", cspcGot.Name)
return nil
}
cspcGot, err := c.populateVersion(cspcGot)
if err != nil {
klog.Errorf("failed to add versionDetails to CSPC %s:%s", cspcGot.Name, err.Error())
return nil
}
pc, err := c.NewPoolConfig(cspcGot, openebsNameSpace)
if err != nil {
message := fmt.Sprintf("Could not sync CSPC : failed to get pool config: {%s}", err.Error())
c.recorder.Event(cspcGot, corev1.EventTypeWarning, "Creating Pool Config", message)
klog.Errorf("Could not sync CSPC {%s}: failed to get pool config: {%s}", cspcGot.Name, err.Error())
return nil
}
// If CSPC is deleted -- handle the deletion.
if !cspcGot.DeletionTimestamp.IsZero() {
err = pc.handleCSPCDeletion()
if err != nil {
klog.Errorf("Failed to sync CSPC for deletion:%s", err.Error())
}
return nil
}
cspcBuilderObj, err := apiscspc.BuilderForAPIObject(cspcGot).Build()
if err != nil {
klog.Errorf("Failed to build CSPC api object %s", cspcGot.Name)
return nil
}
cspc, err := cspcBuilderObj.AddFinalizer(apiscspc.CSPCFinalizer)
if err != nil {
klog.Errorf("Failed to add finalizer on CSPC %s:%s", cspcGot.Name, err.Error())
return nil
}
pendingPoolCount, err := pc.AlgorithmConfig.GetPendingPoolCount()
if err != nil {
message := fmt.Sprintf("Could not sync CSPC : failed to get pending pool count: {%s}", err.Error())
c.recorder.Event(cspc, corev1.EventTypeWarning, "Getting Pending Pool(s) ", message)
klog.Errorf("Could not sync CSPC {%s}: failed to get pending pool count:{%s}", cspc.Name, err.Error())
return nil
}
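	// A negative pending pool count means more pools exist than desired and the
	// CSPC must be scaled down; a positive count means pools are yet to be provisioned.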
if pendingPoolCount < 0 {
err = pc.DownScalePool()
if err != nil {
message := fmt.Sprintf("Could not downscale pool: %s", err.Error())
c.recorder.Event(cspc, corev1.EventTypeWarning, "PoolDownScale", message)
klog.Errorf("Could not downscale pool for CSPC %s: %s", cspc.Name, err.Error())
return nil
}
}
if pendingPoolCount > 0 {
err = pc.create(pendingPoolCount, cspc)
if err != nil {
message := fmt.Sprintf("Could not create pool(s) for CSPC: %s", err.Error())
c.recorder.Event(cspc, corev1.EventTypeWarning, "Pool Create", message)
klog.Errorf("Could not create pool(s) for CSPC {%s}:{%s}", cspc.Name, err.Error())
return nil
}
}
cspList, err := pc.AlgorithmConfig.GetCSPIWithoutDeployment()
if err != nil {
		// Note: CSPs for which a pool deployment does not exist are known as orphaned.
message := fmt.Sprintf("Error in getting orphaned CSP :{%s}", err.Error())
c.recorder.Event(cspc, corev1.EventTypeWarning, "Pool Create", message)
klog.Errorf("Error in getting orphaned CSP for CSPC {%s}:{%s}", cspc.Name, err.Error())
return nil
}
if len(cspList) > 0 {
pc.createDeployForCSPList(cspList)
}
if pendingPoolCount == 0 {
klog.V(2).Infof("Handling pool operations for CSPC %s if any", cspc.Name)
pc.handleOperations()
}
return nil
}
// create is a wrapper function that invokes pool creation as many times
// as the number of pools that need to be created.
func (pc *PoolConfig) create(pendingPoolCount int, cspc *apis.CStorPoolCluster) error {
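	// Take a lease on the CSPC so that only one reconciler provisions pools for it at a time.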
newSpcLease := &Lease{cspc, CSPCLeaseKey, pc.Controller.clientset, pc.Controller.kubeclientset}
err := newSpcLease.Hold()
if err != nil {
return errors.Wrapf(err, "Could not acquire lease on cspc object")
}
klog.V(4).Infof("Lease acquired successfully on cstorpoolcluster %s ", cspc.Name)
for poolCount := 1; poolCount <= pendingPoolCount; poolCount++ {
err = pc.CreateStoragePool()
if err != nil {
message := fmt.Sprintf("Pool provisioning failed for %d/%d ", poolCount, pendingPoolCount)
pc.Controller.recorder.Event(cspc, corev1.EventTypeWarning, "Create", message)
runtime.HandleError(errors.Wrapf(err, "Pool provisioning failed for %d/%d for cstorpoolcluster %s", poolCount, pendingPoolCount, cspc.Name))
} else {
message := fmt.Sprintf("Pool Provisioned %d/%d ", poolCount, pendingPoolCount)
pc.Controller.recorder.Event(cspc, corev1.EventTypeNormal, "Create", message)
klog.Infof("Pool provisioned successfully %d/%d for cstorpoolcluster %s", poolCount, pendingPoolCount, cspc.Name)
}
}
return nil
}
func (pc *PoolConfig) createDeployForCSPList(cspList []apis.CStorPoolInstance) {
for _, cspObj := range cspList {
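		// Pin the range variable so the pointer taken below does not alias the loop iterator.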
cspObj := cspObj
err := pc.createDeployForCSP(&cspObj)
if err != nil {
message := fmt.Sprintf("Failed to create pool deployment for CSP %s: %s", cspObj.Name, err.Error())
pc.Controller.recorder.Event(pc.AlgorithmConfig.CSPC, corev1.EventTypeWarning, "PoolDeploymentCreate", message)
runtime.HandleError(errors.Errorf("Failed to create pool deployment for CSP %s: %s", cspObj.Name, err.Error()))
}
}
}
func (pc *PoolConfig) createDeployForCSP(csp *apis.CStorPoolInstance) error {
deployObj, err := pc.GetPoolDeploySpec(csp)
if err != nil {
return errors.Wrapf(err, "could not get deployment spec for csp {%s}", csp.Name)
}
err = pc.createPoolDeployment(deployObj)
if err != nil {
return errors.Wrapf(err, "could not create deployment for csp {%s}", csp.Name)
}
return nil
}
// handleCSPCDeletion handles deletion of a CSPC resource by deleting
// its associated CSPI resources, removing the CSPC finalizer
// on the BDC(s) used, and then removing the CSPC finalizer on the CSPC resource
// itself.
// It is necessary that CSPC resource has the CSPC finalizer on it in order to
// execute the handler.
func (pc *PoolConfig) handleCSPCDeletion() error {
err := pc.deleteAssociatedCSPI()
if err != nil {
return errors.Wrapf(err, "failed to handle CSPC deletion")
}
cspcBuilderObj, err := apiscspc.BuilderForAPIObject(pc.AlgorithmConfig.CSPC).Build()
if err != nil {
klog.Errorf("Failed to build CSPC api object %s:%s", pc.AlgorithmConfig.CSPC.Name, err.Error())
return nil
}
if cspcBuilderObj.HasFinalizer(apiscspc.CSPCFinalizer) {
err := pc.removeCSPCFinalizer()
if err != nil {
return errors.Wrapf(err, "failed to handle CSPC %s deletion", pc.AlgorithmConfig.CSPC.Name)
}
}
return nil
}
// deleteAssociatedCSPI deletes the CSPI resource(s) belonging to the given CSPC resource.
// If no CSPI resource exists for the CSPC, a leveled info log is written and the function
// returns.
func (pc *PoolConfig) deleteAssociatedCSPI() error {
err := apiscsp.NewKubeClient().WithNamespace(pc.AlgorithmConfig.Namespace).DeleteCollection(
metav1.ListOptions{
LabelSelector: string(apis.CStorPoolClusterCPK) + "=" + pc.AlgorithmConfig.CSPC.Name,
},
&metav1.DeleteOptions{},
)
if k8serror.IsNotFound(err) {
klog.V(2).Infof("Associated CSPI(s) of CSPC %s is already deleted:%s", pc.AlgorithmConfig.CSPC.Name, err.Error())
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete associated CSPI(s):%s", err.Error())
}
klog.Infof("Associated CSPI(s) of CSPC %s deleted successfully ", pc.AlgorithmConfig.CSPC.Name)
return nil
}
// removeCSPCFinalizer removes the CSPC finalizer on associated
// BDC resources and CSPC object itself.
func (pc *PoolConfig) removeCSPCFinalizer() error {
cspList, err := apiscsp.NewKubeClient().List(metav1.ListOptions{
		LabelSelector: string(apis.CStorPoolClusterCPK) + "=" + pc.AlgorithmConfig.CSPC.Name,
})
if err != nil {
return errors.Wrap(err, "failed to remove CSPC finalizer on associated resources")
}
if len(cspList.Items) > 0 {
		return errors.New("failed to remove CSPC finalizer on associated resources as " +
			"CSPI(s) still exist for the CSPC")
}
err = pc.removeSPCFinalizerOnAssociatedBDC()
if err != nil {
return errors.Wrap(err, "failed to remove CSPC finalizer on associated resources")
}
cspcBuilderObj, err := apiscspc.BuilderForAPIObject(pc.AlgorithmConfig.CSPC).Build()
if err != nil {
klog.Errorf("Failed to build CSPC api object %s", pc.AlgorithmConfig.CSPC.Name)
return nil
}
err = cspcBuilderObj.RemoveFinalizer(apiscspc.CSPCFinalizer)
if err != nil {
return errors.Wrap(err, "failed to remove CSPC finalizer on associated resources")
}
return nil
}
// removeSPCFinalizerOnAssociatedBDC removes CSPC finalizer on associated BDC resource(s)
func (pc *PoolConfig) removeSPCFinalizerOnAssociatedBDC() error {
bdcList, err := bdc.NewKubeClient().WithNamespace(pc.AlgorithmConfig.Namespace).List(
metav1.ListOptions{
LabelSelector: string(apis.CStorPoolClusterCPK) + "=" + pc.AlgorithmConfig.CSPC.Name,
})
if err != nil {
return errors.Wrapf(err, "failed to remove CSPC finalizer on BDC resources")
}
for _, bdcObj := range bdcList.Items {
bdcObj := bdcObj
err := bdc.BuilderForAPIObject(&bdcObj).BDC.RemoveFinalizer(apiscspc.CSPCFinalizer)
if err != nil {
return errors.Wrapf(err, "failed to remove CSPC finalizer on BDC %s", bdcObj.Name)
}
}
return nil
}
// populateVersion assigns VersionDetails to a pre-existing CSPC object as well as a newly
// created cspc
func (c *Controller) populateVersion(cspc *apis.CStorPoolCluster) (*apis.CStorPoolCluster, error) {
if cspc.VersionDetails.Status.Current == "" {
var err error
var v string
var obj *apis.CStorPoolCluster
v, err = c.EstimateCSPCVersion(cspc)
if err != nil {
return nil, err
}
cspc.VersionDetails.Status.Current = v
		// For a newly created cspc the Desired field will also be empty.
cspc.VersionDetails.Desired = v
cspc.VersionDetails.Status.DependentsUpgraded = true
obj, err = c.clientset.OpenebsV1alpha1().
CStorPoolClusters(env.Get(env.OpenEBSNamespace)).
Update(cspc)
if err != nil {
return nil, errors.Wrapf(
err,
"failed to update spc %s while adding versiondetails",
cspc.Name,
)
}
klog.Infof("Version %s added on spc %s", v, cspc.Name)
return obj, nil
}
return cspc, nil
}
// EstimateCSPCVersion returns the CSPI version if any CSPI is present for the CSPC, or
// the current maya version otherwise, since any newly created CSPI will be of the maya version
func (c *Controller) EstimateCSPCVersion(cspc *apis.CStorPoolCluster) (string, error) {
cspiList, err := c.clientset.OpenebsV1alpha1().
CStorPoolInstances(env.Get(env.OpenEBSNamespace)).
List(
metav1.ListOptions{
LabelSelector: string(apis.CStorPoolClusterCPK) + "=" + cspc.Name,
})
if err != nil {
return "", errors.Wrapf(
err,
"failed to get the cstorpool instance list related to cspc : %s",
cspc.Name,
)
}
if len(cspiList.Items) == 0 {
return version.Current(), nil
}
return cspiList.Items[0].Labels[string(apis.OpenEBSVersionKey)], nil
}
| 1 | 17,521 | change looks good.. but, better to make populateVersion to return same object in the case of error.. that avoids lot of complex logic and probable issues | openebs-maya | go |
@@ -1358,6 +1358,10 @@ func (exp *Service) GetBlockOrActionByHash(hashStr string) (explorer.GetBlkOrAct
return explorer.GetBlkOrActResponse{Execution: &exe}, nil
}
+ if exe, err := exp.getAddressDetails(hashStr); err == nil {
+ return explorer.GetBlkOrActResponse{Address: &exe}, nil
+ }
+
return explorer.GetBlkOrActResponse{}, nil
}
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package explorer
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
peerstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol/multichain/mainchain"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/consensus"
"github.com/iotexproject/iotex-core/dispatcher"
"github.com/iotexproject/iotex-core/explorer/idl/explorer"
"github.com/iotexproject/iotex-core/indexservice"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
iproto "github.com/iotexproject/iotex-core/proto"
)
var (
// ErrInternalServer indicates the internal server error
ErrInternalServer = errors.New("internal server error")
// ErrTransfer indicates the error of transfer
ErrTransfer = errors.New("invalid transfer")
// ErrVote indicates the error of vote
ErrVote = errors.New("invalid vote")
// ErrExecution indicates the error of execution
ErrExecution = errors.New("invalid execution")
// ErrReceipt indicates the error of receipt
ErrReceipt = errors.New("invalid receipt")
// ErrAction indicates the error of action
ErrAction = errors.New("invalid action")
)
var (
requestMtc = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_explorer_request",
Help: "IoTeX Explorer request counter.",
},
[]string{"method", "succeed"},
)
)
func init() {
prometheus.MustRegister(requestMtc)
}
type (
// BroadcastOutbound sends a broadcast message to the whole network
BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error
// Neighbors returns the neighbors' addresses
Neighbors func(context.Context) ([]peerstore.PeerInfo, error)
// NetworkInfo returns the self network information
NetworkInfo func() peerstore.PeerInfo
)
// Service provide api for user to query blockchain data
type Service struct {
bc blockchain.Blockchain
c consensus.Consensus
dp dispatcher.Dispatcher
ap actpool.ActPool
gs GasStation
broadcastHandler BroadcastOutbound
neighborsHandler Neighbors
networkInfoHandler NetworkInfo
cfg config.Explorer
idx *indexservice.Server
	// TODO: making the explorer access the data model managed by the main-chain protocol this way
	// is a hack. We need to refactor the code later
mainChain *mainchain.Protocol
}
// SetMainChainProtocol sets the main-chain side multi-chain protocol
func (exp *Service) SetMainChainProtocol(mainChain *mainchain.Protocol) { exp.mainChain = mainChain }
// GetBlockchainHeight returns the current blockchain tip height
func (exp *Service) GetBlockchainHeight() (int64, error) {
tip := exp.bc.TipHeight()
return int64(tip), nil
}
// GetAddressBalance returns the balance of an address
func (exp *Service) GetAddressBalance(address string) (string, error) {
state, err := exp.bc.StateByAddr(address)
if err != nil {
return "", err
}
return state.Balance.String(), nil
}
// GetAddressDetails returns the properties of an address
func (exp *Service) GetAddressDetails(address string) (explorer.AddressDetails, error) {
state, err := exp.bc.StateByAddr(address)
if err != nil {
return explorer.AddressDetails{}, err
}
pendingNonce, err := exp.ap.GetPendingNonce(address)
if err != nil {
return explorer.AddressDetails{}, err
}
details := explorer.AddressDetails{
Address: address,
TotalBalance: state.Balance.String(),
Nonce: int64(state.Nonce),
PendingNonce: int64(pendingNonce),
IsCandidate: state.IsCandidate,
}
return details, nil
}
// GetLastTransfersByRange returns transfers in [-(offset+limit-1), -offset] from block
// with height startBlockHeight
func (exp *Service) GetLastTransfersByRange(startBlockHeight int64, offset int64, limit int64, showCoinBase bool) ([]explorer.Transfer, error) {
var res []explorer.Transfer
transferCount := int64(0)
for height := startBlockHeight; height >= 0; height-- {
var blkID string
hash, err := exp.bc.GetHashByHeight(uint64(height))
if err != nil {
return []explorer.Transfer{}, err
}
blkID = hex.EncodeToString(hash[:])
blk, err := exp.bc.GetBlockByHeight(uint64(height))
if err != nil {
return []explorer.Transfer{}, err
}
selps := make([]action.SealedEnvelope, 0)
for _, selp := range blk.Actions {
act := selp.Action()
if _, ok := act.(*action.Transfer); ok {
selps = append(selps, selp)
}
}
for i := len(selps) - 1; i >= 0; i-- {
transferCount++
if transferCount <= offset {
continue
}
if int64(len(res)) >= limit {
return res, nil
}
explorerTransfer, err := convertTsfToExplorerTsf(selps[i], false)
if err != nil {
return []explorer.Transfer{}, errors.Wrapf(err,
"failed to convert transfer %v to explorer's JSON transfer", selps[i])
}
explorerTransfer.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerTransfer.BlockID = blkID
res = append(res, explorerTransfer)
}
}
return res, nil
}
// GetTransferByID returns transfer by transfer id
func (exp *Service) GetTransferByID(transferID string) (explorer.Transfer, error) {
bytes, err := hex.DecodeString(transferID)
if err != nil {
return explorer.Transfer{}, err
}
var transferHash hash.Hash256
copy(transferHash[:], bytes)
return getTransfer(exp.bc, exp.ap, transferHash, exp.idx, exp.cfg.UseIndexer)
}
// GetTransfersByAddress returns all transfers associated with an address
func (exp *Service) GetTransfersByAddress(address string, offset int64, limit int64) ([]explorer.Transfer, error) {
var res []explorer.Transfer
var transfers []hash.Hash256
if exp.cfg.UseIndexer {
transferHistory, err := exp.idx.Indexer().GetIndexHistory(config.IndexTransfer, address)
if err != nil {
return []explorer.Transfer{}, err
}
transfers = append(transfers, transferHistory...)
} else {
transfersFromAddress, err := exp.bc.GetTransfersFromAddress(address)
if err != nil {
return []explorer.Transfer{}, err
}
transfersToAddress, err := exp.bc.GetTransfersToAddress(address)
if err != nil {
return []explorer.Transfer{}, err
}
transfersFromAddress = append(transfersFromAddress, transfersToAddress...)
transfers = append(transfers, transfersFromAddress...)
}
for i, transferHash := range transfers {
if int64(i) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
explorerTransfer, err := getTransfer(exp.bc, exp.ap, transferHash, exp.idx, exp.cfg.UseIndexer)
if err != nil {
return []explorer.Transfer{}, err
}
res = append(res, explorerTransfer)
}
return res, nil
}
// GetUnconfirmedTransfersByAddress returns all unconfirmed transfers in actpool associated with an address
func (exp *Service) GetUnconfirmedTransfersByAddress(address string, offset int64, limit int64) ([]explorer.Transfer, error) {
res := make([]explorer.Transfer, 0)
if _, err := exp.bc.StateByAddr(address); err != nil {
return []explorer.Transfer{}, err
}
selps := exp.ap.GetUnconfirmedActs(address)
tsfIndex := int64(0)
for _, selp := range selps {
act := selp.Action()
transfer, ok := act.(*action.Transfer)
if !ok {
continue
}
if tsfIndex < offset {
tsfIndex++
continue
}
if int64(len(res)) >= limit {
break
}
explorerTransfer, err := convertTsfToExplorerTsf(selp, true)
if err != nil {
return []explorer.Transfer{}, errors.Wrapf(err, "failed to convert transfer %v to explorer's JSON transfer", transfer)
}
res = append(res, explorerTransfer)
}
return res, nil
}
// GetTransfersByBlockID returns transfers in a block
func (exp *Service) GetTransfersByBlockID(blkID string, offset int64, limit int64) ([]explorer.Transfer, error) {
var res []explorer.Transfer
bytes, err := hex.DecodeString(blkID)
if err != nil {
return []explorer.Transfer{}, err
}
var hash hash.Hash256
copy(hash[:], bytes)
blk, err := exp.bc.GetBlockByHash(hash)
if err != nil {
return []explorer.Transfer{}, err
}
var num int
for _, selp := range blk.Actions {
if _, ok := selp.Action().(*action.Transfer); !ok {
continue
}
if int64(num) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
explorerTransfer, err := convertTsfToExplorerTsf(selp, false)
if err != nil {
return []explorer.Transfer{}, errors.Wrapf(err, "failed to convert transfer %v to explorer's JSON transfer", selp)
}
explorerTransfer.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerTransfer.BlockID = blkID
res = append(res, explorerTransfer)
num++
}
return res, nil
}
// GetLastVotesByRange returns votes in [-(offset+limit-1), -offset] from block
// with height startBlockHeight
func (exp *Service) GetLastVotesByRange(startBlockHeight int64, offset int64, limit int64) ([]explorer.Vote, error) {
var res []explorer.Vote
voteCount := uint64(0)
for height := startBlockHeight; height >= 0; height-- {
hash, err := exp.bc.GetHashByHeight(uint64(height))
if err != nil {
return []explorer.Vote{}, err
}
blkID := hex.EncodeToString(hash[:])
blk, err := exp.bc.GetBlockByHeight(uint64(height))
if err != nil {
return []explorer.Vote{}, err
}
selps := make([]action.SealedEnvelope, 0)
for _, selp := range blk.Actions {
act := selp.Action()
if _, ok := act.(*action.Vote); ok {
selps = append(selps, selp)
}
}
for i := int64(len(selps) - 1); i >= 0; i-- {
voteCount++
if voteCount <= uint64(offset) {
continue
}
if int64(len(res)) >= limit {
return res, nil
}
explorerVote, err := convertVoteToExplorerVote(selps[i], false)
if err != nil {
return []explorer.Vote{}, errors.Wrapf(err, "failed to convert vote %v to explorer's JSON vote", selps[i])
}
explorerVote.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerVote.BlockID = blkID
res = append(res, explorerVote)
}
}
return res, nil
}
// GetVoteByID returns vote by vote id
func (exp *Service) GetVoteByID(voteID string) (explorer.Vote, error) {
bytes, err := hex.DecodeString(voteID)
if err != nil {
return explorer.Vote{}, err
}
var voteHash hash.Hash256
copy(voteHash[:], bytes)
return getVote(exp.bc, exp.ap, voteHash, exp.idx, exp.cfg.UseIndexer)
}
// GetVotesByAddress returns all votes associated with an address
func (exp *Service) GetVotesByAddress(address string, offset int64, limit int64) ([]explorer.Vote, error) {
var res []explorer.Vote
var votes []hash.Hash256
if exp.cfg.UseIndexer {
voteHistory, err := exp.idx.Indexer().GetIndexHistory(config.IndexVote, address)
if err != nil {
return []explorer.Vote{}, err
}
votes = append(votes, voteHistory...)
} else {
votesFromAddress, err := exp.bc.GetVotesFromAddress(address)
if err != nil {
return []explorer.Vote{}, err
}
votesToAddress, err := exp.bc.GetVotesToAddress(address)
if err != nil {
return []explorer.Vote{}, err
}
votesFromAddress = append(votesFromAddress, votesToAddress...)
votes = append(votes, votesFromAddress...)
}
for i, voteHash := range votes {
if int64(i) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
explorerVote, err := getVote(exp.bc, exp.ap, voteHash, exp.idx, exp.cfg.UseIndexer)
if err != nil {
return []explorer.Vote{}, err
}
res = append(res, explorerVote)
}
return res, nil
}
// GetUnconfirmedVotesByAddress returns all unconfirmed votes in actpool associated with an address
func (exp *Service) GetUnconfirmedVotesByAddress(address string, offset int64, limit int64) ([]explorer.Vote, error) {
res := make([]explorer.Vote, 0)
if _, err := exp.bc.StateByAddr(address); err != nil {
return []explorer.Vote{}, err
}
selps := exp.ap.GetUnconfirmedActs(address)
voteIndex := int64(0)
for _, selp := range selps {
act := selp.Action()
vote, ok := act.(*action.Vote)
if !ok {
continue
}
if voteIndex < offset {
voteIndex++
continue
}
if int64(len(res)) >= limit {
break
}
explorerVote, err := convertVoteToExplorerVote(selp, true)
if err != nil {
return []explorer.Vote{}, errors.Wrapf(err, "failed to convert vote %v to explorer's JSON vote", vote)
}
res = append(res, explorerVote)
}
return res, nil
}
// GetVotesByBlockID returns votes in a block
func (exp *Service) GetVotesByBlockID(blkID string, offset int64, limit int64) ([]explorer.Vote, error) {
var res []explorer.Vote
bytes, err := hex.DecodeString(blkID)
if err != nil {
return []explorer.Vote{}, err
}
var hash hash.Hash256
copy(hash[:], bytes)
blk, err := exp.bc.GetBlockByHash(hash)
if err != nil {
return []explorer.Vote{}, err
}
var num int
for _, selp := range blk.Actions {
if _, ok := selp.Action().(*action.Vote); !ok {
continue
}
if int64(num) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
explorerVote, err := convertVoteToExplorerVote(selp, false)
if err != nil {
return []explorer.Vote{}, errors.Wrapf(err, "failed to convert vote %v to explorer's JSON vote", selp)
}
explorerVote.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerVote.BlockID = blkID
res = append(res, explorerVote)
num++
}
return res, nil
}
// GetLastExecutionsByRange returns executions in [-(offset+limit-1), -offset] from block
// with height startBlockHeight
func (exp *Service) GetLastExecutionsByRange(startBlockHeight int64, offset int64, limit int64) ([]explorer.Execution, error) {
var res []explorer.Execution
executionCount := uint64(0)
for height := startBlockHeight; height >= 0; height-- {
hash, err := exp.bc.GetHashByHeight(uint64(height))
if err != nil {
return []explorer.Execution{}, err
}
blkID := hex.EncodeToString(hash[:])
blk, err := exp.bc.GetBlockByHeight(uint64(height))
if err != nil {
return []explorer.Execution{}, err
}
selps := make([]action.SealedEnvelope, 0)
for _, selp := range blk.Actions {
act := selp.Action()
if _, ok := act.(*action.Execution); ok {
selps = append(selps, selp)
}
}
for i := len(selps) - 1; i >= 0; i-- {
executionCount++
if executionCount <= uint64(offset) {
continue
}
if int64(len(res)) >= limit {
return res, nil
}
explorerExecution, err := convertExecutionToExplorerExecution(selps[i], false)
if err != nil {
return []explorer.Execution{}, errors.Wrapf(err,
"failed to convert execution %v to explorer's JSON execution", selps[i])
}
explorerExecution.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerExecution.BlockID = blkID
res = append(res, explorerExecution)
}
}
return res, nil
}
// GetExecutionByID returns execution by execution id
func (exp *Service) GetExecutionByID(executionID string) (explorer.Execution, error) {
bytes, err := hex.DecodeString(executionID)
if err != nil {
return explorer.Execution{}, err
}
var executionHash hash.Hash256
copy(executionHash[:], bytes)
return getExecution(exp.bc, exp.ap, executionHash, exp.idx, exp.cfg.UseIndexer)
}
// GetExecutionsByAddress returns all executions associated with an address
func (exp *Service) GetExecutionsByAddress(address string, offset int64, limit int64) ([]explorer.Execution, error) {
var res []explorer.Execution
var executions []hash.Hash256
if exp.cfg.UseIndexer {
executionHistory, err := exp.idx.Indexer().GetIndexHistory(config.IndexExecution, address)
if err != nil {
return []explorer.Execution{}, err
}
executions = append(executions, executionHistory...)
} else {
executionsFromAddress, err := exp.bc.GetExecutionsFromAddress(address)
if err != nil {
return []explorer.Execution{}, err
}
executionsToAddress, err := exp.bc.GetExecutionsToAddress(address)
if err != nil {
return []explorer.Execution{}, err
}
executionsFromAddress = append(executionsFromAddress, executionsToAddress...)
executions = append(executions, executionsFromAddress...)
}
for i, executionHash := range executions {
if int64(i) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
explorerExecution, err := getExecution(exp.bc, exp.ap, executionHash, exp.idx, exp.cfg.UseIndexer)
if err != nil {
return []explorer.Execution{}, err
}
res = append(res, explorerExecution)
}
return res, nil
}
// GetUnconfirmedExecutionsByAddress returns all unconfirmed executions in actpool associated with an address
func (exp *Service) GetUnconfirmedExecutionsByAddress(address string, offset int64, limit int64) ([]explorer.Execution, error) {
res := make([]explorer.Execution, 0)
if _, err := exp.bc.StateByAddr(address); err != nil {
return []explorer.Execution{}, err
}
selps := exp.ap.GetUnconfirmedActs(address)
executionIndex := int64(0)
for _, selp := range selps {
if _, ok := selp.Action().(*action.Execution); !ok {
continue
}
if executionIndex < offset {
executionIndex++
continue
}
if int64(len(res)) >= limit {
break
}
explorerExecution, err := convertExecutionToExplorerExecution(selp, true)
if err != nil {
return []explorer.Execution{}, errors.Wrapf(err, "failed to convert execution %v to explorer's JSON execution", selp)
}
res = append(res, explorerExecution)
}
return res, nil
}
// GetExecutionsByBlockID returns executions in a block
func (exp *Service) GetExecutionsByBlockID(blkID string, offset int64, limit int64) ([]explorer.Execution, error) {
var res []explorer.Execution
bytes, err := hex.DecodeString(blkID)
if err != nil {
return []explorer.Execution{}, err
}
var hash hash.Hash256
copy(hash[:], bytes)
blk, err := exp.bc.GetBlockByHash(hash)
if err != nil {
return []explorer.Execution{}, err
}
var num int
for _, selp := range blk.Actions {
if _, ok := selp.Action().(*action.Execution); !ok {
continue
}
if int64(num) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
explorerExecution, err := convertExecutionToExplorerExecution(selp, false)
if err != nil {
return []explorer.Execution{}, errors.Wrapf(err, "failed to convert execution %v to explorer's JSON execution", selp)
}
explorerExecution.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerExecution.BlockID = blkID
res = append(res, explorerExecution)
num++
}
return res, nil
}
// GetReceiptByExecutionID gets receipt with corresponding execution id
// Deprecated
func (exp *Service) GetReceiptByExecutionID(id string) (explorer.Receipt, error) {
return exp.GetReceiptByActionID(id)
}
// GetReceiptByActionID gets receipt with corresponding action id
func (exp *Service) GetReceiptByActionID(id string) (explorer.Receipt, error) {
bytes, err := hex.DecodeString(id)
if err != nil {
return explorer.Receipt{}, err
}
var actionHash hash.Hash256
copy(actionHash[:], bytes)
// get receipt from boltdb
if !exp.cfg.UseIndexer {
receipt, err := exp.bc.GetReceiptByActionHash(actionHash)
if err != nil {
return explorer.Receipt{}, err
}
return convertReceiptToExplorerReceipt(receipt)
}
// get receipt from indexer
blkHash, err := exp.idx.Indexer().GetBlockByIndex(config.IndexReceipt, actionHash)
if err != nil {
return explorer.Receipt{}, err
}
blk, err := exp.bc.GetBlockByHash(blkHash)
if err != nil {
return explorer.Receipt{}, err
}
for _, receipt := range blk.Receipts {
if receipt.Hash() == actionHash {
return convertReceiptToExplorerReceipt(receipt)
}
}
	return explorer.Receipt{}, errors.Wrapf(ErrReceipt, "receipt for action %s is not found", id)
}
// GetCreateDeposit gets create deposit by ID
func (exp *Service) GetCreateDeposit(createDepositID string) (explorer.CreateDeposit, error) {
bytes, err := hex.DecodeString(createDepositID)
if err != nil {
return explorer.CreateDeposit{}, err
}
var createDepositHash hash.Hash256
copy(createDepositHash[:], bytes)
return getCreateDeposit(exp.bc, exp.ap, createDepositHash)
}
// GetCreateDepositsByAddress gets the relevant create deposits of an address
func (exp *Service) GetCreateDepositsByAddress(
address string,
offset int64,
limit int64,
) ([]explorer.CreateDeposit, error) {
res := make([]explorer.CreateDeposit, 0)
depositsFromAddress, err := exp.bc.GetActionsFromAddress(address)
if err != nil {
return []explorer.CreateDeposit{}, err
}
for i, depositHash := range depositsFromAddress {
if int64(i) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
createDeposit, err := getCreateDeposit(exp.bc, exp.ap, depositHash)
if err != nil {
continue
}
res = append(res, createDeposit)
}
return res, nil
}
// GetSettleDeposit gets settle deposit by ID
func (exp *Service) GetSettleDeposit(settleDepositID string) (explorer.SettleDeposit, error) {
bytes, err := hex.DecodeString(settleDepositID)
if err != nil {
return explorer.SettleDeposit{}, err
}
var settleDepositHash hash.Hash256
copy(settleDepositHash[:], bytes)
return getSettleDeposit(exp.bc, exp.ap, settleDepositHash)
}
// GetSettleDepositsByAddress gets the relevant settle deposits of an address
func (exp *Service) GetSettleDepositsByAddress(
address string,
offset int64,
limit int64,
) ([]explorer.SettleDeposit, error) {
res := make([]explorer.SettleDeposit, 0)
depositsToAddress, err := exp.bc.GetActionsToAddress(address)
if err != nil {
return []explorer.SettleDeposit{}, err
}
for i, depositHash := range depositsToAddress {
if int64(i) < offset {
continue
}
if int64(len(res)) >= limit {
break
}
settleDeposit, err := getSettleDeposit(exp.bc, exp.ap, depositHash)
if err != nil {
continue
}
res = append(res, settleDeposit)
}
return res, nil
}
// GetLastBlocksByRange gets blocks with heights in [offset-limit+1, offset]
func (exp *Service) GetLastBlocksByRange(offset int64, limit int64) ([]explorer.Block, error) {
var res []explorer.Block
for height := offset; height >= 0 && int64(len(res)) < limit; height-- {
blk, err := exp.bc.GetBlockByHeight(uint64(height))
if err != nil {
return []explorer.Block{}, err
}
blockHeaderPb := blk.ConvertToBlockHeaderPb()
hash, err := exp.bc.GetHashByHeight(uint64(height))
if err != nil {
return []explorer.Block{}, err
}
transfers, votes, executions := action.ClassifyActions(blk.Actions)
totalAmount := big.NewInt(0)
totalSize := uint32(0)
for _, transfer := range transfers {
totalAmount.Add(totalAmount, transfer.Amount())
totalSize += transfer.TotalSize()
}
txRoot := blk.TxRoot()
stateRoot := blk.StateRoot()
deltaStateDigest := blk.DeltaStateDigest()
explorerBlock := explorer.Block{
ID: hex.EncodeToString(hash[:]),
Height: int64(blockHeaderPb.Height),
Timestamp: blockHeaderPb.GetTimestamp().GetSeconds(),
Transfers: int64(len(transfers)),
Votes: int64(len(votes)),
Executions: int64(len(executions)),
Amount: totalAmount.String(),
Size: int64(totalSize),
GenerateBy: explorer.BlockGenerator{
Name: "",
Address: keypair.EncodePublicKey(blk.PublicKey()),
},
TxRoot: hex.EncodeToString(txRoot[:]),
StateRoot: hex.EncodeToString(stateRoot[:]),
DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]),
}
res = append(res, explorerBlock)
}
return res, nil
}
// GetBlockByID returns block by block id
func (exp *Service) GetBlockByID(blkID string) (explorer.Block, error) {
bytes, err := hex.DecodeString(blkID)
if err != nil {
return explorer.Block{}, err
}
var hash hash.Hash256
copy(hash[:], bytes)
blk, err := exp.bc.GetBlockByHash(hash)
if err != nil {
return explorer.Block{}, err
}
blkHeaderPb := blk.ConvertToBlockHeaderPb()
transfers, votes, executions := action.ClassifyActions(blk.Actions)
totalAmount := big.NewInt(0)
totalSize := uint32(0)
for _, transfer := range transfers {
totalAmount.Add(totalAmount, transfer.Amount())
totalSize += transfer.TotalSize()
}
txRoot := blk.TxRoot()
stateRoot := blk.StateRoot()
deltaStateDigest := blk.DeltaStateDigest()
explorerBlock := explorer.Block{
ID: blkID,
Height: int64(blkHeaderPb.Height),
Timestamp: blkHeaderPb.GetTimestamp().GetSeconds(),
Transfers: int64(len(transfers)),
Votes: int64(len(votes)),
Executions: int64(len(executions)),
Amount: totalAmount.String(),
Size: int64(totalSize),
GenerateBy: explorer.BlockGenerator{
Name: "",
Address: keypair.EncodePublicKey(blk.PublicKey()),
},
TxRoot: hex.EncodeToString(txRoot[:]),
StateRoot: hex.EncodeToString(stateRoot[:]),
DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]),
}
return explorerBlock, nil
}
// GetCoinStatistic returns stats in blockchain
func (exp *Service) GetCoinStatistic() (explorer.CoinStatistic, error) {
stat := explorer.CoinStatistic{}
tipHeight := exp.bc.TipHeight()
totalTransfers, err := exp.bc.GetTotalTransfers()
if err != nil {
return stat, err
}
totalVotes, err := exp.bc.GetTotalVotes()
if err != nil {
return stat, err
}
totalExecutions, err := exp.bc.GetTotalExecutions()
if err != nil {
return stat, err
}
blockLimit := int64(exp.cfg.TpsWindow)
if blockLimit <= 0 {
return stat, errors.Wrapf(ErrInternalServer, "block limit is %d", blockLimit)
}
// avoid genesis block
if int64(tipHeight) < blockLimit {
blockLimit = int64(tipHeight)
}
blks, err := exp.GetLastBlocksByRange(int64(tipHeight), blockLimit)
if err != nil {
return stat, err
}
if len(blks) == 0 {
return stat, errors.New("get 0 blocks! not able to calculate aps")
}
timeDuration := blks[0].Timestamp - blks[len(blks)-1].Timestamp
// if time duration is less than 1 second, we set it to be 1 second
if timeDuration == 0 {
timeDuration = 1
}
actionNumber := int64(0)
for _, blk := range blks {
actionNumber += blk.Transfers + blk.Votes + blk.Executions
}
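	// Average actions per second over the sampled window; integer division rounds
	// sub-second rates down to zero.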
aps := actionNumber / timeDuration
explorerCoinStats := explorer.CoinStatistic{
Height: int64(tipHeight),
Supply: blockchain.Gen.TotalSupply.String(),
Transfers: int64(totalTransfers),
Votes: int64(totalVotes),
Executions: int64(totalExecutions),
Aps: aps,
}
return explorerCoinStats, nil
}
// GetConsensusMetrics returns the latest consensus metrics
func (exp *Service) GetConsensusMetrics() (explorer.ConsensusMetrics, error) {
cm, err := exp.c.Metrics()
if err != nil {
return explorer.ConsensusMetrics{}, err
}
dStrs := make([]string, len(cm.LatestDelegates))
copy(dStrs, cm.LatestDelegates)
var bpStr string
if cm.LatestBlockProducer != "" {
bpStr = cm.LatestBlockProducer
}
cStrs := make([]string, len(cm.Candidates))
copy(cStrs, cm.Candidates)
return explorer.ConsensusMetrics{
LatestEpoch: int64(cm.LatestEpoch),
LatestDelegates: dStrs,
LatestBlockProducer: bpStr,
Candidates: cStrs,
}, nil
}
// GetCandidateMetrics returns the latest delegates metrics
func (exp *Service) GetCandidateMetrics() (explorer.CandidateMetrics, error) {
cm, err := exp.c.Metrics()
if err != nil {
return explorer.CandidateMetrics{}, errors.Wrapf(
err,
"Failed to get the candidate metrics")
}
delegateSet := make(map[string]bool, len(cm.LatestDelegates))
for _, d := range cm.LatestDelegates {
delegateSet[d] = true
}
allCandidates, err := exp.bc.CandidatesByHeight(cm.LatestHeight)
if err != nil {
return explorer.CandidateMetrics{}, errors.Wrapf(err,
"Failed to get the candidate metrics")
}
	candidates := make([]explorer.Candidate, len(allCandidates))
for i, c := range allCandidates {
candidates[i] = explorer.Candidate{
Address: c.Address,
TotalVote: c.Votes.String(),
CreationHeight: int64(c.CreationHeight),
LastUpdateHeight: int64(c.LastUpdateHeight),
IsDelegate: false,
IsProducer: false,
}
if _, ok := delegateSet[c.Address]; ok {
candidates[i].IsDelegate = true
}
if cm.LatestBlockProducer == c.Address {
candidates[i].IsProducer = true
}
}
return explorer.CandidateMetrics{
Candidates: candidates,
LatestEpoch: int64(cm.LatestEpoch),
LatestHeight: int64(cm.LatestHeight),
}, nil
}
// GetCandidateMetricsByHeight returns the candidates metrics for given height.
func (exp *Service) GetCandidateMetricsByHeight(h int64) (explorer.CandidateMetrics, error) {
if h < 0 {
return explorer.CandidateMetrics{}, errors.New("Invalid height")
}
allCandidates, err := exp.bc.CandidatesByHeight(uint64(h))
if err != nil {
return explorer.CandidateMetrics{}, errors.Wrapf(err,
"Failed to get the candidate metrics")
}
candidates := make([]explorer.Candidate, 0, len(allCandidates))
for _, c := range allCandidates {
pubKey := keypair.EncodePublicKey(c.PublicKey)
candidates = append(candidates, explorer.Candidate{
Address: c.Address,
PubKey: pubKey,
TotalVote: c.Votes.String(),
CreationHeight: int64(c.CreationHeight),
LastUpdateHeight: int64(c.LastUpdateHeight),
})
}
return explorer.CandidateMetrics{
Candidates: candidates,
}, nil
}
// SendTransfer sends a transfer
func (exp *Service) SendTransfer(tsfJSON explorer.SendTransferRequest) (resp explorer.SendTransferResponse, err error) {
log.L().Debug("receive send transfer request")
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("SendTransfer", succeed).Inc()
}()
actPb, err := convertExplorerTransferToActionPb(&tsfJSON, exp.cfg.MaxTransferPayloadBytes)
if err != nil {
return explorer.SendTransferResponse{}, err
}
// broadcast to the network
if err = exp.broadcastHandler(context.Background(), exp.bc.ChainID(), actPb); err != nil {
return explorer.SendTransferResponse{}, err
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), actPb)
tsf := &action.SealedEnvelope{}
if err := tsf.LoadProto(actPb); err != nil {
return explorer.SendTransferResponse{}, err
}
h := tsf.Hash()
return explorer.SendTransferResponse{Hash: hex.EncodeToString(h[:])}, nil
}
// SendVote sends a vote
func (exp *Service) SendVote(voteJSON explorer.SendVoteRequest) (resp explorer.SendVoteResponse, err error) {
log.L().Debug("receive send vote request")
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("SendVote", succeed).Inc()
}()
selfPubKey, err := keypair.StringToPubKeyBytes(voteJSON.VoterPubKey)
if err != nil {
return explorer.SendVoteResponse{}, err
}
signature, err := hex.DecodeString(voteJSON.Signature)
if err != nil {
return explorer.SendVoteResponse{}, err
}
gasPrice, ok := big.NewInt(0).SetString(voteJSON.GasPrice, 10)
if !ok {
return explorer.SendVoteResponse{}, errors.New("failed to set vote gas price")
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_Vote{
Vote: &iproto.VotePb{
VoteeAddress: voteJSON.Votee,
},
},
Version: uint32(voteJSON.Version),
SenderPubKey: selfPubKey,
Nonce: uint64(voteJSON.Nonce),
GasLimit: uint64(voteJSON.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
// broadcast to the network
if err := exp.broadcastHandler(context.Background(), exp.bc.ChainID(), actPb); err != nil {
return explorer.SendVoteResponse{}, err
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), actPb)
v := &action.SealedEnvelope{}
if err := v.LoadProto(actPb); err != nil {
return explorer.SendVoteResponse{}, err
}
h := v.Hash()
return explorer.SendVoteResponse{Hash: hex.EncodeToString(h[:])}, nil
}
// PutSubChainBlock put block merkel root on root chain.
func (exp *Service) PutSubChainBlock(putBlockJSON explorer.PutSubChainBlockRequest) (resp explorer.PutSubChainBlockResponse, err error) {
log.L().Debug("receive put block request")
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("PutBlock", succeed).Inc()
}()
senderPubKey, err := keypair.StringToPubKeyBytes(putBlockJSON.SenderPubKey)
if err != nil {
return explorer.PutSubChainBlockResponse{}, err
}
signature, err := hex.DecodeString(putBlockJSON.Signature)
if err != nil {
return explorer.PutSubChainBlockResponse{}, err
}
gasPrice, ok := big.NewInt(0).SetString(putBlockJSON.GasPrice, 10)
if !ok {
		return explorer.PutSubChainBlockResponse{}, errors.New("failed to set put block gas price")
}
roots := make([]*iproto.MerkleRoot, 0)
for _, mr := range putBlockJSON.Roots {
v, err := hex.DecodeString(mr.Value)
if err != nil {
return explorer.PutSubChainBlockResponse{}, err
}
roots = append(roots, &iproto.MerkleRoot{
Name: mr.Name,
Value: v,
})
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_PutBlock{
PutBlock: &iproto.PutBlockPb{
SubChainAddress: putBlockJSON.SubChainAddress,
Height: uint64(putBlockJSON.Height),
Roots: roots,
},
},
Version: uint32(putBlockJSON.Version),
SenderPubKey: senderPubKey,
Nonce: uint64(putBlockJSON.Nonce),
GasLimit: uint64(putBlockJSON.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
// broadcast to the network
if err := exp.broadcastHandler(context.Background(), exp.bc.ChainID(), actPb); err != nil {
return explorer.PutSubChainBlockResponse{}, err
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), actPb)
v := &action.SealedEnvelope{}
if err := v.LoadProto(actPb); err != nil {
return explorer.PutSubChainBlockResponse{}, err
}
h := v.Hash()
return explorer.PutSubChainBlockResponse{Hash: hex.EncodeToString(h[:])}, nil
}
// SendAction is the API to send an action to blockchain.
func (exp *Service) SendAction(req explorer.SendActionRequest) (resp explorer.SendActionResponse, err error) {
log.L().Debug("receive send action request")
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("SendAction", succeed).Inc()
}()
var action iproto.ActionPb
if err := jsonpb.UnmarshalString(req.Payload, &action); err != nil {
return explorer.SendActionResponse{}, err
}
// broadcast to the network
if err = exp.broadcastHandler(context.Background(), exp.bc.ChainID(), &action); err != nil {
log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err))
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), &action)
// TODO: include action hash
return explorer.SendActionResponse{}, nil
}
// GetPeers returns a list of node peers and the node's own network address info.
func (exp *Service) GetPeers() (explorer.GetPeersResponse, error) {
var exppeers []explorer.Node
ctx := context.Background()
peers, err := exp.neighborsHandler(ctx)
if err != nil {
return explorer.GetPeersResponse{}, err
}
for _, p := range peers {
exppeers = append(exppeers, explorer.Node{
Address: fmt.Sprintf("%v", p),
})
}
return explorer.GetPeersResponse{
Self: explorer.Node{Address: fmt.Sprintf("%v", exp.networkInfoHandler())},
Peers: exppeers,
}, nil
}
// SendSmartContract sends a smart contract
func (exp *Service) SendSmartContract(execution explorer.Execution) (resp explorer.SendSmartContractResponse, err error) {
log.L().Debug("receive send smart contract request")
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("SendSmartContract", succeed).Inc()
}()
executorPubKey, err := keypair.StringToPubKeyBytes(execution.ExecutorPubKey)
if err != nil {
return explorer.SendSmartContractResponse{}, err
}
data, err := hex.DecodeString(execution.Data)
if err != nil {
return explorer.SendSmartContractResponse{}, err
}
signature, err := hex.DecodeString(execution.Signature)
if err != nil {
return explorer.SendSmartContractResponse{}, err
}
amount, ok := big.NewInt(0).SetString(execution.Amount, 10)
if !ok {
return explorer.SendSmartContractResponse{}, errors.New("failed to set execution amount")
}
gasPrice, ok := big.NewInt(0).SetString(execution.GasPrice, 10)
if !ok {
return explorer.SendSmartContractResponse{}, errors.New("failed to set execution gas price")
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_Execution{
Execution: &iproto.ExecutionPb{
Amount: amount.Bytes(),
Contract: execution.Contract,
Data: data,
},
},
Version: uint32(execution.Version),
SenderPubKey: executorPubKey,
Nonce: uint64(execution.Nonce),
GasLimit: uint64(execution.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
// broadcast to the network
if err := exp.broadcastHandler(context.Background(), exp.bc.ChainID(), actPb); err != nil {
return explorer.SendSmartContractResponse{}, err
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), actPb)
sc := &action.SealedEnvelope{}
if err := sc.LoadProto(actPb); err != nil {
return explorer.SendSmartContractResponse{}, err
}
h := sc.Hash()
return explorer.SendSmartContractResponse{Hash: hex.EncodeToString(h[:])}, nil
}
// ReadExecutionState reads the state in a contract address specified by the slot
func (exp *Service) ReadExecutionState(execution explorer.Execution) (string, error) {
log.L().Debug("receive read smart contract request")
actPb, err := convertExplorerExecutionToActionPb(&execution)
if err != nil {
return "", err
}
selp := &action.SealedEnvelope{}
if err := selp.LoadProto(actPb); err != nil {
return "", err
}
sc, ok := selp.Action().(*action.Execution)
if !ok {
return "", errors.New("not execution")
}
callerPKHash := keypair.HashPubKey(selp.SrcPubkey())
callerAddr, err := address.FromBytes(callerPKHash[:])
if err != nil {
return "", err
}
res, err := exp.bc.ExecuteContractRead(callerAddr, sc)
if err != nil {
return "", err
}
return hex.EncodeToString(res.ReturnValue), nil
}
// GetBlockOrActionByHash get block or action by a hash
func (exp *Service) GetBlockOrActionByHash(hashStr string) (explorer.GetBlkOrActResponse, error) {
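	// Probe each entity type in turn; the first lookup that succeeds determines
	// which field of the response is populated.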
if blk, err := exp.GetBlockByID(hashStr); err == nil {
return explorer.GetBlkOrActResponse{Block: &blk}, nil
}
if tsf, err := exp.GetTransferByID(hashStr); err == nil {
return explorer.GetBlkOrActResponse{Transfer: &tsf}, nil
}
if vote, err := exp.GetVoteByID(hashStr); err == nil {
return explorer.GetBlkOrActResponse{Vote: &vote}, nil
}
if exe, err := exp.GetExecutionByID(hashStr); err == nil {
return explorer.GetBlkOrActResponse{Execution: &exe}, nil
}
return explorer.GetBlkOrActResponse{}, nil
}
// CreateDeposit deposits balance from main-chain to sub-chain
func (exp *Service) CreateDeposit(req explorer.CreateDepositRequest) (res explorer.CreateDepositResponse, err error) {
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("createDeposit", succeed).Inc()
}()
senderPubKey, err := keypair.StringToPubKeyBytes(req.SenderPubKey)
if err != nil {
return res, err
}
signature, err := hex.DecodeString(req.Signature)
if err != nil {
return res, err
}
amount, ok := big.NewInt(0).SetString(req.Amount, 10)
if !ok {
return res, errors.New("error when converting amount string into big int type")
}
gasPrice, ok := big.NewInt(0).SetString(req.GasPrice, 10)
if !ok {
return res, errors.New("error when converting gas price string into big int type")
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_CreateDeposit{
CreateDeposit: &iproto.CreateDepositPb{
ChainID: uint32(req.ChainID),
Amount: amount.Bytes(),
Recipient: req.Recipient,
},
},
Version: uint32(req.Version),
SenderPubKey: senderPubKey,
Nonce: uint64(req.Nonce),
GasLimit: uint64(req.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
// broadcast to the network
if err := exp.broadcastHandler(context.Background(), exp.bc.ChainID(), actPb); err != nil {
return res, err
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), actPb)
selp := &action.SealedEnvelope{}
if err := selp.LoadProto(actPb); err != nil {
return res, err
}
h := selp.Hash()
return explorer.CreateDepositResponse{Hash: hex.EncodeToString(h[:])}, nil
}
// GetDeposits returns the deposits of a sub-chain in the given range in descending order by the index
func (exp *Service) GetDeposits(subChainID int64, offset int64, limit int64) ([]explorer.Deposit, error) {
subChainsInOp, err := exp.mainChain.SubChainsInOperation()
if err != nil {
return nil, err
}
var targetSubChain mainchain.InOperation
for _, subChainInOp := range subChainsInOp {
if subChainInOp.ID == uint32(subChainID) {
targetSubChain = subChainInOp
}
}
if targetSubChain.ID != uint32(subChainID) {
return nil, errors.Errorf("sub-chain %d is not found in operation", subChainID)
}
subChainAddr, err := address.FromBytes(targetSubChain.Addr)
if err != nil {
return nil, err
}
subChain, err := exp.mainChain.SubChain(subChainAddr)
if err != nil {
return nil, err
}
idx := uint64(offset)
// If the last deposit index is lower than the start index, reset it
if subChain.DepositCount-1 < idx {
idx = subChain.DepositCount - 1
}
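	// Walk the deposit index downward so deposits are returned in descending order.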
var deposits []explorer.Deposit
for count := int64(0); count < limit; count++ {
deposit, err := exp.mainChain.Deposit(subChainAddr, idx)
if err != nil {
return nil, err
}
recipient, err := address.FromBytes(deposit.Addr)
if err != nil {
return nil, err
}
deposits = append(deposits, explorer.Deposit{
Amount: deposit.Amount.String(),
Address: recipient.String(),
Confirmed: deposit.Confirmed,
})
if idx > 0 {
idx--
} else {
break
}
}
return deposits, nil
}
// SettleDeposit settles deposit on sub-chain
func (exp *Service) SettleDeposit(req explorer.SettleDepositRequest) (res explorer.SettleDepositResponse, err error) {
defer func() {
succeed := "true"
if err != nil {
succeed = "false"
}
requestMtc.WithLabelValues("settleDeposit", succeed).Inc()
}()
senderPubKey, err := keypair.StringToPubKeyBytes(req.SenderPubKey)
if err != nil {
return res, err
}
signature, err := hex.DecodeString(req.Signature)
if err != nil {
return res, err
}
amount, ok := big.NewInt(0).SetString(req.Amount, 10)
if !ok {
return res, errors.New("error when converting amount string into big int type")
}
gasPrice, ok := big.NewInt(0).SetString(req.GasPrice, 10)
if !ok {
return res, errors.New("error when converting gas price string into big int type")
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_SettleDeposit{
SettleDeposit: &iproto.SettleDepositPb{
Amount: amount.Bytes(),
Index: uint64(req.Index),
Recipient: req.Recipient,
},
},
Version: uint32(req.Version),
SenderPubKey: senderPubKey,
Nonce: uint64(req.Nonce),
GasLimit: uint64(req.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
// broadcast to the network
if err := exp.broadcastHandler(context.Background(), exp.bc.ChainID(), actPb); err != nil {
return res, err
}
// send to actpool via dispatcher
exp.dp.HandleBroadcast(context.Background(), exp.bc.ChainID(), actPb)
deposit := &action.SealedEnvelope{}
if err := deposit.LoadProto(actPb); err != nil {
return res, err
}
h := deposit.Hash()
return explorer.SettleDepositResponse{Hash: hex.EncodeToString(h[:])}, nil
}
// SuggestGasPrice suggests a gas price
func (exp *Service) SuggestGasPrice() (int64, error) {
return exp.gs.suggestGasPrice()
}
// EstimateGasForTransfer estimates the gas for a transfer
func (exp *Service) EstimateGasForTransfer(tsfJSON explorer.SendTransferRequest) (int64, error) {
return exp.gs.estimateGasForTransfer(tsfJSON)
}
// EstimateGasForVote estimates the gas for a vote
func (exp *Service) EstimateGasForVote() (int64, error) {
return exp.gs.estimateGasForVote()
}
// EstimateGasForSmartContract estimates the gas for a smart contract execution
func (exp *Service) EstimateGasForSmartContract(execution explorer.Execution) (int64, error) {
return exp.gs.estimateGasForSmartContract(execution)
}
// GetStateRootHash gets the state root hash of a given block height
func (exp *Service) GetStateRootHash(blockHeight int64) (string, error) {
rootHash, err := exp.bc.GetFactory().RootHashByHeight(uint64(blockHeight))
if err != nil {
return "", err
}
return hex.EncodeToString(rootHash[:]), nil
}
// getTransfer takes in a blockchain and transferHash and returns an Explorer Transfer
func getTransfer(bc blockchain.Blockchain, ap actpool.ActPool, transferHash hash.Hash256, idx *indexservice.Server, useIndexer bool) (explorer.Transfer, error) {
explorerTransfer := explorer.Transfer{}
selp, err := bc.GetActionByActionHash(transferHash)
if err != nil {
// Try to fetch pending transfer from actpool
selp, err := ap.GetActionByHash(transferHash)
if err != nil {
return explorerTransfer, err
}
return convertTsfToExplorerTsf(selp, true)
}
// Fetch from block
var blkHash hash.Hash256
if useIndexer {
hash, err := idx.Indexer().GetBlockByIndex(config.IndexTransfer, transferHash)
if err != nil {
return explorerTransfer, err
}
blkHash = hash
} else {
hash, err := bc.GetBlockHashByTransferHash(transferHash)
if err != nil {
return explorerTransfer, err
}
blkHash = hash
}
blk, err := bc.GetBlockByHash(blkHash)
if err != nil {
return explorerTransfer, err
}
if explorerTransfer, err = convertTsfToExplorerTsf(selp, false); err != nil {
return explorerTransfer, errors.Wrapf(err, "failed to convert transfer %v to explorer's JSON transfer", selp)
}
explorerTransfer.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerTransfer.BlockID = hex.EncodeToString(blkHash[:])
return explorerTransfer, nil
}
// getVote takes in a blockchain and voteHash and returns an Explorer Vote
func getVote(bc blockchain.Blockchain, ap actpool.ActPool, voteHash hash.Hash256, idx *indexservice.Server, useIndexer bool) (explorer.Vote, error) {
explorerVote := explorer.Vote{}
selp, err := bc.GetActionByActionHash(voteHash)
if err != nil {
// Try to fetch pending vote from actpool
selp, err := ap.GetActionByHash(voteHash)
if err != nil {
return explorerVote, err
}
return convertVoteToExplorerVote(selp, true)
}
// Fetch from block
var blkHash hash.Hash256
if useIndexer {
hash, err := idx.Indexer().GetBlockByIndex(config.IndexVote, voteHash)
if err != nil {
return explorerVote, err
}
blkHash = hash
} else {
hash, err := bc.GetBlockHashByVoteHash(voteHash)
if err != nil {
return explorerVote, err
}
blkHash = hash
}
blk, err := bc.GetBlockByHash(blkHash)
if err != nil {
return explorerVote, err
}
if explorerVote, err = convertVoteToExplorerVote(selp, false); err != nil {
return explorerVote, errors.Wrapf(err, "failed to convert vote %v to explorer's JSON vote", selp)
}
explorerVote.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerVote.BlockID = hex.EncodeToString(blkHash[:])
return explorerVote, nil
}
// getExecution takes in a blockchain, an actpool, and an execution hash, and returns an Explorer Execution, optionally consulting the index service to locate the containing block
func getExecution(bc blockchain.Blockchain, ap actpool.ActPool, executionHash hash.Hash256, idx *indexservice.Server, useIndexer bool) (explorer.Execution, error) {
explorerExecution := explorer.Execution{}
selp, err := bc.GetActionByActionHash(executionHash)
if err != nil {
// Try to fetch pending execution from actpool
selp, err = ap.GetActionByHash(executionHash)
if err != nil {
return explorerExecution, err
}
return convertExecutionToExplorerExecution(selp, true)
}
// Fetch from block
var blkHash hash.Hash256
if useIndexer {
hash, err := idx.Indexer().GetBlockByIndex(config.IndexExecution, executionHash)
if err != nil {
return explorerExecution, err
}
blkHash = hash
} else {
hash, err := bc.GetBlockHashByExecutionHash(executionHash)
if err != nil {
return explorerExecution, err
}
blkHash = hash
}
blk, err := bc.GetBlockByHash(blkHash)
if err != nil {
return explorerExecution, err
}
if explorerExecution, err = convertExecutionToExplorerExecution(selp, false); err != nil {
return explorerExecution, errors.Wrapf(err, "failed to convert execution %v to explorer's JSON execution", selp)
}
explorerExecution.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
explorerExecution.BlockID = hex.EncodeToString(blkHash[:])
return explorerExecution, nil
}
// getCreateDeposit takes in a blockchain, an actpool, and a create deposit hash, and returns an Explorer create deposit
func getCreateDeposit(
bc blockchain.Blockchain,
ap actpool.ActPool,
createDepositHash hash.Hash256,
) (explorer.CreateDeposit, error) {
pending := false
var selp action.SealedEnvelope
var err error
selp, err = bc.GetActionByActionHash(createDepositHash)
if err != nil {
// Try to fetch pending create deposit from actpool
selp, err = ap.GetActionByHash(createDepositHash)
if err != nil {
return explorer.CreateDeposit{}, err
}
pending = true
}
// Fetch from block
blkHash, err := bc.GetBlockHashByActionHash(createDepositHash)
if err != nil {
return explorer.CreateDeposit{}, err
}
blk, err := bc.GetBlockByHash(blkHash)
if err != nil {
return explorer.CreateDeposit{}, err
}
cd, err := castActionToCreateDeposit(selp, pending)
if err != nil {
return explorer.CreateDeposit{}, err
}
cd.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
cd.BlockID = hex.EncodeToString(blkHash[:])
return cd, nil
}
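// castActionToCreateDeposit asserts that the sealed envelope wraps a
// CreateDeposit action and converts it into an explorer.CreateDeposit.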
func castActionToCreateDeposit(selp action.SealedEnvelope, pending bool) (explorer.CreateDeposit, error) {
cd, ok := selp.Action().(*action.CreateDeposit)
if !ok {
return explorer.CreateDeposit{}, errors.Wrap(ErrAction, "action type is not create deposit")
}
hash := selp.Hash()
createDeposit := explorer.CreateDeposit{
Nonce: int64(selp.Nonce()),
ID: hex.EncodeToString(hash[:]),
Recipient: cd.Recipient(),
Fee: "", // TODO: we need to get the actual fee.
GasLimit: int64(selp.GasLimit()),
IsPending: pending,
}
if cd.Amount() != nil && len(cd.Amount().String()) > 0 {
createDeposit.Amount = cd.Amount().String()
}
if selp.GasPrice() != nil && len(selp.GasPrice().String()) > 0 {
createDeposit.GasPrice = selp.GasPrice().String()
}
return createDeposit, nil
}
// getSettleDeposit takes in a blockchain, an actpool, and a settle deposit hash, and returns an Explorer settle deposit
func getSettleDeposit(
bc blockchain.Blockchain,
ap actpool.ActPool,
settleDepositHash hash.Hash256,
) (explorer.SettleDeposit, error) {
pending := false
var selp action.SealedEnvelope
var err error
selp, err = bc.GetActionByActionHash(settleDepositHash)
if err != nil {
// Try to fetch pending settle deposit from actpool
selp, err = ap.GetActionByHash(settleDepositHash)
if err != nil {
return explorer.SettleDeposit{}, err
}
pending = true
}
// Fetch from block
blkHash, err := bc.GetBlockHashByActionHash(settleDepositHash)
if err != nil {
return explorer.SettleDeposit{}, err
}
blk, err := bc.GetBlockByHash(blkHash)
if err != nil {
return explorer.SettleDeposit{}, err
}
sd, err := castActionToSettleDeposit(selp, pending)
if err != nil {
return explorer.SettleDeposit{}, err
}
sd.Timestamp = blk.ConvertToBlockHeaderPb().GetTimestamp().GetSeconds()
sd.BlockID = hex.EncodeToString(blkHash[:])
return sd, nil
}
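// castActionToSettleDeposit asserts that the sealed envelope wraps a
// SettleDeposit action and converts it into an explorer.SettleDeposit.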
func castActionToSettleDeposit(selp action.SealedEnvelope, pending bool) (explorer.SettleDeposit, error) {
sd, ok := selp.Action().(*action.SettleDeposit)
if !ok {
return explorer.SettleDeposit{}, errors.Wrap(ErrAction, "action type is not settle deposit")
}
hash := selp.Hash()
settleDeposit := explorer.SettleDeposit{
Nonce: int64(selp.Nonce()),
ID: hex.EncodeToString(hash[:]),
Recipient: sd.Recipient(),
Index: int64(sd.Index()),
Fee: "", // TODO: we need to get the actual fee.
GasLimit: int64(selp.GasLimit()),
IsPending: pending,
}
if sd.Amount() != nil && len(sd.Amount().String()) > 0 {
settleDeposit.Amount = sd.Amount().String()
}
if selp.GasPrice() != nil && len(selp.GasPrice().String()) > 0 {
settleDeposit.GasPrice = selp.GasPrice().String()
}
return settleDeposit, nil
}
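// convertTsfToExplorerTsf converts a sealed transfer envelope into an
// explorer.Transfer.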
func convertTsfToExplorerTsf(selp action.SealedEnvelope, isPending bool) (explorer.Transfer, error) {
transfer, ok := selp.Action().(*action.Transfer)
if !ok {
return explorer.Transfer{}, errors.Wrap(ErrTransfer, "action is not transfer")
}
if transfer == nil {
return explorer.Transfer{}, errors.Wrap(ErrTransfer, "transfer cannot be nil")
}
hash := selp.Hash()
explorerTransfer := explorer.Transfer{
Nonce: int64(selp.Nonce()),
ID: hex.EncodeToString(hash[:]),
Recipient: transfer.Recipient(),
Fee: "", // TODO: we need to get the actual fee.
Payload: hex.EncodeToString(transfer.Payload()),
GasLimit: int64(selp.GasLimit()),
IsCoinbase: false,
IsPending: isPending,
}
if transfer.Amount() != nil && len(transfer.Amount().String()) > 0 {
explorerTransfer.Amount = transfer.Amount().String()
}
if selp.GasPrice() != nil && len(selp.GasPrice().String()) > 0 {
explorerTransfer.GasPrice = selp.GasPrice().String()
}
return explorerTransfer, nil
}
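// convertVoteToExplorerVote converts a sealed vote envelope into an
// explorer.Vote.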
func convertVoteToExplorerVote(selp action.SealedEnvelope, isPending bool) (explorer.Vote, error) {
vote, ok := selp.Action().(*action.Vote)
if !ok {
		return explorer.Vote{}, errors.Wrap(ErrVote, "action is not vote")
}
if vote == nil {
return explorer.Vote{}, errors.Wrap(ErrVote, "vote cannot be nil")
}
hash := selp.Hash()
voterPubkey := vote.VoterPublicKey()
explorerVote := explorer.Vote{
ID: hex.EncodeToString(hash[:]),
Nonce: int64(selp.Nonce()),
VoterPubKey: keypair.EncodePublicKey(voterPubkey),
Votee: vote.Votee(),
GasLimit: int64(selp.GasLimit()),
GasPrice: selp.GasPrice().String(),
IsPending: isPending,
}
return explorerVote, nil
}
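// convertExecutionToExplorerExecution converts a sealed execution envelope
// into an explorer.Execution.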
func convertExecutionToExplorerExecution(selp action.SealedEnvelope, isPending bool) (explorer.Execution, error) {
execution, ok := selp.Action().(*action.Execution)
if !ok {
		return explorer.Execution{}, errors.Wrap(ErrExecution, "action is not execution")
}
if execution == nil {
return explorer.Execution{}, errors.Wrap(ErrExecution, "execution cannot be nil")
}
	hash := selp.Hash() // hash of the sealed envelope, matching the other converters
explorerExecution := explorer.Execution{
Nonce: int64(selp.Nonce()),
ID: hex.EncodeToString(hash[:]),
Contract: execution.Contract(),
GasLimit: int64(selp.GasLimit()),
Data: hex.EncodeToString(execution.Data()),
IsPending: isPending,
}
if execution.Amount() != nil && len(execution.Amount().String()) > 0 {
explorerExecution.Amount = execution.Amount().String()
}
if selp.GasPrice() != nil && len(selp.GasPrice().String()) > 0 {
explorerExecution.GasPrice = selp.GasPrice().String()
}
return explorerExecution, nil
}
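// convertReceiptToExplorerReceipt converts an action receipt, including its
// logs, into an explorer.Receipt.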
func convertReceiptToExplorerReceipt(receipt *action.Receipt) (explorer.Receipt, error) {
if receipt == nil {
return explorer.Receipt{}, errors.Wrap(ErrReceipt, "receipt cannot be nil")
}
logs := []explorer.Log{}
for _, log := range receipt.Logs {
topics := []string{}
for _, topic := range log.Topics {
topics = append(topics, hex.EncodeToString(topic[:]))
}
logs = append(logs, explorer.Log{
Address: log.Address,
Topics: topics,
Data: hex.EncodeToString(log.Data),
BlockNumber: int64(log.BlockNumber),
TxnHash: hex.EncodeToString(log.TxnHash[:]),
BlockHash: hex.EncodeToString(log.BlockHash[:]),
Index: int64(log.Index),
})
}
return explorer.Receipt{
ReturnValue: hex.EncodeToString(receipt.ReturnValue),
Status: int64(receipt.Status),
Hash: hex.EncodeToString(receipt.ActHash[:]),
GasConsumed: int64(receipt.GasConsumed),
ContractAddress: receipt.ContractAddress,
Logs: logs,
}, nil
}
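// convertExplorerExecutionToActionPb converts an explorer execution request
// into an action protobuf message.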
func convertExplorerExecutionToActionPb(execution *explorer.Execution) (*iproto.ActionPb, error) {
executorPubKey, err := keypair.StringToPubKeyBytes(execution.ExecutorPubKey)
if err != nil {
return nil, err
}
data, err := hex.DecodeString(execution.Data)
if err != nil {
return nil, err
}
signature, err := hex.DecodeString(execution.Signature)
if err != nil {
return nil, err
}
amount, ok := big.NewInt(0).SetString(execution.Amount, 10)
if !ok {
return nil, errors.New("failed to set execution amount")
}
gasPrice, ok := big.NewInt(0).SetString(execution.GasPrice, 10)
if !ok {
return nil, errors.New("failed to set execution gas price")
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_Execution{
Execution: &iproto.ExecutionPb{
Amount: amount.Bytes(),
Contract: execution.Contract,
Data: data,
},
},
Version: uint32(execution.Version),
SenderPubKey: executorPubKey,
Nonce: uint64(execution.Nonce),
GasLimit: uint64(execution.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
return actPb, nil
}
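// convertExplorerTransferToActionPb converts an explorer transfer request
// into an action protobuf message, enforcing the payload size limit.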
func convertExplorerTransferToActionPb(tsfJSON *explorer.SendTransferRequest,
maxTransferPayloadBytes uint64) (*iproto.ActionPb, error) {
payload, err := hex.DecodeString(tsfJSON.Payload)
if err != nil {
return nil, err
}
if uint64(len(payload)) > maxTransferPayloadBytes {
return nil, errors.Wrapf(
ErrTransfer,
"transfer payload contains %d bytes, and is longer than %d bytes limit",
len(payload),
maxTransferPayloadBytes,
)
}
senderPubKey, err := keypair.StringToPubKeyBytes(tsfJSON.SenderPubKey)
if err != nil {
return nil, err
}
signature, err := hex.DecodeString(tsfJSON.Signature)
if err != nil {
return nil, err
}
amount, ok := big.NewInt(0).SetString(tsfJSON.Amount, 10)
if !ok {
return nil, errors.New("failed to set transfer amount")
}
gasPrice, ok := big.NewInt(0).SetString(tsfJSON.GasPrice, 10)
if !ok {
return nil, errors.New("failed to set transfer gas price")
}
actPb := &iproto.ActionPb{
Action: &iproto.ActionPb_Transfer{
Transfer: &iproto.TransferPb{
Amount: amount.Bytes(),
Recipient: tsfJSON.Recipient,
Payload: payload,
},
},
Version: uint32(tsfJSON.Version),
SenderPubKey: senderPubKey,
Nonce: uint64(tsfJSON.Nonce),
GasLimit: uint64(tsfJSON.GasLimit),
GasPrice: gasPrice.Bytes(),
Signature: signature,
}
return actPb, nil
}
| 1 | 15,052 | invalid operation: exp (variable of type *Service) has no field or method getAddressDetails (from `typecheck`) | iotexproject-iotex-core | go |
@@ -4479,7 +4479,8 @@ func TestKBFSOpsBackgroundFlush(t *testing.T) {
// start the background flusher
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
- go ops.backgroundFlusher(1 * time.Millisecond)
+ config.SetBGFlushPeriod(1 * time.Millisecond)
+ go ops.backgroundFlusher()
// Make sure we get the notification
select { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
type CheckBlockOps struct {
BlockOps
tr gomock.TestReporter
}
var _ BlockOps = (*CheckBlockOps)(nil)
func (cbo *CheckBlockOps) Ready(ctx context.Context, kmd KeyMetadata,
block Block) (id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData,
err error) {
id, plainSize, readyBlockData, err = cbo.BlockOps.Ready(ctx, kmd, block)
encodedSize := readyBlockData.GetEncodedSize()
if plainSize > encodedSize {
cbo.tr.Errorf("expected plainSize <= encodedSize, got plainSize = %d, "+
"encodedSize = %d", plainSize, encodedSize)
}
return
}
type tCtxIDType int
const (
tCtxID tCtxIDType = iota
)
// Time out individual tests after 10 seconds.
var individualTestTimeout = 10 * time.Second
func kbfsOpsInit(t *testing.T, changeMd bool) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr)
config.SetCodec(kbfscodec.NewMsgpack())
blockops := &CheckBlockOps{config.mockBops, ctr}
config.SetBlockOps(blockops)
kbfsops := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsops)
config.SetNotifier(kbfsops)
// Use real caches, to avoid the overhead of tracking cache calls.
// Each test is expected to check the cache for correctness at the
// end of the test.
config.SetBlockCache(NewBlockCacheStandard(100, 1<<30))
config.SetDirtyBlockCache(NewDirtyBlockCacheStandard(wallClock{},
config.MakeLogger(""), 5<<20, 10<<20, 5<<20))
config.mockBcache = nil
config.mockDirtyBcache = nil
if changeMd {
// Give different values for the MD Id so we can test that it
// is properly cached
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(2), nil)
} else {
config.mockCrypto.EXPECT().MakeMdID(gomock.Any()).AnyTimes().
Return(fakeMdID(1), nil)
}
// These tests don't rely on external notifications at all, so ignore any
// goroutine attempting to register:
c := make(chan error, 1)
config.mockMdserv.EXPECT().RegisterForUpdate(gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return(c, nil)
config.mockMdserv.EXPECT().OffsetFromServerTime().
Return(time.Duration(0), true).AnyTimes()
// None of these tests depend on time
config.mockClock.EXPECT().Now().AnyTimes().Return(time.Now())
// Ignore Notify calls for now
config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
// Max out MaxPtrsPerBlock
config.mockBsplit.EXPECT().MaxPtrsPerBlock().
Return(int((^uint(0)) >> 1)).AnyTimes()
// Ignore Archive calls for now
config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// Ignore Prefetcher calls
brc := &testBlockRetrievalConfig{nil, newTestLogMaker(t),
config.BlockCache(), nil, newTestDiskBlockCacheGetter(t, nil)}
pre := newBlockPrefetcher(nil, brc)
config.mockBops.EXPECT().Prefetcher().AnyTimes().Return(pre)
// Ignore BlockRetriever calls
brq := newBlockRetrievalQueue(0, brc)
config.mockBops.EXPECT().BlockRetriever().AnyTimes().Return(brq)
// Ignore key bundle ID creation calls for now
config.mockCrypto.EXPECT().MakeTLFWriterKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFWriterKeyBundleID{}, nil)
config.mockCrypto.EXPECT().MakeTLFReaderKeyBundleID(gomock.Any()).
AnyTimes().Return(TLFReaderKeyBundleID{}, nil)
// Ignore favorites
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).AnyTimes().
Return(nil, nil)
config.mockKbpki.EXPECT().FavoriteAdd(gomock.Any(), gomock.Any()).
AnyTimes().Return(nil)
interposeDaemonKBPKI(config, "alice", "bob", "charlie")
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
// make the context identifiable, to verify that it is passed
// correctly to the observer
id := rand.Int()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, tCtxID, id)
}))
if err != nil {
t.Fatal(err)
}
initSuccess = true
return mockCtrl, config, ctx, cancel
}
func kbfsTestShutdown(mockCtrl *gomock.Controller, config *ConfigMock,
ctx context.Context, cancel context.CancelFunc) {
config.ctr.CheckForFailures()
config.KBFSOps().(*KBFSOpsStandard).Shutdown(ctx)
if config.mockDirtyBcache == nil {
		// Ignore the error; some tests intentionally leave around dirty data.
		_ = config.DirtyBlockCache().Shutdown()
}
cancel()
if err := CleanupCancellationDelayer(ctx); err != nil {
panic(err)
}
mockCtrl.Finish()
}
// kbfsOpsInitNoMocks returns a config that doesn't use any mocks. The
// shutdown call is kbfsTestShutdownNoMocks.
func kbfsOpsInitNoMocks(t *testing.T, users ...libkb.NormalizedUsername) (
*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
config := MakeTestConfigOrBust(t, users...)
config.SetRekeyWithPromptWaitTime(individualTestTimeout)
timeoutCtx, cancel := context.WithTimeout(
context.Background(), individualTestTimeout)
initSuccess := false
defer func() {
if !initSuccess {
cancel()
}
}()
ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
timeoutCtx, func(c context.Context) context.Context {
return c
}))
if err != nil {
t.Fatal(err)
}
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
initSuccess = true
return config, session.UID, ctx, cancel
}
func kbfsTestShutdownNoMocks(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
CheckConfigAndShutdown(ctx, t, config)
cancel()
CleanupCancellationDelayer(ctx)
}
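// A typical no-mock test pairs the two helpers above (this is the pattern
// used by the tests below):
//
//	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
//	defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)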
// TODO: Get rid of all users of this.
func kbfsTestShutdownNoMocksNoCheck(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
config.Shutdown(ctx)
cancel()
CleanupCancellationDelayer(ctx)
}
func checkBlockCache(t *testing.T, config *ConfigMock, id tlf.ID,
expectedCleanBlocks []kbfsblock.ID,
expectedDirtyBlocks map[BlockPointer]BranchName) {
bcache := config.BlockCache().(*BlockCacheStandard)
// make sure the LRU consists of exactly the right set of clean blocks
for _, id := range expectedCleanBlocks {
_, ok := bcache.cleanTransient.Get(id)
if !ok {
t.Errorf("BlockCache missing clean block %v at the end of the test",
id)
}
}
if bcache.cleanTransient.Len() != len(expectedCleanBlocks) {
t.Errorf("BlockCache has extra clean blocks at end of test")
}
// make sure the dirty cache consists of exactly the right set of
// dirty blocks
dirtyBcache := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
for ptr, branch := range expectedDirtyBlocks {
_, err := dirtyBcache.Get(id, ptr, branch)
if err != nil {
t.Errorf("BlockCache missing dirty block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
if !dirtyBcache.IsDirty(id, ptr, branch) {
t.Errorf("BlockCache has incorrectly clean block %v, branch %s at "+
"the end of the test: err %+v", ptr, branch, err)
}
}
if len(dirtyBcache.cache) != len(expectedDirtyBlocks) {
t.Errorf("BlockCache has extra dirty blocks at end of test")
}
}
func TestKBFSOpsGetFavoritesSuccess(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
handle1 := parseTlfHandleOrBust(t, config, "alice", false)
handle2 := parseTlfHandleOrBust(t, config, "alice,bob", false)
// dup for testing
handles := []*TlfHandle{handle1, handle2, handle2}
for _, h := range handles {
config.KeybaseService().FavoriteAdd(
context.Background(), h.ToFavorite().toKBFolder(false))
}
// The favorites list contains our own public dir by default, even
// if KBPKI doesn't return it.
handle3 := parseTlfHandleOrBust(t, config, "alice", true)
handles = append(handles, handle3)
handles2, err := config.KBFSOps().GetFavorites(ctx)
if err != nil {
t.Errorf("Got error on favorites: %+v", err)
}
if len(handles2) != len(handles)-1 {
t.Errorf("Got bad handles back: %v", handles2)
}
}
func TestKBFSOpsGetFavoritesFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
err := errors.New("Fake fail")
// Replace the old one (added in init function)
config.mockKbpki = NewMockKBPKI(mockCtrl)
config.SetKBPKI(config.mockKbpki)
// expect one call to favorites, and fail it
config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).Return(nil, err)
if _, err2 := config.KBFSOps().GetFavorites(ctx); err2 != err {
t.Errorf("Got bad error on favorites: %+v", err2)
}
}
func getOps(config Config, id tlf.ID) *folderBranchOps {
return config.KBFSOps().(*KBFSOpsStandard).
getOpsNoAdd(FolderBranch{id, MasterBranch})
}
// createNewRMD creates a new RMD for the given name. It also returns the
// TLF ID and handle.
func createNewRMD(t *testing.T, config Config, name string, public bool) (
tlf.ID, *TlfHandle, *RootMetadata) {
id := tlf.FakeID(1, public)
h := parseTlfHandleOrBust(t, config, name, public)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
return id, h, rmd
}
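// makeImmutableRMDForTest fakes a writer signature on rmd (when needed) and
// wraps it in an ImmutableRootMetadata for use in tests.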
func makeImmutableRMDForTest(t *testing.T, config Config, rmd *RootMetadata,
mdID MdID) ImmutableRootMetadata {
session, err := config.KBPKI().GetCurrentSession(context.Background())
require.NoError(t, err)
// We have to fake out the signature here because most tests
// in this file modify the returned value, invalidating any
// real signatures. TODO: Fix all the tests in this file to
// not do so, and then just use MakeImmutableRootMetadata.
if brmdv2, ok := rmd.bareMd.(*BareRootMetadataV2); ok {
vk := brmdv2.WriterMetadataSigInfo.VerifyingKey
require.True(t, vk == (kbfscrypto.VerifyingKey{}) || vk == session.VerifyingKey,
"Writer signature %s with unexpected non-nil verifying key != %s",
brmdv2.WriterMetadataSigInfo, session.VerifyingKey)
brmdv2.WriterMetadataSigInfo = kbfscrypto.SignatureInfo{
VerifyingKey: session.VerifyingKey,
}
}
return MakeImmutableRootMetadata(rmd, session.VerifyingKey, mdID, time.Now())
}
// injectNewRMD creates a new RMD and makes sure the existing ops for
// its ID has as its head that RMD.
func injectNewRMD(t *testing.T, config *ConfigMock) (
keybase1.UID, tlf.ID, *RootMetadata) {
id, h, rmd := createNewRMD(t, config, "alice", false)
var keyGen KeyGen
if id.IsPublic() {
keyGen = PublicKeyGen
} else {
keyGen = 1
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
KeyGen: keyGen,
DataVer: 1,
},
EncodedSize: 1,
},
}
rmd.fakeInitialRekey()
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(
t, config, rmd, fakeMdID(tlf.FakeIDByte(id)))
ops.headStatus = headTrusted
rmd.SetSerializedPrivateMetadata(make([]byte, 1))
config.Notifier().RegisterForChanges(
[]FolderBranch{{id, MasterBranch}}, config.observer)
uid := h.FirstResolvedWriter()
rmd.data.Dir.Creator = uid
return uid, id, rmd
}
func TestKBFSOpsGetRootNodeCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
func TestKBFSOpsGetRootNodeReIdentify(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.False(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
assert.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
assert.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
assert.Equal(t, rmd.GetTlfHandle(), h)
// Trigger identify.
lState := makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
// Mark everything for reidentifying, and wait for it to finish
// before checking.
kop := config.KBFSOps().(*KBFSOpsStandard)
returnCh := make(chan struct{})
kop.reIdentifyControlChan <- returnCh
<-returnCh
assert.False(t, fboIdentityDone(ops))
// Trigger new identify.
lState = makeFBOLockState()
_, err = ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
}
// fboIdentityDone is needed to avoid data races.
func fboIdentityDone(fbo *folderBranchOps) bool {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
return fbo.identifyDone
}
type failIdentifyKBPKI struct {
KBPKI
identifyErr error
}
func (kbpki failIdentifyKBPKI) Identify(ctx context.Context, assertion, reason string) (UserInfo, error) {
return UserInfo{}, kbpki.identifyErr
}
func TestKBFSOpsGetRootNodeCacheIdentifyFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
_, id, rmd := injectNewRMD(t, config)
rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
rmd.data.Dir.Type = Dir
ops := getOps(config, id)
expectedErr := errors.New("Identify failure")
config.SetKBPKI(failIdentifyKBPKI{config.KBPKI(), expectedErr})
// Trigger identify.
lState := makeFBOLockState()
_, err := ops.getMDForReadLocked(ctx, lState, mdReadNeedIdentify)
assert.Equal(t, expectedErr, err)
assert.False(t, fboIdentityDone(ops))
}
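// expectBlock arranges for the mock BlockOps to return the given block (and
// error) for blockPtr, populating the block cache as a side effect.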
func expectBlock(config *ConfigMock, kmd KeyMetadata, blockPtr BlockPointer, block Block, err error) {
config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
ptrMatcher{blockPtr}, gomock.Any(), gomock.Any()).
Do(func(ctx context.Context, kmd KeyMetadata,
blockPtr BlockPointer, getBlock Block, lifetime BlockCacheLifetime) {
getBlock.Set(block)
config.BlockCache().Put(blockPtr, kmd.TlfID(), getBlock, lifetime)
}).Return(err)
}
// ptrMatcher implements the gomock.Matcher interface to compare
// BlockPointer objects. We don't care about some of the fields in a
// pointer for the purposes of these tests.
type ptrMatcher struct {
ptr BlockPointer
}
// Matches implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) Matches(x interface{}) bool {
xPtr, ok := x.(BlockPointer)
if !ok {
return false
}
return (xPtr.ID == p.ptr.ID && xPtr.RefNonce == p.ptr.RefNonce)
}
// String implements the Matcher interface for ptrMatcher.
func (p ptrMatcher) String() string {
return fmt.Sprintf("Matches BlockPointer %v", p.ptr)
}
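// A custom matcher such as ptrMatcher plugs directly into a gomock
// expectation wherever a BlockPointer argument is expected, e.g. (sketch,
// mirroring the expectBlock helper above):
//
//	config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
//		ptrMatcher{blockPtr}, gomock.Any(), gomock.Any()).Return(nil)

// fillInNewMD fakes an initial rekey (for private TLFs) and installs a root
// directory entry in rmd.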
func fillInNewMD(t *testing.T, config *ConfigMock, rmd *RootMetadata) {
if !rmd.TlfID().IsPublic() {
rmd.fakeInitialRekey()
}
rootPtr := BlockPointer{
ID: kbfsblock.FakeID(42),
KeyGen: 1,
DataVer: 1,
}
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: rootPtr,
EncodedSize: 5,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 3,
},
}
}
func testKBFSOpsGetRootNodeCreateNewSuccess(t *testing.T, public bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", public)
fillInNewMD(t, config, rmd)
// create a new MD
config.mockMdops.EXPECT().GetUnmergedForTLF(
gomock.Any(), id, gomock.Any()).Return(ImmutableRootMetadata{}, nil)
irmd := makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
config.mockMdops.EXPECT().GetForTLF(gomock.Any(), id).Return(irmd, nil)
config.mockMdcache.EXPECT().Put(irmd).Return(nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
n, ei, h, err := ops.getRootNode(ctx)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
require.Equal(t, id, p.Tlf)
require.Equal(t, 1, len(p.path))
require.Equal(t, rmd.data.Dir.ID, p.path[0].ID)
require.Equal(t, rmd.data.Dir.EntryInfo, ei)
require.Equal(t, rmd.GetTlfHandle(), h)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPublic(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, true)
}
func TestKBFSOpsGetRootNodeCreateNewSuccessPrivate(t *testing.T) {
testKBFSOpsGetRootNodeCreateNewSuccess(t, false)
}
func TestKBFSOpsGetRootMDForHandleExisting(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
rmd.data.Dir = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: BlockPointer{
ID: kbfsblock.FakeID(1),
},
EncodedSize: 15,
},
EntryInfo: EntryInfo{
Type: Dir,
Size: 10,
Mtime: 1,
Ctime: 2,
},
}
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Unmerged).Return(
tlf.ID{}, ImmutableRootMetadata{}, nil)
config.mockMdops.EXPECT().GetForHandle(gomock.Any(), h, Merged).Return(
tlf.ID{}, makeImmutableRMDForTest(t, config, rmd, fakeMdID(1)), nil)
ops := getOps(config, id)
assert.False(t, fboIdentityDone(ops))
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(2))
ops.headStatus = headTrusted
n, ei, err :=
config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
require.NoError(t, err)
assert.True(t, fboIdentityDone(ops))
p := ops.nodeCache.PathFromNode(n)
if p.Tlf != id {
t.Errorf("Got bad dir id back: %v", p.Tlf)
} else if len(p.path) != 1 {
t.Errorf("Got bad MD back: path size %d", len(p.path))
} else if p.path[0].ID != rmd.data.Dir.ID {
t.Errorf("Got bad MD back: root ID %v", p.path[0].ID)
} else if ei.Type != Dir {
t.Error("Got bad MD non-dir rootID back")
} else if ei.Size != 10 {
t.Errorf("Got bad MD Size back: %d", ei.Size)
} else if ei.Mtime != 1 {
t.Errorf("Got bad MD MTime back: %d", ei.Mtime)
} else if ei.Ctime != 2 {
t.Errorf("Got bad MD CTime back: %d", ei.Ctime)
}
}
// rmd should really be a ReadOnlyRootMetadata or *BareRootMetadata in
// the helper functions below, but all the callers would have to go
// md.ReadOnly(), which doesn't buy us much in tests.
func makeBP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID) BlockPointer {
return BlockPointer{
ID: id,
KeyGen: kmd.LatestKeyGeneration(),
DataVer: DefaultNewBlockDataVersion(false),
Context: kbfsblock.Context{
Creator: u,
// Refnonces not needed; explicit refnonce
// testing happens elsewhere.
},
}
}
func makeBI(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32) BlockInfo {
return BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
}
}
func makeIFP(id kbfsblock.ID, kmd KeyMetadata, config Config,
u keybase1.UID, encodedSize uint32, off int64) IndirectFilePtr {
return IndirectFilePtr{
BlockInfo{
BlockPointer: makeBP(id, kmd, config, u),
EncodedSize: encodedSize,
},
off,
false,
codec.UnknownFieldSetHandler{},
}
}
func makeBIFromID(id kbfsblock.ID, user keybase1.UID) BlockInfo {
return BlockInfo{
BlockPointer: BlockPointer{
ID: id, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: user,
},
},
EncodedSize: 1,
}
}
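// nodeFromPath populates the node cache with every node along p and returns
// the node for the tail of the path.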
func nodeFromPath(t *testing.T, ops *folderBranchOps, p path) Node {
var prevNode Node
// populate the node cache with all the nodes we'll need
for _, pathNode := range p.path {
n, err := ops.nodeCache.GetOrCreate(pathNode.BlockPointer,
pathNode.Name, prevNode)
if err != nil {
t.Fatal(err)
}
prevNode = n
}
return prevNode
}
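// testPutBlockInCache seeds the clean block cache with block under ptr,
// mirroring it in the mock cache when one is in use.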
func testPutBlockInCache(
t *testing.T, config *ConfigMock, ptr BlockPointer, id tlf.ID,
block Block) {
err := config.BlockCache().Put(ptr, id, block, TransientEntry)
require.NoError(t, err)
if config.mockBcache != nil {
config.mockBcache.EXPECT().Get(ptr).AnyTimes().Return(block, nil)
}
}
func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: File}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Dir}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, blockPtr, dirBlock, nil)
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err != nil {
t.Errorf("Got error on getdir: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailNonReader(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "bob#alice", false)
// Hack around access check in ParseTlfHandle.
h.resolvedReaders = nil
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, session.UID), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
// won't even try getting the block if the user isn't a reader
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
expectedErr := NewReadAccessError(h, "alice", "/keybase/private/bob#alice")
if _, err := config.KBFSOps().GetDirChildren(ctx, n); err == nil {
t.Errorf("Got no expected error on getdir")
} else if err != expectedErr {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetBaseDirChildrenUncachedFailMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
dirBlock := NewDirBlock().(*DirBlock)
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key, then
// fail block fetch
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, blockPtr, dirBlock, err)
if _, err2 := config.KBFSOps().GetDirChildren(ctx, n); err2 == nil {
t.Errorf("Got no expected error on getdir")
} else if err2 != err {
t.Errorf("Got unexpected error on root MD: %+v", err)
}
}
func TestKBFSOpsGetNestedDirChildrenCacheSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: Exec}}
dirBlock.Children["b"] = DirEntry{EntryInfo: EntryInfo{Type: Sym}}
blockPtr := makeBP(rootID, rmd, config, u)
rmd.data.Dir.BlockPointer = blockPtr
node := pathNode{blockPtr, "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, bNode.BlockPointer, id, dirBlock)
children, err := config.KBFSOps().GetDirChildren(ctx, n)
if err != nil {
t.Errorf("Got error on getdir: %+v", err)
} else if len(children) != 2 {
t.Errorf("Got bad children back: %v", children)
}
for c, ei := range children {
if de, ok := dirBlock.Children[c]; !ok {
t.Errorf("No such child: %s", c)
} else if de.EntryInfo != ei {
t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
}
}
}
func TestKBFSOpsLookupSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
bPath := ops.nodeCache.PathFromNode(bn)
expectedBNode := pathNode{makeBP(bID, rmd, config, u), "b"}
expectedBNode.KeyGen = 1
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bPath.path[2] != expectedBNode {
t.Errorf("Bad path node after lookup: %v vs %v",
bPath.path[2], expectedBNode)
}
}
func TestKBFSOpsLookupSymlinkSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Sym,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
bn, ei, err := config.KBFSOps().Lookup(ctx, n, "b")
if err != nil {
t.Errorf("Error on Lookup: %+v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Lookup returned a bad directory entry: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
} else if bn != nil {
t.Errorf("Node for symlink is not nil: %v", bn)
}
}
func TestKBFSOpsLookupNoSuchNameFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := NoSuchNameError{"c"}
_, _, err := config.KBFSOps().Lookup(ctx, n, "c")
if err == nil {
t.Error("No error as expected on Lookup")
} else if err != expectedErr {
t.Errorf("Unexpected error after bad Lookup: %+v", err)
}
}
func TestKBFSOpsLookupNewDataVersionFail(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
bInfo := makeBIFromID(bID, u)
bInfo.DataVer = 10
dirBlock.Children["b"] = DirEntry{
BlockInfo: bInfo,
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
expectedErr := &NewDataVersionError{
path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}},
bInfo.DataVer,
}
_, _, err := config.KBFSOps().Lookup(ctx, n, "b")
if err == nil {
t.Error("No expected error found on lookup")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Unexpected error after bad lookup: %+v", err)
}
}
func TestKBFSOpsStatSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
bID := kbfsblock.FakeID(44)
dirBlock := NewDirBlock().(*DirBlock)
dirBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
bNode := pathNode{makeBP(bID, rmd, config, u), "b"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, bNode}}
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
ei, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %+v", err)
}
if ei != dirBlock.Children["b"].EntryInfo {
t.Errorf("Stat returned a bad entry info: %v vs %v",
ei, dirBlock.Children["b"].EntryInfo)
}
}
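// shimMDOps intercepts MD puts so tests can exercise the merged and unmerged
// put paths without a real MD server.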
type shimMDOps struct {
isUnmerged bool
codec kbfscodec.Codec
crypto cryptoPure
kbpki KBPKI
MDOps
}
func (s shimMDOps) Put(ctx context.Context, rmd *RootMetadata) (MdID, error) {
if s.isUnmerged {
return MdID{}, MDServerErrorConflictRevision{}
}
rmd.SetSerializedPrivateMetadata([]byte{0x1})
session, err := s.kbpki.GetCurrentSession(ctx)
if err != nil {
return MdID{}, err
}
signingKey := MakeLocalUserSigningKeyOrBust(session.Name)
err = rmd.bareMd.SignWriterMetadataInternally(
ctx, s.codec, kbfscrypto.SigningKeySigner{Key: signingKey})
if err != nil {
return MdID{}, err
}
return s.crypto.MakeMdID(rmd.bareMd)
}
func (s shimMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata) (MdID, error) {
if !s.isUnmerged {
panic("Unexpected PutUnmerged call")
}
rmd.SetSerializedPrivateMetadata([]byte{0x2})
session, err := s.kbpki.GetCurrentSession(ctx)
if err != nil {
return MdID{}, err
}
signingKey := MakeLocalUserSigningKeyOrBust(session.Name)
err = rmd.bareMd.SignWriterMetadataInternally(
ctx, s.codec, kbfscrypto.SigningKeySigner{Key: signingKey})
if err != nil {
return MdID{}, err
}
return s.crypto.MakeMdID(rmd.bareMd)
}
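// expectSyncBlockHelper sets up mock expectations for syncing every block
// along p (plus an optional new entry), returning the expected new path and
// the last gomock call for chaining.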
func expectSyncBlockHelper(
t *testing.T, config *ConfigMock, lastCall *gomock.Call,
uid keybase1.UID, id tlf.ID, name string, p path, kmd KeyMetadata,
newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
newRmd *ImmutableRootMetadata, newBlockIDs []kbfsblock.ID, isUnmerged bool) (
path, *gomock.Call) {
// construct new path
newPath := path{
FolderBranch{Tlf: id},
make([]pathNode, 0, len(p.path)+1),
}
for _, node := range p.path {
newPath.path = append(newPath.path, pathNode{Name: node.Name})
}
if newEntry {
// one for the new entry
newPath.path = append(newPath.path, pathNode{Name: name})
}
// all MD is embedded for now
config.mockBsplit.EXPECT().ShouldEmbedBlockChanges(gomock.Any()).
AnyTimes().Return(true)
// By convention for these tests, the old blocks along the path
// all have EncodedSize == 1.
unrefBytes += uint64(len(p.path) * 1)
config.mockBops.EXPECT().GetEncodedSize(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(uint32(1), nil)
lastID := p.tailPointer().ID
for i := len(newPath.path) - 1; i >= skipSync; i-- {
newID := kbfsblock.FakeIDMul(lastID, 2)
newBuf := []byte{byte(i)}
refBytes += uint64(len(newBuf))
lastID = newID
readyBlockData := ReadyBlockData{
buf: newBuf,
}
call := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd},
gomock.Any()).Return(newID, len(newBuf), readyBlockData, nil)
if lastCall != nil {
call = call.After(lastCall)
}
lastCall = call
newPath.path[i].ID = newID
newBlockIDs[i] = newID
config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
gomock.Any(), readyBlockData.buf, readyBlockData.serverHalf).
Return(nil)
}
if skipSync == 0 {
// sign the MD and put it
oldMDOps := config.MDOps()
if oldShim, ok := oldMDOps.(shimMDOps); ok {
if oldShim.isUnmerged != isUnmerged {
t.Fatal("old shim with different isUnmerged")
}
} else {
mdOps := shimMDOps{
isUnmerged,
config.Codec(),
config.Crypto(),
config.KBPKI(),
oldMDOps,
}
config.SetMDOps(mdOps)
}
config.mockMdcache.EXPECT().Put(gomock.Any()).
Do(func(rmd ImmutableRootMetadata) {
*newRmd = rmd
// Check that the ref bytes are correct.
if rmd.RefBytes() != refBytes {
t.Errorf("Unexpected refbytes: %d vs %d",
rmd.RefBytes(), refBytes)
}
if rmd.UnrefBytes() != unrefBytes {
t.Errorf("Unexpected unrefbytes: %d vs %d",
rmd.UnrefBytes(), unrefBytes)
}
}).Return(nil)
}
return newPath, lastCall
}
func expectSyncBlock(
t *testing.T, config *ConfigMock, lastCall *gomock.Call,
uid keybase1.UID, id tlf.ID, name string, p path, kmd KeyMetadata,
newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
newRmd *ImmutableRootMetadata, newBlockIDs []kbfsblock.ID) (path, *gomock.Call) {
return expectSyncBlockHelper(t, config, lastCall, uid, id, name, p, kmd,
newEntry, skipSync, refBytes, unrefBytes, newRmd, newBlockIDs, false)
}
func expectSyncBlockUnmerged(
t *testing.T, config *ConfigMock, lastCall *gomock.Call,
uid keybase1.UID, id tlf.ID, name string, p path, kmd KeyMetadata,
newEntry bool, skipSync int, refBytes uint64, unrefBytes uint64,
newRmd *ImmutableRootMetadata, newBlockIDs []kbfsblock.ID) (path, *gomock.Call) {
return expectSyncBlockHelper(t, config, lastCall, uid, id, name, p, kmd,
newEntry, skipSync, refBytes, unrefBytes, newRmd, newBlockIDs, true)
}
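// getBlockFromCache fetches a block from the dirty cache first, falling back
// to the clean cache.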
func getBlockFromCache(t *testing.T, config Config, id tlf.ID, ptr BlockPointer,
branch BranchName) Block {
if block, err := config.DirtyBlockCache().Get(id, ptr, branch); err == nil {
return block
}
block, err := config.BlockCache().Get(ptr)
if err != nil {
t.Errorf("Couldn't find block %v, branch %s in the cache after test: "+
"%+v", ptr, branch, err)
return nil
}
return block
}
func getDirBlockFromCache(t *testing.T, config Config, id tlf.ID,
ptr BlockPointer, branch BranchName) *DirBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
dblock, ok := block.(*DirBlock)
if !ok {
t.Errorf("Cached block %v, branch %s was not a DirBlock", ptr, branch)
}
return dblock
}
func getFileBlockFromCache(t *testing.T, config Config, id tlf.ID,
ptr BlockPointer, branch BranchName) *FileBlock {
block := getBlockFromCache(t, config, id, ptr, branch)
fblock, ok := block.(*FileBlock)
if !ok {
t.Errorf("Cached block %v, branch %s was not a FileBlock", ptr, branch)
}
return fblock
}
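// checkNewPath verifies that newPath matches expectedPath and that the
// directory entries along it were updated consistently.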
func checkNewPath(t *testing.T, ctx context.Context, config Config,
newPath path, expectedPath path, rmd ReadOnlyRootMetadata, blocks []kbfsblock.ID,
entryType EntryType, newName string, rename bool) {
// TODO: check that the observer updates match the expectedPath as
// well (but need to handle the rename case where there can be
// multiple updates). For now, just check that there's at least
// one update.
if len(config.(*ConfigMock).observer.batchChanges) < 1 {
t.Errorf("No batch notifications sent, at least one expected")
}
if ctx.Value(tCtxID) != config.(*ConfigMock).observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in batch notify: %v",
config.(*ConfigMock).observer.ctx.Value(tCtxID))
}
if len(newPath.path) != len(expectedPath.path) {
t.Errorf("Unexpected new path length: %d", len(newPath.path))
return
}
if newPath.Tlf != expectedPath.Tlf {
t.Errorf("Unexpected topdir in new path: %s",
newPath.Tlf)
}
// check all names and IDs
for i, node := range newPath.path {
eNode := expectedPath.path[i]
if node.ID != eNode.ID {
t.Errorf("Wrong id on new path[%d]: %v vs. %v", i, node, eNode)
}
if node.Name != eNode.Name {
t.Errorf("Wrong name on new path[%d]: %v vs. %v", i, node, eNode)
}
}
// all the entries should point correctly and have the right times set
currDe := rmd.data.Dir
for i, id := range blocks {
var timeSet bool
if newName != "" {
// only the last 2 nodes should have their times changed
timeSet = i > len(blocks)-3
} else {
// only the last node should have its times changed
timeSet = i > len(blocks)-2
}
// for a rename, the last entry only changes ctime
if (!rename || i != len(blocks)-1) && (currDe.Mtime != 0) != timeSet {
t.Errorf("mtime was wrong (%d): %d", i, currDe.Mtime)
}
if (currDe.Ctime != 0) != timeSet {
t.Errorf("ctime was wrong (%d): %d", i, currDe.Ctime)
}
if i < len(expectedPath.path) {
eID := expectedPath.path[i].ID
if currDe.ID != eID {
t.Errorf("Entry does not point to %v, but to %v",
eID, currDe.ID)
}
}
if i < len(blocks)-1 {
var nextName string
if i+1 >= len(expectedPath.path) {
// new symlinks don't have an entry in the path
nextName = newName
} else {
nextName = expectedPath.path[i+1].Name
}
// TODO: update BlockPointer for refnonces when we start deduping
dblock := getDirBlockFromCache(t, config, newPath.Tlf,
makeBP(id, rmd.RootMetadata, config, rmd.data.Dir.Creator), newPath.Branch)
nextDe, ok := dblock.Children[nextName]
if !ok {
t.Errorf("No entry (%d) for %s", i, nextName)
}
currDe = nextDe
} else if newName != "" {
if currDe.Type != entryType {
t.Errorf("New entry has wrong type %s, expected %s",
currDe.Type, entryType)
}
}
if (currDe.Type != File && currDe.Type != Exec) && currDe.Size == 0 {
t.Errorf("Type %s unexpectedly has 0 size (%d)", currDe.Type, i)
}
}
}
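// checkBPs asserts that bps and expectedBPs contain the same set of block
// pointers.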
func checkBPs(t *testing.T, bps []BlockPointer, expectedBPs []BlockPointer,
kind string) {
if len(expectedBPs) != len(bps) {
t.Errorf("Unexpected %s size: %d vs %d",
kind, len(bps), len(expectedBPs))
}
for _, ptr := range expectedBPs {
found := false
for _, ptr2 := range bps {
if ptr == ptr2 {
found = true
break
}
}
if !found {
t.Errorf("Missing expected %s block: %v", kind, ptr)
}
}
}
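// checkOp asserts that an op records the expected ref, unref, and update
// block lists.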
func checkOp(t *testing.T, op OpCommon, refs []BlockPointer,
unrefs []BlockPointer, updates []blockUpdate) {
checkBPs(t, op.RefBlocks, refs, "Refs")
checkBPs(t, op.UnrefBlocks, unrefs, "Unrefs")
if len(updates) != len(op.Updates) {
t.Errorf("Unexpected updates size: %d vs %d",
len(op.Updates), len(updates))
}
for _, up := range updates {
found := false
for _, up2 := range op.Updates {
if up == up2 {
found = true
break
}
}
if !found {
t.Errorf("Missing expected block update: %v", up)
}
}
}
func testCreateEntrySuccess(t *testing.T, entryType EntryType) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
t.Log("Setup RootMetadata")
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
rmd.data.Dir.Type = Dir
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
t.Log("Create a/b")
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
t.Log("Sync block")
var newRmd ImmutableRootMetadata
blocks := make([]kbfsblock.ID, 3)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "b", p, rmd,
entryType != Sym, 0, 0, 0, &newRmd, blocks)
var newN Node
var err error
switch entryType {
case File:
id := kbfsblock.FakeID(100)
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id, nil)
newN, _, err = config.KBFSOps().CreateFile(ctx, n, "b", false, NoExcl)
case Exec:
id := kbfsblock.FakeID(100)
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id, nil)
newN, _, err = config.KBFSOps().CreateFile(ctx, n, "b", true, NoExcl)
case Dir:
id := kbfsblock.FakeID(100)
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id, nil)
newN, _, err = config.KBFSOps().CreateDir(ctx, n, "b")
case Sym:
_, err = config.KBFSOps().CreateLink(ctx, n, "b", "c")
newN = n
}
newP := ops.nodeCache.PathFromNode(newN)
if err != nil {
t.Errorf("Got error on create: %+v", err)
}
require.NotNil(t, newRmd)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
entryType, "b", false)
b1 := getDirBlockFromCache(t, config, id, newP.path[1].BlockPointer,
newP.Branch)
if entryType == Sym {
de := b1.Children["b"]
if de.Type != Sym {
t.Error("Entry is not a symbolic link")
}
if de.SymPath != "c" {
t.Errorf("Symbolic path points to the wrong thing: %s", de.SymPath)
}
blocks = blocks[:len(blocks)-1] // discard fake block for symlink
} else if entryType != Dir {
de := b1.Children["b"]
if de.Size != 0 {
t.Errorf("New file has non-zero size: %d", de.Size)
}
}
t.Log("Check block cache")
checkBlockCache(t, config, id, append(blocks, rootID, aID), nil)
// make sure the createOp is correct
co, ok := newRmd.data.Changes.Ops[0].(*createOp)
if !ok {
t.Errorf("Couldn't find the createOp")
}
var refBlocks []BlockPointer
if entryType != Sym {
refBlocks = append(refBlocks, newP.path[2].BlockPointer)
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
t.Log("Check op")
checkOp(t, co.OpCommon, refBlocks, nil, updates)
dirUpdate := blockUpdate{rootBlock.Children["a"].BlockPointer,
newP.path[1].BlockPointer}
if co.Dir != dirUpdate {
t.Errorf("Incorrect dir update in op: %v vs. %v", co.Dir, dirUpdate)
} else if co.NewName != "b" {
t.Errorf("Incorrect name in op: %v", co.NewName)
} else if co.Type != entryType {
t.Errorf("Incorrect entry type in op: %v", co.Type)
}
}
func TestKBFSOpsCreateDirSuccess(t *testing.T) {
testCreateEntrySuccess(t, Dir)
}
func TestKBFSOpsCreateFileSuccess(t *testing.T) {
testCreateEntrySuccess(t, File)
}
func TestKBFSOpsCreateExecFileSuccess(t *testing.T) {
testCreateEntrySuccess(t, Exec)
}
func TestKBFSOpsCreateLinkSuccess(t *testing.T) {
testCreateEntrySuccess(t, Sym)
}
func testCreateEntryFailDupName(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// creating "a", which already exists in the root block
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameExistsError{"a"}
var err error
// dir and link have different checks for dup name
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, "a")
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, "a", "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, true)
}
func TestCreateLinkFailDupName(t *testing.T) {
testCreateEntryFailDupName(t, false)
}
func testCreateEntryFailNameTooLong(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
config.maxNameBytes = 2
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NameTooLongError{name, config.maxNameBytes}
var err error
	// dir and link take different create paths to the name-length check
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, true)
}
func TestCreateLinkFailNameTooLong(t *testing.T) {
testCreateEntryFailNameTooLong(t, false)
}
func testCreateEntryFailDirTooBig(t *testing.T, isDir bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
rmd.data.Dir.Size = 10
config.maxDirBytes = 12
name := "aaa"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
var err error
	// dir and link take different create paths to the dir-size check
if isDir {
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
} else {
_, err = config.KBFSOps().CreateLink(ctx, n, name, "b")
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if _, ok := err.(DirTooBigError); !ok {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, true)
}
func TestCreateLinkFailDirTooBig(t *testing.T) {
testCreateEntryFailDirTooBig(t, false)
}
func testCreateEntryFailKBFSPrefix(t *testing.T, et EntryType) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: Dir,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
name := ".kbfs_status"
expectedErr := DisallowedPrefixError{name, ".kbfs"}
var err error
	// each entry type takes a different create path to the prefix check
switch et {
case Dir:
_, _, err = config.KBFSOps().CreateDir(ctx, n, name)
case Sym:
_, err = config.KBFSOps().CreateLink(ctx, n, name, "a")
case Exec:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, true, NoExcl)
case File:
_, _, err = config.KBFSOps().CreateFile(ctx, n, name, false, NoExcl)
}
if err == nil {
t.Errorf("Got no expected error on create")
} else if err != expectedErr {
t.Errorf("Got unexpected error on create: %+v", err)
}
}
func TestCreateDirFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Dir)
}
func TestCreateFileFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, File)
}
func TestCreateExecFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Exec)
}
func TestCreateLinkFailKBFSPrefix(t *testing.T) {
testCreateEntryFailKBFSPrefix(t, Sym)
}
// TODO: Currently only the remove tests use makeDirTree(),
// makeFile(), et al. Make the other tests use these functions, too.
// makeDirTree creates a block tree for the given path components and
// returns the DirEntry for the root block, a path, and the
// corresponding list of blocks. If n components are given, then the
// path will have n+1 nodes (one extra for the root node), and there
// will be n+1 corresponding blocks.
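// A minimal usage sketch (caller-side; id and uid come from the test):
//
//	rootEntry, p, blocks := makeDirTree(id, uid, "a", "b")
//	// p.path has 3 nodes ({root}, "a", "b"); blocks has 3 DirBlocks,
//	// where blocks[0] is the root block and blocks[2] is the empty
//	// "b" block.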
func makeDirTree(id tlf.ID, uid keybase1.UID, components ...string) (
DirEntry, path, []*DirBlock) {
var idCounter byte = 0x10
makeBlockID := func() kbfsblock.ID {
id := kbfsblock.FakeID(idCounter)
idCounter++
return id
}
// Handle the first (root) block.
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
rootEntry := DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes := []pathNode{{bi.BlockPointer, "{root}"}}
rootBlock := NewDirBlock().(*DirBlock)
blocks := []*DirBlock{rootBlock}
// Handle the rest.
parentDirBlock := rootBlock
for _, component := range components {
bid := makeBlockID()
bi := makeBIFromID(bid, uid)
parentDirBlock.Children[component] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
nodes = append(nodes, pathNode{bi.BlockPointer, component})
dirBlock := NewDirBlock().(*DirBlock)
blocks = append(blocks, dirBlock)
parentDirBlock = dirBlock
}
return rootEntry, path{FolderBranch{Tlf: id}, nodes}, blocks
}
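// makeFile adds a File or Exec entry named name to parentDirBlock,
// deriving its block ID from dir's tail pointer, and returns the child
// path plus a fresh empty FileBlock; directType is recorded in the
// entry's BlockInfo. It panics on any other entry type.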
func makeFile(dir path, parentDirBlock *DirBlock, name string, et EntryType,
directType BlockDirectType) (
path, *FileBlock) {
if et != File && et != Exec {
panic(fmt.Sprintf("Unexpected type %s", et))
}
bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
bi.DirectType = directType
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: et,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewFileBlock().(*FileBlock)
}
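// makeDir adds a Dir entry named name to parentDirBlock, deriving its
// block ID from dir's tail pointer, and returns the child path plus a
// fresh empty DirBlock.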
func makeDir(dir path, parentDirBlock *DirBlock, name string) (
path, *DirBlock) {
bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
bi := makeBIFromID(bid, dir.tailPointer().Creator)
parentDirBlock.Children[name] = DirEntry{
BlockInfo: bi,
EntryInfo: EntryInfo{
Type: Dir,
},
}
p := dir.ChildPath(name, bi.BlockPointer)
return p, NewDirBlock().(*DirBlock)
}
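// makeSym adds a Sym entry named name to parentDirBlock. Symlinks have
// no blocks of their own, so there is no block or path to return.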
func makeSym(dir path, parentDirBlock *DirBlock, name string) {
parentDirBlock.Children[name] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
},
}
}
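// checkRmOp verifies that the first op recorded in newRmd is an rmOp
// removing entryName from the directory at the tail of dirPath, with
// block updates for every node along the path and the given set of
// unreferenced blocks.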
func checkRmOp(t *testing.T, entryName string, newRmd ReadOnlyRootMetadata,
dirPath, newDirPath path, unrefBlocks []BlockPointer) {
// make sure the rmOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*rmOp)
require.True(t, ok)
var updates []blockUpdate
for i := 0; i < len(dirPath.path)-1; i++ {
updates = append(updates, blockUpdate{
dirPath.path[i].BlockPointer,
newDirPath.path[i].BlockPointer,
})
}
checkOp(t, ro.OpCommon, nil, unrefBlocks, updates)
dirUpdate := blockUpdate{
dirPath.tailPointer(), newDirPath.tailPointer(),
}
require.Equal(t, dirUpdate, ro.Dir)
require.Equal(t, entryName, ro.OldName)
}
func testKBFSOpsRemoveFileSuccess(t *testing.T, et EntryType) {
if et != File && et != Exec {
t.Fatalf("Unexpected type %s", et)
}
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
entryName := "file"
if et == Exec {
entryName += ".exe"
}
p, _ := makeFile(dirPath, parentDirBlock, entryName, et, DirectBlock)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]kbfsblock.ID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, et, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for i := 0; i < len(p.path)-1; i++ {
n := p.path[i]
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestKBFSOpsRemoveFileSuccess(t *testing.T) {
testKBFSOpsRemoveFileSuccess(t, File)
}
func TestKBFSOpsRemoveExecSuccess(t *testing.T) {
testKBFSOpsRemoveFileSuccess(t, Exec)
}
func TestKBFSOpsRemoveDirSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
entryName := "dir"
rootEntry, p, blocks := makeDirTree(
id, uid, "a", "b", "c", "d", entryName)
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
dirPath := *p.parentPath()
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]kbfsblock.ID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveDir(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, Dir, "", false)
newParentBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentBlock.Children[entryName]
require.False(t, ok)
for _, n := range p.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestKBFSOpsRemoveSymSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
entryName := "sym"
makeSym(dirPath, parentDirBlock, entryName)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]kbfsblock.ID, len(dirPath.path))
	// No block is being unreferenced; symlinks have no blocks of their own.
var unrefBytes uint64
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, Sym, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range dirPath.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, nil)
}
func TestKBFSOpRemoveMultiBlockFileSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
entryName := "multiBlockFile"
lastBID := dirPath.tailPointer().ID
fileBID := kbfsblock.FakeIDAdd(lastBID, 1)
fileBI := makeBIFromID(fileBID, dirPath.tailPointer().Creator)
parentDirBlock.Children[entryName] = DirEntry{
BlockInfo: fileBI,
EntryInfo: EntryInfo{
Type: File,
},
}
	// TODO: Write a helper function for making a file with
	// indirect blocks and use it in other tests. (One possible
	// sketch, makeIndirectFile, follows this test.)
bid1 := kbfsblock.FakeIDAdd(lastBID, 2)
bid2 := kbfsblock.FakeIDAdd(lastBID, 3)
bid3 := kbfsblock.FakeIDAdd(lastBID, 4)
bid4 := kbfsblock.FakeIDAdd(lastBID, 5)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(bid1, rmd, config, uid, 5, 0),
makeIFP(bid2, rmd, config, uid, 5, 5),
makeIFP(bid3, rmd, config, uid, 5, 10),
makeIFP(bid4, rmd, config, uid, 5, 15),
}
fileBlock.IPtrs[0].DirectType = DirectBlock
fileBlock.IPtrs[1].DirectType = DirectBlock
fileBlock.IPtrs[2].DirectType = DirectBlock
fileBlock.IPtrs[3].DirectType = DirectBlock
fileBP := makeBP(fileBID, rmd, config, uid)
p := dirPath.ChildPath(entryName, fileBP)
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
// Let the top block be uncached, so we have to fetch it from
// BlockOps. Don't cache any of the file direct blocks, to make
// sure we don't try to fetch them.
expectBlock(config, rmd, fileBP, fileBlock, nil)
// sync block
blockIDs := make([]kbfsblock.ID, len(dirPath.path))
unrefBytes := uint64(1 + 4*5) // fileBlock + 4 indirect blocks
var newRmd ImmutableRootMetadata
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(), blockIDs,
File, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range p.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{
fileBP,
fileBlock.IPtrs[0].BlockPointer,
fileBlock.IPtrs[1].BlockPointer,
fileBlock.IPtrs[2].BlockPointer,
fileBlock.IPtrs[3].BlockPointer,
}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
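// makeIndirectFile is a minimal sketch of the helper the TODO in
// TestKBFSOpRemoveMultiBlockFileSuccess asks for; the name, signature,
// and variadic pointer argument are assumptions, not existing API. It
// wires a File entry named name into parentDirBlock, backed by an
// indirect top block that holds the caller-built indirect pointers
// (e.g. from makeIFP), and returns the file's path plus that top block.
func makeIndirectFile(dir path, parentDirBlock *DirBlock, name string,
	iptrs ...IndirectFilePtr) (path, *FileBlock) {
	bid := kbfsblock.FakeIDAdd(dir.tailPointer().ID, 1)
	bi := makeBIFromID(bid, dir.tailPointer().Creator)
	parentDirBlock.Children[name] = DirEntry{
		BlockInfo: bi,
		EntryInfo: EntryInfo{
			Type: File,
		},
	}
	fblock := NewFileBlock().(*FileBlock)
	fblock.IsInd = true
	fblock.IPtrs = iptrs
	return dir.ChildPath(name, bi.BlockPointer), fblock
}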
func TestRemoveDirFailNonEmpty(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, *p.parentPath().parentPath())
expectedErr := DirNotEmptyError{p.parentPath().tailName()}
err := config.KBFSOps().RemoveDir(ctx, n, "d")
require.Equal(t, expectedErr, err)
}
func testKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T, et EntryType) {
if et != File && et != Exec {
t.Fatalf("Unexpected type %s", et)
}
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, dirPath, dirBlocks :=
makeDirTree(id, uid, "a", "b", "c", "d")
rmd.data.Dir = rootEntry
// Prime cache with all dir blocks.
for i, dirBlock := range dirBlocks {
testPutBlockInCache(
t, config, dirPath.path[i].BlockPointer, id, dirBlock)
}
parentDirBlock := dirBlocks[len(dirBlocks)-1]
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
entryName := "file"
if et == Exec {
entryName += ".exe"
}
p, _ := makeFile(dirPath, parentDirBlock, entryName, et, IndirectBlock)
// The operation might be retried several times.
config.mockBops.EXPECT().Get(
gomock.Any(), gomock.Any(), p.tailPointer(),
gomock.Any(), gomock.Any()).Return(kbfsblock.BServerErrorBlockNonExistent{}).MinTimes(1)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]kbfsblock.ID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveEntry(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, File, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range dirPath.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, File)
}
func TestKBFSOpsRemoveExecMissingBlockSuccess(t *testing.T) {
testKBFSOpsRemoveFileMissingBlockSuccess(t, Exec)
}
func TestKBFSOpsRemoveDirMissingBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
entryName := "dir"
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", entryName)
rmd.data.Dir = rootEntry
// Prime cache with all directory blocks.
for i := 0; i < len(blocks)-1; i++ {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, blocks[i])
}
dirPath := *p.parentPath()
ops := getOps(config, id)
n := nodeFromPath(t, ops, dirPath)
// The operation might be retried several times.
config.mockBops.EXPECT().Get(
gomock.Any(), gomock.Any(), p.tailPointer(),
gomock.Any(), gomock.Any()).Return(kbfsblock.BServerErrorBlockNonExistent{}).MinTimes(1)
// sync block
var newRmd ImmutableRootMetadata
blockIDs := make([]kbfsblock.ID, len(dirPath.path))
// a block of size 1 is being unreferenced
var unrefBytes uint64 = 1
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
dirPath, rmd, false, 0, 0, unrefBytes, &newRmd, blockIDs)
err := config.KBFSOps().RemoveDir(ctx, n, entryName)
require.NoError(t, err)
newDirPath := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newDirPath, expectedPath, newRmd.ReadOnly(),
blockIDs, Dir, "", false)
newParentDirBlock := getDirBlockFromCache(
t, config, id, newDirPath.tailPointer(), newDirPath.Branch)
_, ok := newParentDirBlock.Children[entryName]
require.False(t, ok)
for _, n := range dirPath.path {
blockIDs = append(blockIDs, n.ID)
}
checkBlockCache(t, config, id, blockIDs, nil)
unrefBlocks := []BlockPointer{p.tailPointer()}
checkRmOp(t, entryName, newRmd.ReadOnly(), dirPath, newDirPath, unrefBlocks)
}
func TestRemoveDirFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootEntry, p, blocks := makeDirTree(id, uid, "a", "b", "c", "d", "e")
rmd.data.Dir = rootEntry
// Prime cache with all blocks.
for i, block := range blocks {
testPutBlockInCache(
t, config, p.path[i].BlockPointer, id, block)
}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
expectedErr := NoSuchNameError{"nonexistent"}
err := config.KBFSOps().RemoveDir(ctx, n, "nonexistent")
require.Equal(t, expectedErr, err)
}
func TestRenameInDirSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(41)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(42)
bID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// renaming "a/b" to "a/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]kbfsblock.ID, 3)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
0, 0, 0, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n, "b", n, "c")
if err != nil {
t.Errorf("Got error on rename: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
File, "c", true)
b1 := getDirBlockFromCache(
t, config, id, newP.path[1].BlockPointer, newP.Branch)
if _, ok := b1.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 1 {
t.Errorf("Expected 1 batch notification, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID, aID), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, ro.OpCommon, nil, nil, updates)
oldDirUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
newDirUpdate := blockUpdate{}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "b" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v (expected empty)",
ro.NewDir)
} else if ro.NewName != "c" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameInDirOverEntrySuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(41)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(42)
bID := kbfsblock.FakeID(43)
cID := kbfsblock.FakeID(44)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock.Children["c"] = DirEntry{
BlockInfo: makeBIFromID(cID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
cBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
cNode := pathNode{makeBP(cID, rmd, config, uid), "c"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// renaming "a/b" to "a/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, cNode.BlockPointer, id, cBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]kbfsblock.ID, 3)
unrefBytes := uint64(1)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
0, 0, unrefBytes, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n, "b", n, "c")
if err != nil {
t.Errorf("Got error on rename: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
File, "c", true)
b1 := getDirBlockFromCache(
t, config, id, newP.path[1].BlockPointer, newP.Branch)
if _, ok := b1.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 1 {
t.Errorf("Expected 1 batch notification, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID, aID, cID), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP.path[0].BlockPointer},
}
checkOp(t, ro.OpCommon, nil, []BlockPointer{cNode.BlockPointer}, updates)
oldDirUpdate := blockUpdate{aNode.BlockPointer, newP.path[1].BlockPointer}
newDirUpdate := blockUpdate{}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "b" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v (expected empty)",
ro.NewDir)
} else if ro.NewName != "c" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameInRootSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(41)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(42)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// renaming "a" to "b"
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]kbfsblock.ID, 2)
expectedPath, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p, rmd, false,
0, 0, 0, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n, "a", n, "b")
if err != nil {
t.Errorf("Got error on rename: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
File, "b", true)
b0 := getDirBlockFromCache(
t, config, id, newP.path[0].BlockPointer, newP.Branch)
if _, ok := b0.Children["a"]; ok {
t.Errorf("entry for a is still around after rename")
} else if len(config.observer.batchChanges) != 1 {
t.Errorf("Expected 1 batch notification, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
checkOp(t, ro.OpCommon, nil, nil, nil)
oldDirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
newP.path[0].BlockPointer}
newDirUpdate := blockUpdate{}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "a" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v (expected empty)",
ro.NewDir)
} else if ro.NewName != "b" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameAcrossDirsSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(41)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(42)
bID := kbfsblock.FakeID(43)
rmd.data.Dir.Type = Dir
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n1 := nodeFromPath(t, ops, p1)
dID := kbfsblock.FakeID(40)
rootBlock.Children["d"] = DirEntry{
BlockInfo: makeBIFromID(dID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
dBlock := NewDirBlock().(*DirBlock)
dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
p2 := path{FolderBranch{Tlf: id}, []pathNode{node, dNode}}
n2 := nodeFromPath(t, ops, p2)
// renaming "a/b" to "d/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks1 := make([]kbfsblock.ID, 2)
expectedPath1, lastCall :=
expectSyncBlock(t, config, nil, uid, id, "", p1, rmd, false,
1, 0, 0, nil, blocks1)
blocks2 := make([]kbfsblock.ID, 3)
refBytes := uint64(1) // need to include directory "a"
unrefBytes := uint64(1) // need to include directory "a"
expectedPath2, _ :=
expectSyncBlock(t, config, lastCall, uid, id, "", p2, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks2)
// fix up old expected path's common ancestor
expectedPath1.path[0].ID = expectedPath2.path[0].ID
err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
if err != nil {
t.Errorf("Got error on rename: %+v", err)
}
newP1 := ops.nodeCache.PathFromNode(n1)
newP2 := ops.nodeCache.PathFromNode(n2)
// fix up blocks1 -- the first partial sync stops at aBlock, and
// checkNewPath expects {rootBlock, aBlock}
blocks1 = []kbfsblock.ID{blocks2[0], blocks1[0]}
checkNewPath(t, ctx, config, newP1, expectedPath1, newRmd.ReadOnly(), blocks1,
File, "", true)
checkNewPath(t, ctx, config, newP2, expectedPath2, newRmd.ReadOnly(), blocks2,
File, "c", true)
b0 := getDirBlockFromCache(
t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
if _, ok := b0.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 2 {
t.Errorf("Expected 2 batch notifications, got %d",
len(config.observer.batchChanges))
}
blocks2 = blocks2[:len(blocks2)-1] // the last block is never in the cache
checkBlockCache(t, config, id,
append(blocks2, rootID, aID, dID, blocks1[0]), nil)
// make sure the renameOp is correct
ro, ok := newRmd.data.Changes.Ops[0].(*renameOp)
if !ok {
t.Errorf("Couldn't find the renameOp")
}
updates := []blockUpdate{
{rmd.data.Dir.BlockPointer, newP1.path[0].BlockPointer},
}
checkOp(t, ro.OpCommon, nil, nil, updates)
oldDirUpdate := blockUpdate{aNode.BlockPointer, newP1.path[1].BlockPointer}
newDirUpdate := blockUpdate{dNode.BlockPointer, newP2.path[1].BlockPointer}
if ro.OldDir != oldDirUpdate {
t.Errorf("Incorrect old dir update in op: %v vs. %v", ro.OldDir,
oldDirUpdate)
} else if ro.OldName != "b" {
t.Errorf("Incorrect old name in op: %v", ro.OldName)
} else if ro.NewDir != newDirUpdate {
t.Errorf("Incorrect new dir update in op: %v vs. %v",
ro.NewDir, newDirUpdate)
} else if ro.NewName != "c" {
t.Errorf("Incorrect name in op: %v", ro.NewName)
}
}
func TestRenameAcrossPrefixSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(41)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(42)
bID := kbfsblock.FakeID(43)
dID := kbfsblock.FakeID(40)
rmd.data.Dir.Type = Dir
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
aBlock.Children["d"] = DirEntry{
BlockInfo: makeBIFromID(dID, uid),
EntryInfo: EntryInfo{
			Type: Dir,
},
}
dBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
p2 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, dNode}}
ops := getOps(config, id)
n1 := nodeFromPath(t, ops, p1)
n2 := nodeFromPath(t, ops, p2)
// renaming "a/b" to "a/d/c"
// the common ancestor and its parent will be changed once and then re-read
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]kbfsblock.ID, 4)
expectedPath2, _ :=
expectSyncBlock(t, config, nil, uid, id, "", p2, rmd, false,
0, 0, 0, &newRmd, blocks)
err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
if err != nil {
t.Errorf("Got error on rename: %+v", err)
}
newP1 := ops.nodeCache.PathFromNode(n1)
newP2 := ops.nodeCache.PathFromNode(n2)
if newP1.path[0].ID != newP2.path[0].ID {
t.Errorf("New old path not a prefix of new new path")
}
if newP1.path[1].ID != newP2.path[1].ID {
t.Errorf("New old path not a prefix of new new path")
}
b0 := getDirBlockFromCache(
t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
if b0.Children["a"].Mtime == 0 {
t.Errorf("a's mtime didn't change")
}
if b0.Children["a"].Ctime == 0 {
t.Errorf("a's ctime didn't change")
}
// now change the times back so checkNewPath below works without hacking
aDe := b0.Children["a"]
aDe.Mtime = 0
aDe.Ctime = 0
b0.Children["a"] = aDe
checkNewPath(t, ctx, config, newP2, expectedPath2, newRmd.ReadOnly(), blocks,
File, "c", true)
b1 := getDirBlockFromCache(
t, config, id, newP1.path[1].BlockPointer, newP1.Branch)
if _, ok := b1.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 2 {
t.Errorf("Expected 2 batch notifications, got %d",
len(config.observer.batchChanges))
}
blocks = blocks[:len(blocks)-1] // the last block is never in the cache
checkBlockCache(t, config, id,
append(blocks, rootID, aID, dID), nil)
}
func TestRenameAcrossOtherPrefixSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(41)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(42)
bID := kbfsblock.FakeID(43)
dID := kbfsblock.FakeID(40)
rmd.data.Dir.Type = Dir
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: Dir,
},
}
aBlock := NewDirBlock().(*DirBlock)
aBlock.Children["d"] = DirEntry{
BlockInfo: makeBIFromID(dID, uid),
EntryInfo: EntryInfo{
			Type: Dir,
},
}
dBlock := NewDirBlock().(*DirBlock)
dBlock.Children["b"] = DirEntry{
BlockInfo: makeBIFromID(bID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
dNode := pathNode{makeBP(dID, rmd, config, uid), "d"}
p1 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode, dNode}}
p2 := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n1 := nodeFromPath(t, ops, p1)
n2 := nodeFromPath(t, ops, p2)
// renaming "a/d/b" to "a/c"
testPutBlockInCache(t, config, aNode.BlockPointer, id, aBlock)
testPutBlockInCache(t, config, dNode.BlockPointer, id, dBlock)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks1 := make([]kbfsblock.ID, 3)
expectedPath1, lastCall :=
expectSyncBlock(t, config, nil, uid, id, "", p1, rmd, false,
2, 0, 0, &newRmd, blocks1)
blocks2 := make([]kbfsblock.ID, 3)
refBytes := uint64(1) // need to include directory "d"
unrefBytes := uint64(1) // need to include directory "d"
expectedPath2, _ :=
expectSyncBlock(t, config, lastCall, uid, id, "", p2, rmd, false, 0,
refBytes, unrefBytes, &newRmd, blocks2)
// the new path is a prefix of the old path
expectedPath1.path[0].ID = expectedPath2.path[0].ID
expectedPath1.path[1].ID = expectedPath2.path[1].ID
err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c")
if err != nil {
t.Errorf("Got error on removal: %+v", err)
}
newP1 := ops.nodeCache.PathFromNode(n1)
newP2 := ops.nodeCache.PathFromNode(n2)
if newP2.path[0].ID != newP1.path[0].ID {
t.Errorf("New old path not a prefix of new new path")
}
if newP2.path[1].ID != newP1.path[1].ID {
t.Errorf("New old path not a prefix of new new path")
}
b1 := getDirBlockFromCache(
t, config, id, newP1.path[1].BlockPointer, newP1.Branch)
if b1.Children["d"].Mtime == 0 {
t.Errorf("d's mtime didn't change")
}
if b1.Children["d"].Ctime == 0 {
t.Errorf("d's ctime didn't change")
}
b0 := getDirBlockFromCache(
t, config, id, newP1.path[0].BlockPointer, newP1.Branch)
if b0.Children["a"].Mtime == 0 {
t.Errorf("d's mtime didn't change")
}
if b0.Children["a"].Ctime == 0 {
t.Errorf("d's ctime didn't change")
}
checkNewPath(t, ctx, config, newP1, expectedPath1, newRmd.ReadOnly(), blocks2,
File, "c", true)
b2 := getDirBlockFromCache(
t, config, id, newP1.path[2].BlockPointer, newP1.Branch)
if _, ok := b2.Children["b"]; ok {
t.Errorf("entry for b is still around after rename")
} else if len(config.observer.batchChanges) != 2 {
t.Errorf("Expected 2 batch notifications, got %d",
len(config.observer.batchChanges))
}
blocks2 = blocks2[:len(blocks2)-1] // the last block is never in the cache
checkBlockCache(t, config, id,
append(blocks2, rootID, aID, dID, blocks1[2]), nil)
}
func TestRenameFailAcrossTopLevelFolders(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := tlf.FakeID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
require.NoError(t, err)
id2 := tlf.FakeID(2, false)
h2 := parseTlfHandleOrBust(t, config, "alice,bob,charlie", false)
rmd2, err := makeInitialRootMetadata(config.MetadataVersion(), id2, h2)
require.NoError(t, err)
uid1 := h2.ResolvedWriters()[0]
uid2 := h2.ResolvedWriters()[2]
rootID1 := kbfsblock.FakeID(41)
aID1 := kbfsblock.FakeID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
rootID2 := kbfsblock.FakeID(38)
aID2 := kbfsblock.FakeID(39)
node2 := pathNode{makeBP(rootID2, rmd2, config, uid2), "p"}
aNode2 := pathNode{makeBP(aID2, rmd2, config, uid2), "a"}
p2 := path{FolderBranch{Tlf: id2}, []pathNode{node2, aNode2}}
ops2 := getOps(config, id2)
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %+v", err)
}
}
func TestRenameFailAcrossBranches(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id1 := tlf.FakeID(1, false)
h1 := parseTlfHandleOrBust(t, config, "alice,bob", false)
rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
require.NoError(t, err)
uid1 := h1.FirstResolvedWriter()
rootID1 := kbfsblock.FakeID(41)
aID1 := kbfsblock.FakeID(42)
node1 := pathNode{makeBP(rootID1, rmd1, config, uid1), "p"}
aNode1 := pathNode{makeBP(aID1, rmd1, config, uid1), "a"}
p1 := path{FolderBranch{Tlf: id1}, []pathNode{node1, aNode1}}
p2 := path{FolderBranch{id1, "test"}, []pathNode{node1, aNode1}}
ops1 := getOps(config, id1)
n1 := nodeFromPath(t, ops1, p1)
ops2 := config.KBFSOps().(*KBFSOpsStandard).getOpsNoAdd(
FolderBranch{id1, "test"})
n2 := nodeFromPath(t, ops2, p2)
expectedErr := RenameAcrossDirsError{}
if err := config.KBFSOps().Rename(ctx, n1, "b", n2, "c"); err == nil {
t.Errorf("Got no expected error on rename")
} else if err.Error() != expectedErr.Error() {
t.Errorf("Got unexpected error on rename: %+v", err)
}
}
func TestKBFSOpsCacheReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
n := len(fileBlock.Contents)
	dest := make([]byte, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	dest := make([]byte, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 2); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n != 4 {
t.Errorf("Read the wrong number of bytes: %d", n)
} else if !bytes.Equal(dest, fileBlock.Contents[2:6]) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFullMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
id3 := kbfsblock.FakeID(46)
id4 := kbfsblock.FakeID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
n := 20
	dest := make([]byte, n)
fullContents := append(block1.Contents, block2.Contents...)
fullContents = append(fullContents, block3.Contents...)
fullContents = append(fullContents, block4.Contents...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fullContents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadPartialMultiBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
id3 := kbfsblock.FakeID(46)
id4 := kbfsblock.FakeID(47)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, u, 0, 0),
makeIFP(id2, rmd, config, u, 6, 5),
makeIFP(id3, rmd, config, u, 7, 10),
makeIFP(id4, rmd, config, u, 8, 15),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
block3 := NewFileBlock().(*FileBlock)
block3.Contents = []byte{15, 14, 13, 12, 11}
block4 := NewFileBlock().(*FileBlock)
block4.Contents = []byte{20, 19, 18, 17, 16}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
n := 10
	dest := make([]byte, n)
contents := append(block1.Contents[3:], block2.Contents...)
contents = append(contents, block3.Contents[:3]...)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 3); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsCacheReadFailPastEnd(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
	dest := make([]byte, 4)
if n, err := config.KBFSOps().Read(ctx, pNode, dest, 10); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n != 0 {
t.Errorf("Read the wrong number of bytes: %d", n)
}
}
func TestKBFSOpsServerReadFullSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
expectBlock(config, rmd, fileBlockPtr, fileBlock, nil)
n := len(fileBlock.Contents)
	dest := make([]byte, n)
if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil {
t.Errorf("Got error on read: %+v", err)
} else if n2 != int64(n) {
t.Errorf("Read the wrong number of bytes: %d", n2)
} else if !bytes.Equal(dest, fileBlock.Contents) {
t.Errorf("Read bad contents: %v", dest)
}
}
func TestKBFSOpsServerReadFailNoSuchBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileBlockPtr := makeBP(fileID, rmd, config, u)
fileNode := pathNode{fileBlockPtr, "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
pNode := nodeFromPath(t, ops, p)
// cache miss means fetching metadata and getting read key
err := NoSuchBlockError{rootID}
expectBlock(config, rmd, fileBlockPtr, fileBlock, err)
n := len(fileBlock.Contents)
	dest := make([]byte, n)
if _, err2 := config.KBFSOps().Read(ctx, pNode, dest, 0); err2 == nil {
t.Errorf("Got no expected error")
} else if err2 != err {
t.Errorf("Got unexpected error: %+v", err2)
}
}
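// checkSyncOp verifies that so unreferences filePtr and records
// exactly the expected write ranges, comparing each range through
// codec equality.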
func checkSyncOp(t *testing.T, codec kbfscodec.Codec,
so *syncOp, filePtr BlockPointer, writes []WriteRange) {
if so == nil {
t.Error("No sync info for written file!")
}
if so.File.Unref != filePtr {
t.Errorf("Unexpected unref file in sync op: %v vs %v",
so.File.Unref, filePtr)
}
if len(so.Writes) != len(writes) {
t.Errorf("Unexpected number of writes: %v (expected %v)",
len(so.Writes), len(writes))
}
for i, w := range writes {
writeEqual, err := kbfscodec.Equal(codec, so.Writes[i], w)
if err != nil {
t.Fatal(err)
}
if !writeEqual {
t.Errorf("Unexpected write: %v vs %v", so.Writes[i], w)
}
}
}
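// checkSyncOpInCache looks up the in-progress syncOp for filePtr in
// the folder's unref cache and verifies it with checkSyncOp.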
func checkSyncOpInCache(t *testing.T, codec kbfscodec.Codec,
ops *folderBranchOps, filePtr BlockPointer, writes []WriteRange) {
// check the in-progress syncOp
si, ok := ops.blocks.unrefCache[filePtr.Ref()]
if !ok {
t.Error("No sync info for written file!")
}
checkSyncOp(t, codec, si.op, filePtr, writes)
}
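// updateWithDirtyEntries applies any dirty directory entries to block
// while holding blockLock for reading, returning the updated copy of
// the block.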
func updateWithDirtyEntries(ctx context.Context, ops *folderBranchOps,
lState *lockState, dir path, block *DirBlock) (*DirBlock, error) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
return ops.blocks.updateWithDirtyEntriesLocked(ctx, lState, dir, block)
}
func TestKBFSOpsWriteNewBlockSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 0); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
} else if newRootBlock.Children["f"].GetWriter() != uid {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != uint64(len(data)) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: uint64(len(data))}})
}
func TestKBFSOpsWriteExtendSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 5); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: uint64(len(data))}})
}
func TestKBFSOpsWritePastEndSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{6, 7, 8, 9, 10}
expectedFullData := []byte{1, 2, 3, 4, 5, 0, 0, 6, 7, 8, 9, 10}
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data, int64(7)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = expectedFullData
}).Return(int64(len(data)))
if err := config.KBFSOps().Write(ctx, n, data, 7); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: uint64(len(data))}})
}
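// Tests that a write big enough to trigger a block split converts the
// file into an indirect block pointing at two new direct blocks.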
func TestKBFSOpsWriteCauseSplit(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
newData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
expectedFullData := append([]byte{0}, newData...)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
// only copy the first half first
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData, int64(1)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append([]byte{0}, data[0:5]...)
}).Return(int64(5))
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
// new left block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id1, nil)
// new right block
config.mockCrypto.EXPECT().MakeTemporaryBlockID().Return(id2, nil)
// next we'll get the right block again
// then the second half
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), newData[5:10], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = data
}).Return(int64(5))
if err := config.KBFSOps().Write(ctx, n, newData, 1); err != nil {
t.Errorf("Got error on write: %+v", err)
}
b, _ := config.BlockCache().Get(node.BlockPointer)
newRootBlock := b.(*DirBlock)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
b, _ = config.DirtyBlockCache().Get(id, fileNode.BlockPointer, p.Branch)
pblock := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id1, rmd, config, uid),
p.Branch)
block1 := b.(*FileBlock)
b, _ = config.DirtyBlockCache().Get(id, makeBP(id2, rmd, config, uid),
p.Branch)
block2 := b.(*FileBlock)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(expectedFullData[0:6], block1.Contents) {
t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
} else if !bytes.Equal(expectedFullData[6:11], block2.Contents) {
t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
} else if !pblock.IsInd {
t.Errorf("Parent block is not indirect!")
} else if len(pblock.IPtrs) != 2 {
t.Errorf("Wrong number of pointers in pblock: %v", pblock.IPtrs)
} else if pblock.IPtrs[0].ID != id1 {
t.Errorf("Parent block has wrong id for block 1: %v (vs. %v)",
pblock.IPtrs[0].ID, id1)
} else if pblock.IPtrs[1].ID != id2 {
t.Errorf("Parent block has wrong id for block 2: %v",
pblock.IPtrs[1].ID)
} else if pblock.IPtrs[0].Off != 0 {
t.Errorf("Parent block has wrong offset for block 1: %d",
pblock.IPtrs[0].Off)
} else if pblock.IPtrs[1].Off != 6 {
t.Errorf("Parent block has wrong offset for block 5: %d",
pblock.IPtrs[1].Off)
} else if newRootBlock.Children["f"].Size != uint64(11) {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
pblock.IPtrs[0].BlockPointer: p.Branch,
pblock.IPtrs[1].BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 1, Len: uint64(len(newData))}})
}
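// mergeUnrefCache merges the folder's pending unref changes for the
// given file into md, so tests can check unref'd bytes directly.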
func mergeUnrefCache(
ops *folderBranchOps, lState *lockState, file path, md *RootMetadata) {
ops.blocks.blockLock.RLock(lState)
defer ops.blocks.blockLock.RUnlock(lState)
ops.blocks.unrefCache[file.tailPointer().Ref()].mergeUnrefCache(md)
}
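// Tests that a single write spanning two existing blocks of an indirect
// file updates both of them.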
func TestKBFSOpsWriteOverMultipleBlocks(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
filePtr := BlockPointer{
ID: fileID, KeyGen: 1, DataVer: 1,
Context: kbfsblock.Context{
Creator: uid,
},
}
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: filePtr,
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
data := []byte{1, 2, 3, 4, 5}
expectedFullData := []byte{5, 4, 1, 2, 3, 4, 5, 8, 7, 6}
so, err := newSyncOp(filePtr)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
// only copy the first half first
	config.mockBsplit.EXPECT().CopyUntilSplit(
		gomock.Any(), gomock.Any(), []byte{1, 2, 3}, int64(2)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block1.Contents[0:2], data[0:3]...)
}).Return(int64(3))
// update block 2
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), data[3:], int64(0)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(data, block2.Contents[2:]...)
}).Return(int64(2))
if err := config.KBFSOps().Write(ctx, n, data, 2); err != nil {
t.Errorf("Got error on write: %+v", err)
}
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during write: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
	} else if !bytes.Equal(expectedFullData[0:5], newBlock1.Contents) {
		t.Errorf("Wrote bad contents to block 1: %v", newBlock1.Contents)
	} else if !bytes.Equal(expectedFullData[5:10], newBlock2.Contents) {
		t.Errorf("Wrote bad contents to block 2: %v", newBlock2.Contents)
}
lState := makeFBOLockState()
// merge the unref cache to make it easy to check for changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 2, Len: uint64(len(data))}})
mergeUnrefCache(ops, lState, p, rmd)
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
// Read tests check the same error cases, so no need for similar write
// error tests
func TestKBFSOpsTruncateToZeroSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{}
if err := config.KBFSOps().Truncate(ctx, n, 0); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newRootBlock := getDirBlockFromCache(
t, config, id, node.BlockPointer, p.Branch)
lState := makeFBOLockState()
newRootBlock, err := updateWithDirtyEntries(
ctx, ops, lState, *p.parentPath(), newRootBlock)
require.NoError(t, err)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
} else if newRootBlock.Children["f"].GetWriter() != uid {
t.Errorf("Wrong last writer: %v",
newRootBlock.Children["f"].GetWriter())
} else if newRootBlock.Children["f"].Size != 0 {
t.Errorf("Wrong size for written file: %d",
newRootBlock.Children["f"].Size)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 0, Len: 0}})
}
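// Tests that truncating a file to its current size is a no-op and
// triggers no local change notifications.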
func TestKBFSOpsTruncateSameSize(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: makeBIFromID(fileID, u),
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, u), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := fileBlock.Contents
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %+v", err)
} else if config.observer.localChange != nil {
t.Errorf("Unexpected local update during truncate: %v",
config.observer.localChange)
} else if !bytes.Equal(data, fileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID}, nil)
}
func TestKBFSOpsTruncateSmallerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
data := []byte{1, 2, 3, 4, 5}
if err := config.KBFSOps().Truncate(ctx, n, 5); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 0}})
}
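// Tests that truncating an indirect file partway into its last block
// shortens that block and unrefs its old encoded size.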
func TestKBFSOpsTruncateShortensLastBlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid)
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data2 := []byte{10, 9}
if err := config.KBFSOps().Truncate(ctx, n, 7); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
newBlock2 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[1].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 7, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(block1.Contents, newBlock1.Contents) {
t.Errorf("Wrote bad contents for block 1: %v", newBlock1.Contents)
} else if !bytes.Equal(data2, newBlock2.Contents) {
t.Errorf("Wrote bad contents for block 2: %v", newBlock2.Contents)
} else if len(newPBlock.IPtrs) != 2 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+6 {
		// The file ID and the last block were both modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[1].BlockPointer: p.Branch,
})
}
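// Tests that truncating an indirect file into its first block removes
// the second block entirely and unrefs both old blocks.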
func TestKBFSOpsTruncateRemovesABlock(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
id1 := kbfsblock.FakeID(44)
id2 := kbfsblock.FakeID(45)
rootBlock := NewDirBlock().(*DirBlock)
fileInfo := makeBIFromID(fileID, uid)
rootBlock.Children["f"] = DirEntry{
BlockInfo: fileInfo,
EntryInfo: EntryInfo{
Size: 10,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.IsInd = true
fileBlock.IPtrs = []IndirectFilePtr{
makeIFP(id1, rmd, config, uid, 5, 0),
makeIFP(id2, rmd, config, uid, 6, 5),
}
block1 := NewFileBlock().(*FileBlock)
block1.Contents = []byte{5, 4, 3, 2, 1}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{10, 9, 8, 7, 6}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
so, err := newSyncOp(fileInfo.BlockPointer)
require.NoError(t, err)
rmd.AddOp(so)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
data := []byte{5, 4, 3, 2}
if err := config.KBFSOps().Truncate(ctx, n, 4); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newPBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
newBlock1 := getFileBlockFromCache(t, config, id,
fileBlock.IPtrs[0].BlockPointer, p.Branch)
lState := makeFBOLockState()
// merge unref changes so we can easily check the block changes
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 4, Len: 0}})
mergeUnrefCache(ops, lState, p, rmd)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newBlock1.Contents) {
t.Errorf("Wrote bad contents: %v", newBlock1.Contents)
} else if len(newPBlock.IPtrs) != 1 {
t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
} else if rmd.UnrefBytes() != 0+5+6 {
		// The file ID and both blocks were all modified and marked dirty
t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
rmd.UnrefBytes())
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
fileBlock.IPtrs[0].BlockPointer: p.Branch,
})
}
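// Tests that truncating a file past its current end zero-fills the
// difference, which is recorded as a write of the extra bytes.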
func TestKBFSOpsTruncateBiggerSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
fileID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["f"] = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: makeBP(fileID, rmd, config, uid),
EncodedSize: 1,
},
EntryInfo: EntryInfo{
Type: File,
},
}
fileBlock := NewFileBlock().(*FileBlock)
fileBlock.Contents = []byte{1, 2, 3, 4, 5}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
fileNode := pathNode{makeBP(fileID, rmd, config, uid), "f"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, fileNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
config.mockBsplit.EXPECT().CopyUntilSplit(
gomock.Any(), gomock.Any(), []byte{0, 0, 0, 0, 0}, int64(5)).
Do(func(block *FileBlock, lb bool, data []byte, off int64) {
block.Contents = append(block.Contents, data...)
}).Return(int64(5))
data := []byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}
if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
t.Errorf("Got error on truncate: %+v", err)
}
newFileBlock := getFileBlockFromCache(t, config, id, fileNode.BlockPointer,
p.Branch)
if len(ops.nodeCache.PathFromNode(config.observer.localChange).path) !=
len(p.path) {
t.Errorf("Missing or incorrect local update during truncate: %v",
config.observer.localChange)
} else if ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID) {
t.Errorf("Wrong context value passed in local notify: %v",
config.observer.ctx.Value(tCtxID))
} else if !bytes.Equal(data, newFileBlock.Contents) {
t.Errorf("Wrote bad contents: %v", data)
}
checkBlockCache(t, config, id, []kbfsblock.ID{rootID, fileID},
map[BlockPointer]BranchName{
fileNode.BlockPointer: p.Branch,
})
// A truncate past the end of the file actually translates into a
// write for the difference
checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
[]WriteRange{{Off: 5, Len: 5}})
}
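// testSetExSuccess exercises SetEx for the given entry type and desired
// exec status; only File<->Exec transitions should produce changes.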
func testSetExSuccess(t *testing.T, entryType EntryType, ex bool) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, entryType != Sym)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Size: 1,
Type: entryType,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedChanges := 1
// SetEx() should do nothing when the exec status doesn't change.
if entryType == Sym || entryType == Dir || (entryType == File && !ex) ||
(entryType == Exec && ex) {
expectedChanges = 0
}
var expectedPath path
var newRmd ImmutableRootMetadata
var blocks []kbfsblock.ID
if expectedChanges > 0 {
// sync block
blocks = make([]kbfsblock.ID, 2)
expectedPath, _ = expectSyncBlock(t, config, nil, uid, id, "",
*p.parentPath(), rmd, false, 0, 0, 0, &newRmd, blocks)
expectedPath.path = append(expectedPath.path, aNode)
}
// SetEx() should only change the type of File and Exec.
var expectedType EntryType
if entryType == File && ex {
expectedType = Exec
} else if entryType == Exec && !ex {
expectedType = File
} else {
expectedType = entryType
}
	// chmod a+x (or a-x) on "a"
err := config.KBFSOps().SetEx(ctx, n, ex)
if err != nil {
t.Errorf("Got unexpected error on setex: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if expectedChanges != len(config.observer.batchChanges) {
t.Errorf("got changed=%d, expected %d",
len(config.observer.batchChanges), expectedChanges)
} else {
if blocks != nil {
rootBlock = getDirBlockFromCache(
t, config, id, newP.path[0].BlockPointer, newP.Branch)
}
if rootBlock.Children["a"].Type != expectedType {
t.Errorf("a has type %s, expected %s",
rootBlock.Children["a"].Type, expectedType)
} else if expectedChanges > 0 {
// SetEx() should always change the ctime of
// non-symlinks.
// pretend it's a rename so only ctime gets checked
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
expectedType, "", true)
}
}
if expectedChanges > 0 {
blocks = blocks[:len(blocks)-1] // last block is never in the cache
}
checkBlockCache(t, config, id, append(blocks, rootID), nil)
if expectedChanges > 0 {
// make sure the setAttrOp is correct
sao, ok := newRmd.data.Changes.Ops[0].(*setAttrOp)
if !ok {
t.Errorf("Couldn't find the setAttrOp")
}
checkOp(t, sao.OpCommon, nil, nil, nil)
dirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
newP.path[0].BlockPointer}
if sao.Dir != dirUpdate {
t.Errorf("Incorrect dir update in op: %v vs. %v", sao.Dir,
dirUpdate)
} else if sao.Name != "a" {
t.Errorf("Incorrect name in op: %v", sao.Name)
} else if sao.Attr != exAttr {
t.Errorf("Incorrect attr in op: %v", sao.Attr)
}
}
}
func TestSetExFileSuccess(t *testing.T) {
testSetExSuccess(t, File, true)
}
func TestSetNoExFileSuccess(t *testing.T) {
testSetExSuccess(t, File, false)
}
func TestSetExExecSuccess(t *testing.T) {
testSetExSuccess(t, Exec, true)
}
func TestSetNoExExecSuccess(t *testing.T) {
testSetExSuccess(t, Exec, false)
}
func TestSetExDirSuccess(t *testing.T) {
testSetExSuccess(t, Dir, true)
}
func TestSetNoExDirSuccess(t *testing.T) {
testSetExSuccess(t, Dir, false)
}
func TestSetExSymSuccess(t *testing.T) {
testSetExSuccess(t, Sym, true)
}
func TestSetNoExSymSuccess(t *testing.T) {
testSetExSuccess(t, Sym, false)
}
func TestSetExFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
// chmod a+x a
if err := config.KBFSOps().SetEx(ctx, n, true); err == nil {
t.Errorf("Got no expected error on setex")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setex: %+v", err)
}
}
// Other SetEx failure cases are all the same as any other block sync
func TestSetMtimeSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, true)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
uid, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, uid),
EntryInfo: EntryInfo{
Type: File,
},
}
node := pathNode{makeBP(rootID, rmd, config, uid), "p"}
aNode := pathNode{makeBP(aID, rmd, config, uid), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
// sync block
var newRmd ImmutableRootMetadata
blocks := make([]kbfsblock.ID, 2)
expectedPath, _ := expectSyncBlock(t, config, nil, uid, id, "",
*p.parentPath(), rmd, false, 0, 0, 0, &newRmd, blocks)
expectedPath.path = append(expectedPath.path, aNode)
newMtime := time.Now()
err := config.KBFSOps().SetMtime(ctx, n, &newMtime)
if err != nil {
t.Errorf("Got unexpected error on setmtime: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
b0 := getDirBlockFromCache(
t, config, id, newP.path[0].BlockPointer, newP.Branch)
if b0.Children["a"].Mtime != newMtime.UnixNano() {
t.Errorf("a has wrong mtime: %v", b0.Children["a"].Mtime)
} else {
checkNewPath(t, ctx, config, newP, expectedPath, newRmd.ReadOnly(), blocks,
Exec, "", false)
}
blocks = blocks[:len(blocks)-1] // last block is never in the cache
checkBlockCache(t, config, id, append(blocks, rootID), nil)
// make sure the setAttrOp is correct
sao, ok := newRmd.data.Changes.Ops[0].(*setAttrOp)
if !ok {
t.Errorf("Couldn't find the setAttrOp")
}
checkOp(t, sao.OpCommon, nil, nil, nil)
dirUpdate := blockUpdate{rmd.data.Dir.BlockPointer,
newP.path[0].BlockPointer}
if sao.Dir != dirUpdate {
t.Errorf("Incorrect dir update in op: %v vs. %v", sao.Dir,
dirUpdate)
} else if sao.Name != "a" {
t.Errorf("Incorrect name in op: %v", sao.Name)
} else if sao.Attr != mtimeAttr {
t.Errorf("Incorrect attr in op: %v", sao.Attr)
}
}
func TestSetMtimeNull(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
oldMtime := time.Now().UnixNano()
rootBlock.Children["a"] = DirEntry{
BlockInfo: makeBIFromID(aID, u),
EntryInfo: EntryInfo{
Type: File,
Mtime: oldMtime,
},
}
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
if err := config.KBFSOps().SetMtime(ctx, n, nil); err != nil {
t.Errorf("Got unexpected error on null setmtime: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if rootBlock.Children["a"].Mtime != oldMtime {
t.Errorf("a has wrong mtime: %v", rootBlock.Children["a"].Mtime)
} else if newP.path[0].ID != p.path[0].ID {
t.Errorf("Got back a changed path for null setmtime test: %v", newP)
}
checkBlockCache(t, config, id, nil, nil)
}
func TestMtimeFailNoSuchName(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
rootBlock := NewDirBlock().(*DirBlock)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
expectedErr := NoSuchNameError{p.tailName()}
newMtime := time.Now()
if err := config.KBFSOps().SetMtime(ctx, n, &newMtime); err == nil {
t.Errorf("Got no expected error on setmtime")
} else if err != expectedErr {
t.Errorf("Got unexpected error on setmtime: %+v", err)
}
}
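// getOrCreateSyncInfo is a test helper that takes blockLock and returns
// the syncInfo for the given dir entry.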
func getOrCreateSyncInfo(
ops *folderBranchOps, lState *lockState, de DirEntry) (*syncInfo, error) {
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
return ops.blocks.getOrCreateSyncInfoLocked(lState, de)
}
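// makeBlockStateDirty marks the given block pointer dirty in the
// folder's dirty-file state for path p.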
func makeBlockStateDirty(config Config, kmd KeyMetadata, p path,
ptr BlockPointer) {
ops := getOps(config, kmd.TlfID())
lState := makeFBOLockState()
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
df := ops.blocks.getOrCreateDirtyFileLocked(lState, p)
df.setBlockDirty(ptr)
}
// SetMtime failure cases are all the same as any other block sync
func TestSyncCleanSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
u, id, rmd := injectNewRMD(t, config)
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.ID = rootID
aID := kbfsblock.FakeID(43)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
aNode := pathNode{makeBP(aID, rmd, config, u), "a"}
p := path{FolderBranch{Tlf: id}, []pathNode{node, aNode}}
ops := getOps(config, id)
n := nodeFromPath(t, ops, p)
// fsync a
if err := config.KBFSOps().SyncAll(ctx, n.GetFolderBranch()); err != nil {
t.Errorf("Got unexpected error on sync: %+v", err)
}
newP := ops.nodeCache.PathFromNode(n)
if len(newP.path) != len(p.path) {
// should be the exact same path back
t.Errorf("Got a different length path back: %v", newP)
} else {
for i, n := range newP.path {
if n != p.path[i] {
t.Errorf("Node %d differed: %v", i, n)
}
}
}
checkBlockCache(t, config, id, nil, nil)
}
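// expectSyncDirtyBlock sets up mock expectations for syncing one dirty
// block: the block is placed in the dirty cache, checked for splits at
// splitAt, readied with padSize bytes of padding, and put to the block
// server under a new ID.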
func expectSyncDirtyBlock(config *ConfigMock, kmd KeyMetadata,
p path, ptr BlockPointer, block *FileBlock, splitAt int64,
padSize int, opsLockHeld bool) *gomock.Call {
branch := MasterBranch
if config.mockDirtyBcache != nil {
config.mockDirtyBcache.EXPECT().IsDirty(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(true)
config.mockDirtyBcache.EXPECT().Get(gomock.Any(), ptrMatcher{ptr},
branch).AnyTimes().Return(block, nil)
} else {
config.DirtyBlockCache().Put(p.Tlf, ptr, branch, block)
}
if !opsLockHeld {
makeBlockStateDirty(config, kmd, p, ptr)
}
c1 := config.mockBsplit.EXPECT().CheckSplit(block).Return(splitAt)
newID := kbfsblock.FakeIDAdd(ptr.ID, 100)
// Ideally, we'd use the size of block.Contents at the time
// that Ready() is called, but GoMock isn't expressive enough
// for that.
newEncBuf := make([]byte, len(block.Contents)+padSize)
readyBlockData := ReadyBlockData{
buf: newEncBuf,
}
c2 := config.mockBops.EXPECT().Ready(gomock.Any(), kmdMatcher{kmd}, block).
After(c1).Return(newID, len(block.Contents), readyBlockData, nil)
newPtr := BlockPointer{ID: newID}
if config.mockBcache != nil {
config.mockBcache.EXPECT().Put(ptrMatcher{newPtr}, kmd.TlfID(), block, PermanentEntry).Return(nil)
config.mockBcache.EXPECT().DeletePermanent(newID).Return(nil)
} else {
// Nothing to do, since the cache entry is added and
// removed.
}
config.mockBserv.EXPECT().Put(gomock.Any(), kmd.TlfID(), newID,
gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
return c2
}
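// putAndCleanAnyBlock expects any block to move from the dirty cache
// into the clean cache, wiring up Get expectations for the newly
// cached block.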
func putAndCleanAnyBlock(config *ConfigMock, p path) {
config.mockBcache.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), TransientEntry).
Do(func(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) {
config.mockDirtyBcache.EXPECT().
Get(gomock.Any(), ptrMatcher{BlockPointer{ID: ptr.ID}},
p.Branch).AnyTimes().Return(nil, NoSuchBlockError{ptr.ID})
config.mockBcache.EXPECT().
Get(ptrMatcher{BlockPointer{ID: ptr.ID}}).
AnyTimes().Return(block, nil)
}).AnyTimes().Return(nil)
config.mockDirtyBcache.EXPECT().Delete(gomock.Any(), gomock.Any(),
p.Branch).AnyTimes().Return(nil)
}
func TestKBFSOpsStatRootSuccess(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
node := pathNode{makeBP(rootID, rmd, config, u), "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
_, err := config.KBFSOps().Stat(ctx, n)
if err != nil {
t.Errorf("Error on Stat: %+v", err)
}
}
func TestKBFSOpsFailingRootOps(t *testing.T) {
mockCtrl, config, ctx, cancel := kbfsOpsInit(t, false)
defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
id, h, rmd := createNewRMD(t, config, "alice", false)
ops := getOps(config, id)
ops.head = makeImmutableRMDForTest(t, config, rmd, fakeMdID(1))
ops.headStatus = headTrusted
u := h.FirstResolvedWriter()
rootID := kbfsblock.FakeID(42)
rmd.data.Dir.BlockPointer = makeBP(rootID, rmd, config, u)
node := pathNode{rmd.data.Dir.BlockPointer, "p"}
p := path{FolderBranch{Tlf: id}, []pathNode{node}}
n := nodeFromPath(t, ops, p)
// TODO: Make sure Read, Write, and Truncate fail also with
// InvalidPathError{}.
err := config.KBFSOps().SetEx(ctx, n, true)
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetEx: %+v", err)
}
err = config.KBFSOps().SetMtime(ctx, n, &time.Time{})
if _, ok := err.(InvalidParentPathError); !ok {
t.Errorf("Unexpected error on SetMtime: %+v", err)
}
// TODO: Sync succeeds, but it should fail. Fix this!
}
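// testBGObserver signals a channel on batch changes, and ignores local
// and handle changes.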
type testBGObserver struct {
c chan<- struct{}
}
func (t *testBGObserver) LocalChange(ctx context.Context, node Node,
write WriteRange) {
// ignore
}
func (t *testBGObserver) BatchChanges(ctx context.Context,
changes []NodeChange) {
t.c <- struct{}{}
}
func (t *testBGObserver) TlfHandleChange(ctx context.Context,
	newHandle *TlfHandle) {
	// ignore
}
// Tests that the background flusher will sync a dirty file if the
// application does not.
func TestKBFSOpsBackgroundFlush(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.noBGFlush = true
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
kbfsOps := config.KBFSOps()
nodeA, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
if err := kbfsOps.Write(ctx, nodeA, data, 0); err != nil {
t.Errorf("Got error on write: %+v", err)
}
c := make(chan struct{})
observer := &testBGObserver{c}
config.Notifier().RegisterForChanges(
[]FolderBranch{rootNode.GetFolderBranch()}, observer)
// start the background flusher
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
go ops.backgroundFlusher(1 * time.Millisecond)
// Make sure we get the notification
select {
case <-c:
case <-ctx.Done():
t.Fatalf("Timeout waiting for signal")
}
if err := kbfsOps.Write(ctx, nodeA, data, 0); err != nil {
t.Errorf("Got error on write: %+v", err)
}
select {
case <-c:
case <-ctx.Done():
t.Fatalf("Timeout waiting for signal")
}
}
func TestKBFSOpsWriteRenameStat(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %+v", err)
}
// Stat it again.
newEi, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
// CTime is allowed to change after a rename, but nothing else.
if ei.Type != newEi.Type || ei.Size != newEi.Size ||
ei.Mtime != newEi.Mtime {
t.Errorf("Entry info unexpectedly changed from %+v to %+v", ei, newEi)
}
}
func TestKBFSOpsWriteRenameGetDirChildren(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to it.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Stat it.
ei, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
if ei.Size != 1 {
t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
}
// Rename it.
err = kbfsOps.Rename(ctx, rootNode, "a", rootNode, "b")
if err != nil {
t.Fatalf("Couldn't rename; %+v", err)
}
// Get the stats via GetDirChildren.
eis, err := kbfsOps.GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatalf("Couldn't stat file: %+v", err)
}
// CTime is allowed to change after a rename, but nothing else.
if newEi := eis["b"]; ei.Type != newEi.Type || ei.Size != newEi.Size ||
ei.Mtime != newEi.Mtime {
t.Errorf("Entry info unexpectedly changed from %+v to %+v",
ei, eis["b"])
}
}
func TestKBFSOpsCreateFileWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Remove the file, which will archive the block
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %+v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Create a second file, which will use the same initial block ID
// from the cache, even though it's been archived, and will be
// forced to try again.
_, _, err = kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create second file: %+v", err)
}
}
func TestKBFSOpsMultiBlockSyncWithArchivedBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Make the blocks small, with multiple levels of indirection, but
// make the unembedded size large, so we don't create thousands of
// unembedded block change blocks.
blockSize := int64(5)
bsplit := &BlockSplitterSimple{blockSize, 2, 100 * 1024}
config.SetBlockSplitter(bsplit)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write a few blocks
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
// Now overwrite those blocks to archive them
newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
err = kbfsOps.Write(ctx, fileNode, newData, 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
// Wait for the archiving to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server")
}
// Now write the original first block, which has been archived,
// and make sure it works.
err = kbfsOps.Write(ctx, fileNode, data[0:blockSize], 0)
if err != nil {
t.Fatalf("Couldn't write file: %+v", err)
}
err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync file: %+v", err)
}
}
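// corruptBlockServer wraps a BlockServer and appends an extra byte to
// all fetched block data, so the blocks fail hash verification.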
type corruptBlockServer struct {
BlockServer
}
func (cbs corruptBlockServer) Get(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
data, keyServerHalf, err := cbs.BlockServer.Get(ctx, tlfID, id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return append(data, 0), keyServerHalf, nil
}
func TestKBFSOpsFailToReadUnverifiableBlock(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
config.SetBlockServer(&corruptBlockServer{
BlockServer: config.BlockServer(),
})
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Read using a different "device"
config2 := ConfigAsUser(config, "test_user")
defer CheckConfigAndShutdown(ctx, t, config2)
// Shutdown the mdserver explicitly before the state checker tries to run
defer config2.MDServer().Shutdown()
rootNode2 := GetRootNodeOrBust(ctx, t, config2, "test_user", false)
// Lookup the file, which should fail on block ID verification
kbfsOps2 := config2.KBFSOps()
_, _, err = kbfsOps2.Lookup(ctx, rootNode2, "a")
if _, ok := errors.Cause(err).(kbfshash.HashMismatchError); !ok {
t.Fatalf("Could unexpectedly lookup the file: %+v", err)
}
}
// Test that the size of a single empty block doesn't change. If this
// test ever fails, consult max or strib before merging.
func TestKBFSOpsEmptyTlfSize(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Create a TLF.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
status, _, err := config.KBFSOps().FolderStatus(ctx,
rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't get folder status: %+v", err)
}
if status.DiskUsage != 313 {
t.Fatalf("Disk usage of an empty TLF is no longer 313. " +
"Talk to max or strib about why this matters.")
}
}
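// cryptoFixedTlf overrides MakeRandomTlfID to always return a fixed
// TLF ID.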
type cryptoFixedTlf struct {
Crypto
tlf tlf.ID
}
func (c cryptoFixedTlf) MakeRandomTlfID(isPublic bool) (tlf.ID, error) {
return c.tlf, nil
}
// TestKBFSOpsMaliciousMDServerRange tries to trick KBFSOps into
// accepting bad MDs.
func TestKBFSOpsMaliciousMDServerRange(t *testing.T) {
config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "mallory")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config1, ctx, cancel)
// Create alice's TLF.
rootNode1 := GetRootNodeOrBust(ctx, t, config1, "alice", false)
fb1 := rootNode1.GetFolderBranch()
kbfsOps1 := config1.KBFSOps()
_, _, err := kbfsOps1.CreateFile(ctx, rootNode1, "dummy.txt", false, NoExcl)
require.NoError(t, err)
// Create mallory's fake TLF using the same TLF ID as alice's.
config2 := ConfigAsUser(config1, "mallory")
crypto2 := cryptoFixedTlf{config2.Crypto(), fb1.Tlf}
config2.SetCrypto(crypto2)
mdserver2, err := NewMDServerMemory(mdServerLocalConfigAdapter{config2})
require.NoError(t, err)
config2.MDServer().Shutdown()
config2.SetMDServer(mdserver2)
config2.SetMDCache(NewMDCacheStandard(1))
rootNode2 := GetRootNodeOrBust(ctx, t, config2, "alice,mallory", false)
require.Equal(t, fb1.Tlf, rootNode2.GetFolderBranch().Tlf)
kbfsOps2 := config2.KBFSOps()
// Add some operations to get mallory's TLF to have a higher
// MetadataVersion.
_, _, err = kbfsOps2.CreateFile(
ctx, rootNode2, "dummy.txt", false, NoExcl)
require.NoError(t, err)
err = kbfsOps2.RemoveEntry(ctx, rootNode2, "dummy.txt")
require.NoError(t, err)
// Now route alice's TLF to mallory's MD server.
config1.SetMDServer(mdserver2.copy(mdServerLocalConfigAdapter{config1}))
// Simulate the server triggering alice to update.
config1.SetKeyCache(NewKeyCacheStandard(1))
err = kbfsOps1.SyncFromServerForTesting(ctx, fb1)
// TODO: We can actually fake out the PrevRoot pointer, too
// and then we'll be caught by the handle check. But when we
// have MDOps do the handle check, that'll trigger first.
require.IsType(t, MDPrevRootMismatch{}, err)
}
// TODO: Test malicious mdserver and rekey flow against wrong
// TLFs being introduced upon rekey.
// Test that if GetTLFCryptKeys fails to create a TLF, the second
// attempt will also fail with the same error. Regression test for
// KBFS-1929.
func TestGetTLFCryptKeysAfterFirstError(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
createErr := errors.New("Cannot create this TLF")
mdserver := &shimMDServer{
MDServer: config.MDServer(),
nextErr: createErr,
}
config.SetMDServer(mdserver)
h := parseTlfHandleOrBust(t, config, "alice", false)
_, _, err := config.KBFSOps().GetTLFCryptKeys(ctx, h)
if err != createErr {
t.Fatalf("Got unexpected error when creating TLF: %+v", err)
}
// Reset the error.
mdserver.nextErr = createErr
// Should get the same error, otherwise something's wrong.
_, _, err = config.KBFSOps().GetTLFCryptKeys(ctx, h)
if err != createErr {
t.Fatalf("Got unexpected error when creating TLF: %+v", err)
}
}
func TestForceFastForwardOnEmptyTLF(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
// Look up bob's public folder.
h := parseTlfHandleOrBust(t, config, "bob", true)
_, _, err := config.KBFSOps().GetOrCreateRootNode(ctx, h, MasterBranch)
if _, ok := err.(WriteAccessError); !ok {
t.Fatalf("Unexpected err reading a public TLF: %+v", err)
}
// There's only one folder at this point.
kbfsOps := config.KBFSOps().(*KBFSOpsStandard)
kbfsOps.opsLock.RLock()
var ops *folderBranchOps
for _, fbo := range kbfsOps.ops {
ops = fbo
break
}
kbfsOps.opsLock.RUnlock()
// FastForward shouldn't do anything, since the TLF hasn't been
// cleared yet.
config.KBFSOps().ForceFastForward(ctx)
err = ops.forcedFastForwards.Wait(ctx)
if err != nil {
t.Fatalf("Couldn't wait for fast forward: %+v", err)
}
}
func TestKBFSOpsSyncAllTwoFiles(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
nodeA, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
nodeB, _, err := kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Write to A.
data := []byte{1}
err = kbfsOps.Write(ctx, nodeA, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Write to B.
data = []byte{2}
err = kbfsOps.Write(ctx, nodeB, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Sync both.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync; %+v", err)
}
}
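// Tests that a manually-created (not-yet-synced) file is visible
// locally, and becomes visible to other devices after SyncAll.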
func TestKBFSOpsCreateSyncAll(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
// Manually create a file without actually syncing it.
// TODO(KBFS-2076) remove all this duplicated code.
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
name := "myfile"
var fileNode Node
{
rootDir := ops.nodeCache.PathFromNode(rootNode)
co, err := newCreateOp(name, rootDir.tailPointer(), File)
if err != nil {
t.Fatal(err)
}
newBlock := &FileBlock{}
newID, err := config.cryptoPure().MakeTemporaryBlockID()
if err != nil {
t.Fatal(err)
}
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
t.Fatal(err)
}
newPtr := BlockPointer{
ID: newID,
KeyGen: 1,
DataVer: 1,
DirectType: DirectBlock,
Context: kbfsblock.MakeFirstContext(
session.UID, keybase1.BlockType_DATA),
}
config.DirtyBlockCache().Put(ops.id(), newPtr, ops.branch(), newBlock)
co.AddRefBlock(newPtr)
fileNode, err = ops.nodeCache.GetOrCreate(newPtr, name, rootNode)
if err != nil {
t.Fatal(err)
}
de := DirEntry{
BlockInfo: BlockInfo{
BlockPointer: newPtr,
EncodedSize: 0,
},
EntryInfo: EntryInfo{
Type: File,
Size: 0,
},
}
lState := makeFBOLockState()
ops.blocks.AddDirEntryInCache(lState, rootDir, name, de)
ops.dirOps = append(
ops.dirOps, cachedDirOp{co, []Node{rootNode, fileNode}})
}
// Write to A.
data := []byte{1}
kbfsOps := config.KBFSOps()
err := kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %+v", err)
}
// Make sure we can see it before the sync happens.
_, ei, err := kbfsOps.Lookup(ctx, rootNode, name)
if err != nil {
t.Fatal(err)
}
if ei.Size != uint64(len(data)) {
t.Fatalf("Unexpected size: %d vs %d", ei.Size, len(data))
}
// Sync everything.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync; %+v", err)
}
// Check that bob can see it.
config2 := ConfigAsUser(config, "bob")
defer CheckConfigAndShutdown(ctx, t, config2)
rootNodeBob := GetRootNodeOrBust(ctx, t, config2, "alice,bob", false)
fileNodeBob, _, err := config2.KBFSOps().Lookup(ctx, rootNodeBob, name)
if err != nil {
t.Fatal(err)
}
gotData := make([]byte, len(data))
_, err = config2.KBFSOps().Read(ctx, fileNodeBob, gotData, 0)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, gotData) {
t.Fatalf("Data didn't match: %v vs %v", data, gotData)
}
if len(ops.blocks.deCache) > 0 {
t.Fatalf("%d unexpected deCache entries leftover",
len(ops.blocks.deCache))
}
}
func TestKBFSOpsRemoveSyncAll(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
kbfsOps := config.KBFSOps()
name := "myfile"
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, name, false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Manually remove a file without actually syncing it.
// TODO(KBFS-2076) remove all this duplicated code.
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
{
rootDir := ops.nodeCache.PathFromNode(rootNode)
ro, err := newRmOp(name, rootDir.tailPointer())
if err != nil {
t.Fatal(err)
}
ro.setFinalPath(rootDir)
filePath := ops.nodeCache.PathFromNode(fileNode)
ro.AddUnrefBlock(filePath.tailPointer())
lState := makeFBOLockState()
ops.blocks.RemoveDirEntryInCache(lState, rootDir, name)
ops.dirOps = append(ops.dirOps, cachedDirOp{ro, []Node{rootNode}})
}
// Make sure we can't see it before the sync happens.
children, err := kbfsOps.GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatal(err)
}
if len(children) != 0 {
t.Fatalf("Unexpected children: %v", children)
}
// Sync everything.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync; %+v", err)
}
if len(ops.blocks.deCache) > 0 {
t.Fatalf("%d unexpected deCache entries leftover",
len(ops.blocks.deCache))
}
}
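// Tests that a manually-cached rename within the same directory is
// visible locally before the sync, and to other devices after SyncAll.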
func TestKBFSOpsRenameSameDirSyncAll(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
kbfsOps := config.KBFSOps()
name := "myfile"
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, name, false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Manually rename a file without actually syncing it.
// TODO(KBFS-2076) remove all this duplicated code.
newName := "myfile"
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
{
rootDir := ops.nodeCache.PathFromNode(rootNode)
filePath := ops.nodeCache.PathFromNode(fileNode)
ro, err := newRenameOp(name, rootDir.tailPointer(), newName,
rootDir.tailPointer(), filePath.tailPointer(), File)
if err != nil {
t.Fatal(err)
}
lState := makeFBOLockState()
dblock, err := ops.blocks.GetDirBlockForReading(
ctx, lState, ops.head, rootDir.tailPointer(), ops.branch(), rootDir)
if err != nil {
t.Fatal(err)
}
_ = ops.blocks.RenameDirEntryInCache(
lState, rootDir, name, rootDir, newName, dblock.Children[name])
ops.dirOps = append(ops.dirOps, cachedDirOp{ro, []Node{rootNode}})
}
// Make sure we see the new name before the sync happens.
checkChild := func(config Config, rootNode Node) {
children, err := config.KBFSOps().GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatal(err)
}
if len(children) != 1 {
t.Fatalf("Unexpected children: %v", children)
}
if _, ok := children[newName]; !ok {
t.Fatalf("Child %s missing: %v", newName, children)
}
}
checkChild(config, rootNode)
// Sync everything.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync; %+v", err)
}
// Check that bob can see it.
config2 := ConfigAsUser(config, "bob")
defer CheckConfigAndShutdown(ctx, t, config2)
rootNodeBob := GetRootNodeOrBust(ctx, t, config2, "alice,bob", false)
checkChild(config2, rootNodeBob)
if len(ops.blocks.deCache) > 0 {
t.Fatalf("%d unexpected deCache entries leftover",
len(ops.blocks.deCache))
}
}
func TestKBFSOpsSetExSyncAll(t *testing.T) {
config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
// TODO: Use kbfsTestShutdownNoMocks.
defer kbfsTestShutdownNoMocks(t, config, ctx, cancel)
rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", false)
kbfsOps := config.KBFSOps()
name := "myfile"
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, name, false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %+v", err)
}
// Manually set the executability of a file without actually
// syncing it. TODO(KBFS-2076) remove all this duplicated code.
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
{
rootDir := ops.nodeCache.PathFromNode(rootNode)
filePath := ops.nodeCache.PathFromNode(fileNode)
sao, err := newSetAttrOp(name, rootDir.tailPointer(),
exAttr, filePath.tailPointer())
if err != nil {
t.Fatal(err)
}
lState := makeFBOLockState()
dblock, err := ops.blocks.GetDirBlockForReading(
ctx, lState, ops.head, rootDir.tailPointer(), ops.branch(), rootDir)
if err != nil {
t.Fatal(err)
}
de := dblock.Children[name]
de.Type = Exec
_ = ops.blocks.SetAttrInDirEntryInCache(lState, filePath, de, sao.Attr)
ops.dirOps = append(ops.dirOps, cachedDirOp{sao, []Node{fileNode}})
}
// Make sure we see the new attribute before the sync happens.
checkChild := func(config Config, rootNode Node) {
children, err := config.KBFSOps().GetDirChildren(ctx, rootNode)
if err != nil {
t.Fatal(err)
}
if len(children) != 1 {
t.Fatalf("Unexpected children: %v", children)
}
if ei, ok := children[name]; !ok {
t.Fatalf("Child %s missing: %v", name, children)
} else if ei.Type != Exec {
t.Fatalf("Unexpected type: %s", ei.Type)
}
}
checkChild(config, rootNode)
// Sync everything.
err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync; %+v", err)
}
// Check that bob can see it.
config2 := ConfigAsUser(config, "bob")
defer CheckConfigAndShutdown(ctx, t, config2)
//rootNodeBob := GetRootNodeOrBust(ctx, t, config2, "alice,bob", false)
//checkChild(config2, rootNodeBob)
if len(ops.blocks.deCache) > 0 {
t.Fatalf("%d unexpected deCache entries leftover",
len(ops.blocks.deCache))
}
}
 | 1 | 16556 | Probably overkill, but maybe we should be using a fake clock here. I won't push on whatever you decide though. | keybase-kbfs | go |
@@ -690,7 +690,7 @@ func TestKBFSOpsConcurBlockSyncTruncate(t *testing.T) {
wg.Wait()
- // Do this in the main goroutine since t isn't goroutine safe,
+ // Do this in the main goroutine since it isn't goroutine safe,
// and do this after wg.Wait() since we only know it's set
// after the goroutine exits.
if syncErr != nil { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"errors"
"runtime"
"sync"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
// CounterLock keeps track of the number of lock attempts
type CounterLock struct {
countLock sync.Mutex
realLock sync.Mutex
count int
}
func (cl *CounterLock) Lock() {
cl.countLock.Lock()
cl.count++
cl.countLock.Unlock()
cl.realLock.Lock()
}
func (cl *CounterLock) Unlock() {
cl.realLock.Unlock()
}
func (cl *CounterLock) GetCount() int {
cl.countLock.Lock()
defer cl.countLock.Unlock()
return cl.count
}
func kbfsOpsConcurInit(t *testing.T, users ...libkb.NormalizedUsername) (
*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
return kbfsOpsInitNoMocks(t, users...)
}
func kbfsConcurTestShutdown(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
kbfsTestShutdownNoMocks(t, config, ctx, cancel)
}
// TODO: Get rid of all users of this.
func kbfsConcurTestShutdownNoCheck(t *testing.T, config *ConfigLocal,
ctx context.Context, cancel context.CancelFunc) {
kbfsTestShutdownNoMocksNoCheck(t, config, ctx, cancel)
}
// Test that only one of two concurrent GetRootMD requests can end up
// fetching the MD from the server. The second one should wait, and
// then get it from the MD cache.
func TestKBFSOpsConcurDoubleMDGet(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onGetStalledCh, getUnstallCh, ctxStallGetForTLF :=
StallMDOp(ctx, config, StallableMDGetForTLF, 1)
// Initialize the MD using a different config
c2 := ConfigAsUser(config, "test_user")
defer CheckConfigAndShutdown(ctx, t, c2)
rootNode := GetRootNodeOrBust(ctx, t, c2, "test_user", false)
n := 10
c := make(chan error, n)
cl := &CounterLock{}
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
ops.mdWriterLock.locker = cl
for i := 0; i < n; i++ {
go func() {
_, _, _, err := ops.getRootNode(ctxStallGetForTLF)
c <- err
}()
}
// wait until the first one starts the get
<-onGetStalledCh
// make sure that the second goroutine has also started its write
// call, and thus must be queued behind the first one (since we
// are guaranteed the first one is currently running, and they
// both need the same lock).
for cl.GetCount() < 2 {
runtime.Gosched()
}
// Now let the first one complete. The second one should find the
// MD in the cache, and thus never call MDOps.Get().
close(getUnstallCh)
for i := 0; i < n; i++ {
err := <-c
if err != nil {
t.Errorf("Got an error doing concurrent MD gets: err=(%s)", err)
}
}
}
// Test that a read can happen concurrently with a sync
func TestKBFSOpsConcurReadDuringSync(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config, StallableMDAfterPut, 1)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
// start the sync
errChan := make(chan error)
go func() {
errChan <- kbfsOps.Sync(putCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onPutStalledCh
// now make sure we can read the file and see the byte we wrote
buf := make([]byte, 1)
nr, err := kbfsOps.Read(ctx, fileNode, buf, 0)
if err != nil {
t.Errorf("Couldn't read data: %v\n", err)
}
if nr != 1 || !bytes.Equal(data, buf) {
t.Errorf("Got wrong data %v; expected %v", buf, data)
}
// now unblock Sync and make sure there was no error
close(putUnstallCh)
err = <-errChan
if err != nil {
t.Errorf("Sync got an error: %v", err)
}
}
// Test that writes can happen concurrently with a sync
func testKBFSOpsConcurWritesDuringSync(t *testing.T,
initialWriteBytes int, nOneByteWrites int) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config, StallableMDAfterPut, 1)
// Use the smallest possible block size.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
data := make([]byte, initialWriteBytes)
for i := 0; i < initialWriteBytes; i++ {
data[i] = 1
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// start the sync
errChan := make(chan error)
go func() {
errChan <- kbfsOps.Sync(putCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onPutStalledCh
expectedData := make([]byte, len(data))
copy(expectedData, data)
for i := 0; i < nOneByteWrites; i++ {
// now make sure we can write the file and see the new byte we wrote
newData := []byte{byte(i + 2)}
err = kbfsOps.Write(ctx, fileNode, newData, int64(i+initialWriteBytes))
if err != nil {
t.Errorf("Couldn't write data: %v\n", err)
}
// read the data back
buf := make([]byte, i+1+initialWriteBytes)
nr, err := kbfsOps.Read(ctx, fileNode, buf, 0)
if err != nil {
t.Errorf("Couldn't read data: %v\n", err)
}
expectedData = append(expectedData, newData...)
if nr != int64(i+1+initialWriteBytes) ||
!bytes.Equal(expectedData, buf) {
t.Errorf("Got wrong data %v; expected %v", buf, expectedData)
}
}
// now unblock Sync and make sure there was no error
close(putUnstallCh)
err = <-errChan
if err != nil {
t.Errorf("Sync got an error: %v", err)
}
// finally, make sure we can still read it after the sync too
// (even though the second write hasn't been sync'd yet)
totalSize := nOneByteWrites + initialWriteBytes
buf2 := make([]byte, totalSize)
nr, err := kbfsOps.Read(ctx, fileNode, buf2, 0)
if err != nil {
t.Errorf("Couldn't read data: %v\n", err)
}
if nr != int64(totalSize) ||
!bytes.Equal(expectedData, buf2) {
t.Errorf("2nd read: Got wrong data %v; expected %v", buf2, expectedData)
}
// there should be 4+n clean blocks at this point: the original
// root block + 2 modifications (create + write), the empty file
// block, the n initial modification blocks plus top block (if
// applicable).
bcs := config.BlockCache().(*BlockCacheStandard)
numCleanBlocks := bcs.cleanTransient.Len()
nFileBlocks := 1 + len(data)/int(bsplitter.maxSize)
if nFileBlocks > 1 {
nFileBlocks++ // top indirect block
}
if g, e := numCleanBlocks, 4+nFileBlocks; g != e {
t.Errorf("Unexpected number of cached clean blocks: %d vs %d (%d vs %d)\n", g, e, totalSize, bsplitter.maxSize)
}
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Final sync failed: %v", err)
}
if ei, err := kbfsOps.Stat(ctx, fileNode); err != nil {
t.Fatalf("Couldn't stat: %v", err)
} else if g, e := ei.Size, uint64(totalSize); g != e {
t.Fatalf("Unexpected size: %d vs %d", g, e)
}
// Make sure there are no dirty blocks left at the end of the test.
dbcs := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
numDirtyBlocks := len(dbcs.cache)
if numDirtyBlocks != 0 {
t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
}
}
// Test that a write can happen concurrently with a sync
func TestKBFSOpsConcurWriteDuringSync(t *testing.T) {
testKBFSOpsConcurWritesDuringSync(t, 1, 1)
}
// Test that multiple writes can happen concurrently with a sync
// (regression for KBFS-616)
func TestKBFSOpsConcurMultipleWritesDuringSync(t *testing.T) {
testKBFSOpsConcurWritesDuringSync(t, 1, 10)
}
// Test that multiple indirect writes can happen concurrently with a
// sync (regression for KBFS-661)
func TestKBFSOpsConcurMultipleIndirectWritesDuringSync(t *testing.T) {
testKBFSOpsConcurWritesDuringSync(t, 25, 50)
}
// Test that writes that happen concurrently with a sync, which write
// to the same block, work correctly.
func TestKBFSOpsConcurDeferredDoubleWritesDuringSync(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config, StallableMDAfterPut, 1)
// Use the smallest possible block size.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
var data []byte
// Write 2 blocks worth of data
for i := 0; i < 30; i++ {
data = append(data, byte(i))
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// Sync the initial two data blocks
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}
// Now dirty the first block.
newData1 := make([]byte, 10)
copy(newData1, data[20:])
err = kbfsOps.Write(ctx, fileNode, newData1, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// start the sync
errChan := make(chan error)
go func() {
errChan <- kbfsOps.Sync(putCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onPutStalledCh
// Now dirty the second block, twice.
newData2 := make([]byte, 10)
copy(newData2, data[:10])
err = kbfsOps.Write(ctx, fileNode, newData2, 20)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
err = kbfsOps.Write(ctx, fileNode, newData2, 30)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// now unblock Sync and make sure there was no error
close(putUnstallCh)
err = <-errChan
if err != nil {
t.Errorf("Sync got an error: %v", err)
}
expectedData := make([]byte, 40)
copy(expectedData[:10], newData1)
copy(expectedData[10:20], data[10:20])
copy(expectedData[20:30], newData2)
copy(expectedData[30:40], newData2)
gotData := make([]byte, 40)
nr, err := kbfsOps.Read(ctx, fileNode, gotData, 0)
if err != nil {
t.Errorf("Couldn't read data: %v", err)
}
if nr != int64(len(gotData)) {
t.Errorf("Only read %d bytes", nr)
}
if !bytes.Equal(expectedData, gotData) {
t.Errorf("Read wrong data. Expected %v, got %v", expectedData, gotData)
}
// Final sync
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Final sync failed: %v", err)
}
gotData = make([]byte, 40)
nr, err = kbfsOps.Read(ctx, fileNode, gotData, 0)
if err != nil {
t.Errorf("Couldn't read data: %v", err)
}
if nr != int64(len(gotData)) {
t.Errorf("Only read %d bytes", nr)
}
if !bytes.Equal(expectedData, gotData) {
t.Errorf("Read wrong data. Expected %v, got %v", expectedData, gotData)
}
// Make sure there are no dirty blocks left at the end of the test.
dbcs := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
numDirtyBlocks := len(dbcs.cache)
if numDirtyBlocks != 0 {
t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
}
}
// Test that a block write can happen concurrently with a block
// read. This is a regression test for KBFS-536.
func TestKBFSOpsConcurBlockReadWrite(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
// TODO: Use kbfsConcurTestShutdown.
defer kbfsConcurTestShutdownNoCheck(t, config, ctx, cancel)
// Turn off transient block caching.
config.SetBlockCache(NewBlockCacheStandard(0, 1<<30))
// Create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
onReadStalledCh, readUnstallCh, ctxStallRead :=
StallBlockOp(ctx, config, StallableBlockGet, 1)
onWriteStalledCh, writeUnstallCh, ctxStallWrite :=
StallBlockOp(ctx, config, StallableBlockGet, 1)
var wg sync.WaitGroup
// Start the read and wait for it to stall.
wg.Add(1)
var readErr error
go func() {
defer wg.Done()
_, readErr = kbfsOps.GetDirChildren(ctxStallRead, rootNode)
}()
<-onReadStalledCh
// Start the write and wait for it to stall.
wg.Add(1)
var writeErr error
go func() {
defer wg.Done()
data := []byte{1}
writeErr = kbfsOps.Write(ctxStallWrite, fileNode, data, 0)
}()
<-onWriteStalledCh
// Unstall the read, which shouldn't blow up.
close(readUnstallCh)
// Finally, unstall the write.
close(writeUnstallCh)
wg.Wait()
// Do these in the main goroutine since t isn't goroutine
// safe, and do these after wg.Wait() since we only know
// they're set after the goroutines exit.
if readErr != nil {
t.Errorf("Couldn't get children: %v", readErr)
}
if writeErr != nil {
t.Errorf("Couldn't write file: %v", writeErr)
}
}
// mdRecordingKeyManager records the last KeyMetadata argument seen
// in its KeyManager methods.
type mdRecordingKeyManager struct {
lastKMDMu sync.RWMutex
lastKMD KeyMetadata
delegate KeyManager
}
func (km *mdRecordingKeyManager) getLastKMD() KeyMetadata {
km.lastKMDMu.RLock()
defer km.lastKMDMu.RUnlock()
return km.lastKMD
}
func (km *mdRecordingKeyManager) setLastKMD(kmd KeyMetadata) {
km.lastKMDMu.Lock()
defer km.lastKMDMu.Unlock()
km.lastKMD = kmd
}
func (km *mdRecordingKeyManager) GetTLFCryptKeyForEncryption(
ctx context.Context, kmd KeyMetadata) (kbfscrypto.TLFCryptKey, error) {
km.setLastKMD(kmd)
return km.delegate.GetTLFCryptKeyForEncryption(ctx, kmd)
}
func (km *mdRecordingKeyManager) GetTLFCryptKeyForMDDecryption(
ctx context.Context, kmdToDecrypt, kmdWithKeys KeyMetadata) (
kbfscrypto.TLFCryptKey, error) {
km.setLastKMD(kmdToDecrypt)
return km.delegate.GetTLFCryptKeyForMDDecryption(ctx,
kmdToDecrypt, kmdWithKeys)
}
func (km *mdRecordingKeyManager) GetTLFCryptKeyForBlockDecryption(
ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (
kbfscrypto.TLFCryptKey, error) {
km.setLastKMD(kmd)
return km.delegate.GetTLFCryptKeyForBlockDecryption(ctx, kmd, blockPtr)
}
func (km *mdRecordingKeyManager) GetTLFCryptKeyOfAllGenerations(
ctx context.Context, kmd KeyMetadata) (
keys []kbfscrypto.TLFCryptKey, err error) {
km.setLastKMD(kmd)
return km.delegate.GetTLFCryptKeyOfAllGenerations(ctx, kmd)
}
func (km *mdRecordingKeyManager) Rekey(
ctx context.Context, md *RootMetadata, promptPaper bool) (
bool, *kbfscrypto.TLFCryptKey, error) {
km.setLastKMD(md)
return km.delegate.Rekey(ctx, md, promptPaper)
}
// Test that a sync can happen concurrently with a write. This is a
// regression test for KBFS-558.
func TestKBFSOpsConcurBlockSyncWrite(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
// TODO: Use kbfsConcurTestShutdown.
defer kbfsConcurTestShutdownNoCheck(t, config, ctx, cancel)
km := &mdRecordingKeyManager{delegate: config.KeyManager()}
config.SetKeyManager(km)
// Turn off block caching.
config.SetBlockCache(NewBlockCacheStandard(0, 1<<30))
// Create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write to file to mark it dirty.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %v", err)
}
lState := makeFBOLockState()
fbo := kbfsOps.(*KBFSOpsStandard).getOpsNoAdd(rootNode.GetFolderBranch())
if fbo.blocks.GetState(lState) != dirtyState {
t.Fatal("Unexpectedly not in dirty state")
}
onSyncStalledCh, syncUnstallCh, ctxStallSync :=
StallBlockOp(ctx, config, StallableBlockPut, 1)
var wg sync.WaitGroup
// Start the sync and wait for it to stall (on getting the dir
// block).
wg.Add(1)
var syncErr error
go func() {
defer wg.Done()
syncErr = kbfsOps.Sync(ctxStallSync, fileNode)
}()
<-onSyncStalledCh
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
deferredWriteCount := fbo.blocks.getDeferredWriteCountForTest(lState)
if deferredWriteCount != 1 {
t.Errorf("Unexpected deferred write count %d",
deferredWriteCount)
}
// Unstall the sync.
close(syncUnstallCh)
wg.Wait()
// Do this in the main goroutine since t isn't goroutine safe,
// and do this after wg.Wait() since we only know it's set
// after the goroutine exits.
if syncErr != nil {
t.Errorf("Couldn't sync: %v", syncErr)
}
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
t.Errorf("Couldn't get MD: %v", err)
}
lastKMD := km.getLastKMD()
if md.ReadOnlyRootMetadata != lastKMD {
t.Error("Last MD seen by key manager != head")
}
}
// Test that a sync can happen concurrently with a truncate. This is a
// regression test for KBFS-558.
func TestKBFSOpsConcurBlockSyncTruncate(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
km := &mdRecordingKeyManager{delegate: config.KeyManager()}
config.SetKeyManager(km)
// Turn off block caching.
config.SetBlockCache(NewBlockCacheStandard(0, 1<<30))
// Create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write to file to mark it dirty.
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %v", err)
}
lState := makeFBOLockState()
fbo := kbfsOps.(*KBFSOpsStandard).getOpsNoAdd(rootNode.GetFolderBranch())
if fbo.blocks.GetState(lState) != dirtyState {
t.Fatal("Unexpectedly not in dirty state")
}
onSyncStalledCh, syncUnstallCh, ctxStallSync :=
StallBlockOp(ctx, config, StallableBlockPut, 1)
var wg sync.WaitGroup
// Start the sync and wait for it to stall (on getting the dir
// block).
wg.Add(1)
var syncErr error
go func() {
defer wg.Done()
syncErr = kbfsOps.Sync(ctxStallSync, fileNode)
}()
<-onSyncStalledCh
err = kbfsOps.Truncate(ctx, fileNode, 0)
if err != nil {
t.Errorf("Couldn't truncate file: %v", err)
}
deferredWriteCount := fbo.blocks.getDeferredWriteCountForTest(lState)
if deferredWriteCount != 1 {
t.Errorf("Unexpected deferred write count %d",
deferredWriteCount)
}
// Unstall the sync.
close(syncUnstallCh)
wg.Wait()
// Do this in the main goroutine since t isn't goroutine safe,
// and do this after wg.Wait() since we only know it's set
// after the goroutine exits.
if syncErr != nil {
t.Errorf("Couldn't sync: %v", syncErr)
}
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
t.Errorf("Couldn't get MD: %v", err)
}
lastKMD := km.getLastKMD()
if md.ReadOnlyRootMetadata != lastKMD {
t.Error("Last MD seen by key manager != head")
}
}
// Test that a sync can happen concurrently with a read for a file
// large enough to have indirect blocks without messing anything
// up. This should pass with -race. This is a regression test for
// KBFS-537.
func TestKBFSOpsConcurBlockSyncReadIndirect(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// Turn off block caching.
config.SetBlockCache(NewBlockCacheStandard(0, 1<<30))
// Use the smallest block size possible.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
// Create a file.
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write to file to make an indirect block.
data := make([]byte, bsplitter.maxSize+1)
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write to file: %v", err)
}
// Decouple the read context from the sync context.
readCtx, cancel := context.WithCancel(context.Background())
defer cancel()
// Read in a loop in a separate goroutine until we encounter
// an error or the test ends.
c := make(chan struct{})
go func() {
defer close(c)
outer:
for {
_, err := kbfsOps.Read(readCtx, fileNode, data, 0)
select {
case <-readCtx.Done():
break outer
default:
}
if err != nil {
t.Errorf("Couldn't read file: %v", err)
break
}
}
}()
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
cancel()
// Wait for the read loop to finish
<-c
}
// Test that a write can survive a folder BlockPointer update
func TestKBFSOpsConcurWriteDuringFolderUpdate(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
data := []byte{1}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// Now update the folder pointer in some other way
_, _, err = kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Now sync the original file and make sure the write survived
if err := kbfsOps.Sync(ctx, fileNode); err != nil {
t.Fatalf("Couldn't sync: %v", err)
}
de, err := kbfsOps.Stat(ctx, fileNode)
if err != nil {
t.Errorf("Couldn't stat file: %v", err)
}
if g, e := de.Size, len(data); g != uint64(e) {
t.Errorf("Got wrong size %d; expected %d", g, e)
}
}
// Test that a write can happen concurrently with a sync when there
// are multiple blocks in the file.
func TestKBFSOpsConcurWriteDuringSyncMultiBlocks(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config, StallableMDAfterPut, 1)
// make blocks small
config.BlockSplitter().(*BlockSplitterSimple).maxSize = 5
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// 2 blocks worth of data
data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// sync these initial blocks
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Errorf("Couldn't do the first sync: %v", err)
}
// there should be 7 blocks at this point: the original root block
// + 2 modifications (create + write), the top indirect file block
// and a modification (write), and its two children blocks.
numCleanBlocks := config.BlockCache().(*BlockCacheStandard).cleanTransient.Len()
if numCleanBlocks != 7 {
t.Errorf("Unexpected number of cached clean blocks: %d\n",
numCleanBlocks)
}
// write to the first block
b1data := []byte{11, 12}
err = kbfsOps.Write(ctx, fileNode, b1data, 0)
if err != nil {
t.Errorf("Couldn't write 1st block of file: %v", err)
}
// start the sync
errChan := make(chan error)
go func() {
errChan <- kbfsOps.Sync(putCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onPutStalledCh
// now make sure we can write the second block of the file and see
// the new bytes we wrote
newData := []byte{20}
err = kbfsOps.Write(ctx, fileNode, newData, 9)
if err != nil {
t.Errorf("Couldn't write data: %v\n", err)
}
// read the data back
buf := make([]byte, 10)
nr, err := kbfsOps.Read(ctx, fileNode, buf, 0)
if err != nil {
t.Errorf("Couldn't read data: %v\n", err)
}
expectedData := []byte{11, 12, 3, 4, 5, 6, 7, 8, 9, 20}
if nr != 10 || !bytes.Equal(expectedData, buf) {
t.Errorf("Got wrong data %v; expected %v", buf, expectedData)
}
// now unstall Sync and make sure there was no error
close(putUnstallCh)
err = <-errChan
if err != nil {
t.Errorf("Sync got an error: %v", err)
}
// finally, make sure we can still read it after the sync too
// (even though the second write hasn't been sync'd yet)
buf2 := make([]byte, 10)
nr, err = kbfsOps.Read(ctx, fileNode, buf2, 0)
if err != nil {
t.Errorf("Couldn't read data: %v\n", err)
}
if nr != 10 || !bytes.Equal(expectedData, buf2) {
t.Errorf("2nd read: Got wrong data %v; expected %v", buf2, expectedData)
}
// Final sync to clean up
if err := kbfsOps.Sync(ctx, fileNode); err != nil {
t.Errorf("Couldn't sync the final write")
}
}
// Test that a write consisting of multiple blocks can be canceled
// before all blocks have been written.
func TestKBFSOpsConcurWriteParallelBlocksCanceled(t *testing.T) {
if maxParallelBlockPuts <= 1 {
t.Skip("Skipping because we are not putting blocks in parallel.")
}
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// give it a remote block server with a fake client
log := config.MakeLogger("")
fc := NewFakeBServerClient(config.Crypto(), log, nil, nil, nil)
b := newBlockServerRemoteWithClient(
config.Codec(), config.KBPKI(), log, fc)
config.BlockServer().Shutdown()
config.SetBlockServer(b)
// make blocks small
blockSize := int64(5)
config.BlockSplitter().(*BlockSplitterSimple).maxSize = blockSize
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Two initial blocks, then maxParallelBlockPuts blocks that
// will be processed but discarded, then three extra blocks
// that will be ignored.
initialBlocks := 2
extraBlocks := 3
totalFileBlocks := initialBlocks + maxParallelBlockPuts + extraBlocks
var data []byte
for i := int64(0); i < blockSize*int64(totalFileBlocks); i++ {
data = append(data, byte(i))
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// now set a control channel, let a couple blocks go in, and then
// cancel the context
readyChan := make(chan struct{})
goChan := make(chan struct{})
finishChan := make(chan struct{})
fc.readyChan = readyChan
fc.goChan = goChan
fc.finishChan = finishChan
prevNBlocks := fc.numBlocks()
ctx2, cancel2 := context.WithCancel(ctx)
go func() {
// let the first initialBlocks blocks through.
for i := 0; i < initialBlocks; i++ {
select {
case <-readyChan:
case <-ctx.Done():
t.Error(ctx.Err())
}
}
for i := 0; i < initialBlocks; i++ {
select {
case goChan <- struct{}{}:
case <-ctx.Done():
t.Error(ctx.Err())
}
}
for i := 0; i < initialBlocks; i++ {
select {
case <-finishChan:
case <-ctx.Done():
t.Error(ctx.Err())
}
}
// Let each parallel block worker block on readyChan.
for i := 0; i < maxParallelBlockPuts; i++ {
select {
case <-readyChan:
case <-ctx.Done():
t.Error(ctx.Err())
}
}
// Make sure all the workers are busy.
select {
case <-readyChan:
t.Error("Worker unexpectedly ready")
case <-ctx.Done():
t.Error(ctx.Err())
default:
}
// Let all the workers go through.
cancel2()
}()
err = kbfsOps.Sync(ctx2, fileNode)
if err != ctx2.Err() {
t.Errorf("Sync did not get canceled error: %v", err)
}
nowNBlocks := fc.numBlocks()
if nowNBlocks != prevNBlocks+2 {
t.Errorf("Unexpected number of blocks; prev = %d, now = %d",
prevNBlocks, nowNBlocks)
}
// Make sure there are no more workers, i.e. the extra blocks
// aren't sent to the server.
select {
case <-readyChan:
t.Error("Worker unexpectedly ready")
default:
}
// As a regression for KBFS-635, test that a second sync succeeds,
// and that future operations also succeed.
//
// Create new objects to avoid racing with goroutines from the
// first sync.
fc = NewFakeBServerClient(config.Crypto(), log, nil, nil, nil)
b = newBlockServerRemoteWithClient(
config.Codec(), config.KBPKI(), log, fc)
config.BlockServer().Shutdown()
config.SetBlockServer(b)
if err := kbfsOps.Sync(ctx, fileNode); err != nil {
t.Fatalf("Second sync failed: %v", err)
}
if _, _, err := kbfsOps.CreateFile(ctx, rootNode, "b", false, NoExcl); err != nil {
t.Fatalf("Couldn't create file after sync: %v", err)
}
// Avoid checking state when using a fake block server.
config.MDServer().Shutdown()
}
// Test that, when writing multiple blocks in parallel, one error will
// cancel the remaining puts.
func TestKBFSOpsConcurWriteParallelBlocksError(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// give it a mock'd block server
ctr := NewSafeTestReporter(t)
mockCtrl := gomock.NewController(ctr)
defer mockCtrl.Finish()
defer ctr.CheckForFailures()
b := NewMockBlockServer(mockCtrl)
config.BlockServer().Shutdown()
config.SetBlockServer(b)
// from the folder creation, then 2 for file creation
c := b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).Times(3).Return(nil)
b.EXPECT().ArchiveBlockReferences(gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return(nil)
// make blocks small
blockSize := int64(5)
config.BlockSplitter().(*BlockSplitterSimple).maxSize = blockSize
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// 15 blocks
var data []byte
fileBlocks := int64(15)
for i := int64(0); i < blockSize*fileBlocks; i++ {
data = append(data, byte(i))
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// let two blocks through and fail the third:
c = b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).Times(2).After(c).Return(nil)
putErr := errors.New("This is a forced error on put")
errPtrChan := make(chan BlockPointer)
c = b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).
Do(func(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) {
errPtrChan <- BlockPointer{
ID: id,
Context: context,
}
}).After(c).Return(putErr)
// let the rest through
proceedChan := make(chan struct{})
b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().
Do(func(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf) {
<-proceedChan
}).After(c).Return(nil)
b.EXPECT().Shutdown().AnyTimes()
var errPtr BlockPointer
go func() {
errPtr = <-errPtrChan
close(proceedChan)
}()
err = kbfsOps.Sync(ctx, fileNode)
if err != putErr {
t.Errorf("Sync did not get the expected error: %v", err)
}
// wait for proceedChan to close, so we know the errPtr has been set
<-proceedChan
// Make sure the error'd file didn't make it to the actual cache
// -- it's still in the permanent cache because the file might
// still be read or sync'd later.
config.BlockCache().DeletePermanent(errPtr.ID)
if _, err := config.BlockCache().Get(errPtr); err == nil {
t.Errorf("Failed block put for %v left block in cache", errPtr)
}
// State checking won't happen on the mock block server since we
// leave ourselves in a dirty state.
}
// Test that writes that happen on a multi-block file concurrently
// with a sync, which has to retry due to an archived block, works
// correctly. Regression test for KBFS-700.
func TestKBFSOpsMultiBlockWriteDuringRetriedSync(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// Use the smallest possible block size.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
oldBServer := config.BlockServer()
defer config.SetBlockServer(oldBServer)
onSyncStalledCh, syncUnstallCh, ctxStallSync :=
StallBlockOp(ctx, config, StallableBlockPut, 1)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
var data []byte
// Write 2 blocks worth of data
for i := 0; i < 30; i++ {
data = append(data, byte(i))
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("First sync failed: %v", err)
}
// Remove that file, and wait for the archiving to complete
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %v", err)
}
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
fileNode2, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Now write the identical first block and sync it.
err = kbfsOps.Write(ctx, fileNode2, data[:20], 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// Sync the initial two data blocks
errChan := make(chan error)
// start the sync
go func() {
errChan <- kbfsOps.Sync(ctxStallSync, fileNode2)
}()
<-onSyncStalledCh
// Now write the second block.
err = kbfsOps.Write(ctx, fileNode2, data[20:], 20)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// Unstall the sync.
close(syncUnstallCh)
err = <-errChan
if err != nil {
t.Errorf("Sync got an error: %v", err)
}
// Final sync
err = kbfsOps.Sync(ctx, fileNode2)
if err != nil {
t.Fatalf("Final sync failed: %v", err)
}
gotData := make([]byte, 30)
nr, err := kbfsOps.Read(ctx, fileNode2, gotData, 0)
if err != nil {
t.Errorf("Couldn't read data: %v", err)
}
if nr != int64(len(gotData)) {
t.Errorf("Only read %d bytes", nr)
}
if !bytes.Equal(data, gotData) {
t.Errorf("Read wrong data. Expected %v, got %v", data, gotData)
}
// Make sure there are no dirty blocks left at the end of the test.
dbcs := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
numDirtyBlocks := len(dbcs.cache)
if numDirtyBlocks != 0 {
t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
}
}
// Test that a sync of a multi-block file that hits both a retriable
// error and a unretriable error leave the system in a clean state.
// Regression test for KBFS-1508.
func TestKBFSOpsMultiBlockWriteWithRetryAndError(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// Use the smallest possible block size.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
oldBServer := config.BlockServer()
defer config.SetBlockServer(oldBServer)
onSyncStalledCh, syncUnstallCh, ctxStallSync :=
StallBlockOp(ctx, config, StallableBlockPut, 7)
ctxStallSync, cancel2 := context.WithCancel(ctxStallSync)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
var data []byte
// Write 2 blocks worth of data
for i := 0; i < 30; i++ {
data = append(data, byte(i))
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
err = kbfsOps.Sync(ctx, fileNode)
if err != nil {
t.Fatalf("First sync failed: %v", err)
}
// Remove that file, and wait for the archiving to complete
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %v", err)
}
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
fileNode2, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Now write the identical first block, plus a new block and sync it.
err = kbfsOps.Write(ctx, fileNode2, data[:20], 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
err = kbfsOps.Write(ctx, fileNode2, data[10:30], 20)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// Sync the initial three data blocks
errChan := make(chan error, 1)
// start the sync
go func() {
errChan <- kbfsOps.Sync(ctxStallSync, fileNode2)
}()
// Wait for the first block to finish (before the retry)
select {
case <-onSyncStalledCh:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
// Dirty the last block and extend it, so the one that was sent as
// part of the first sync is no longer part of the file.
err = kbfsOps.Write(ctx, fileNode2, data[10:20], 40)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
select {
case syncUnstallCh <- struct{}{}:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
// Wait for the rest of the first set of blocks to finish (before the retry)
for i := 0; i < 5; i++ {
select {
case <-onSyncStalledCh:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
select {
case syncUnstallCh <- struct{}{}:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
}
// Once the first block of the retry comes in, cancel everything.
select {
case <-onSyncStalledCh:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
cancel2()
// Unstall the sync.
close(syncUnstallCh)
err = <-errChan
if err != context.Canceled {
t.Errorf("Sync got an unexpected error: %v", err)
}
// Finish the sync
err = kbfsOps.Sync(ctx, fileNode2)
if err != nil {
t.Errorf("Couldn't sync file after error: %v", err)
}
gotData := make([]byte, 50)
nr, err := kbfsOps.Read(ctx, fileNode2, gotData, 0)
if err != nil {
t.Errorf("Couldn't read data: %v", err)
}
if nr != int64(len(gotData)) {
t.Errorf("Only read %d bytes", nr)
}
expectedData := make([]byte, 0, 45)
expectedData = append(expectedData, data[0:20]...)
expectedData = append(expectedData, data[10:30]...)
expectedData = append(expectedData, data[10:20]...)
if !bytes.Equal(expectedData, gotData) {
t.Errorf("Read wrong data. Expected %v, got %v", expectedData, gotData)
}
// Make sure there are no dirty blocks left at the end of the test.
dbcs := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
numDirtyBlocks := len(dbcs.cache)
if numDirtyBlocks != 0 {
for ptr := range dbcs.cache {
t.Logf("Block %v still dirty", ptr.id)
}
t.Errorf("%d dirty blocks left after final sync, sync=%d wait=%d", numDirtyBlocks, dbcs.syncBufBytes, dbcs.waitBufBytes)
}
}
// This tests the situation where cancellation happens when the MD write has
// already started, and cancellation is delayed. Since no extra delay greater
// than the grace period in MD writes is introduced, Create should succeed.
func TestKBFSOpsCanceledCreateNoError(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(context.Background(), config, StallableMDPut, 1)
putCtx, cancel2 := context.WithCancel(putCtx)
putCtx, err := NewContextWithCancellationDelayer(putCtx)
if err != nil {
t.Fatal(err)
}
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
errChan := make(chan error, 1)
go func() {
_, _, err := kbfsOps.CreateFile(putCtx, rootNode, "a", false, WithExcl)
errChan <- err
}()
// Wait until Create gets stuck at MDOps.Put(). At this point, the delayed
// cancellation should have been enabled.
select {
case <-onPutStalledCh:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
cancel2()
close(putUnstallCh)
// We expect no canceled error
select {
case err = <-errChan:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if err != nil {
t.Fatalf("Create returned error: %v", err)
}
ctx2 := BackgroundContextWithCancellationDelayer()
defer CleanupCancellationDelayer(ctx2)
if _, _, err = kbfsOps.Lookup(
ctx2, rootNode, "a"); err != nil {
t.Fatalf("Lookup returned error: %v", err)
}
}
// This tests the situation where cancellation happens when the MD write has
// already started, and cancellation is delayed. A delay larger than the grace
// period is introduced to MD write, so Create should fail. This is to ensure
// Ctrl-C is able to interrupt the process eventually after the grace period.
func TestKBFSOpsCanceledCreateDelayTimeoutErrors(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// This essentially fast-forwards the grace period timer, making cancellation
// happen much faster. This way we can avoid time.Sleep.
config.SetDelayedCancellationGracePeriod(0)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(context.Background(), config, StallableMDPut, 1)
putCtx, cancel2 := context.WithCancel(putCtx)
putCtx, err := NewContextWithCancellationDelayer(putCtx)
if err != nil {
t.Fatal(err)
}
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
errChan := make(chan error, 1)
go func() {
_, _, err := kbfsOps.CreateFile(putCtx, rootNode, "a", false, WithExcl)
errChan <- err
}()
// Wait until Create gets stuck at MDOps.Put(). At this point, the delayed
// cancellation should have been enabled.
select {
case <-onPutStalledCh:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
cancel2()
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
case <-putCtx.Done():
// The cancellation delayer makes cancellation become async. This makes
// sure ctx is actually canceled before unstalling.
case <-time.After(time.Second):
// We have a grace period of 0s. This is too long; something must have gone
// wrong!
t.Fatalf("it took too long for cancellation to happen")
}
close(putUnstallCh)
// We expect a canceled error
select {
case err = <-errChan:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if err != context.Canceled {
t.Fatalf("Create didn't fail after grace period after cancellation."+
" Got %v; expecting context.Canceled", err)
}
ctx2 := BackgroundContextWithCancellationDelayer()
defer CleanupCancellationDelayer(ctx2)
// do another Op, which generates a new revision, to make sure
// CheckConfigAndShutdown doesn't get stuck
if _, _, err = kbfsOps.CreateFile(ctx2,
rootNode, "b", false, NoExcl); err != nil {
t.Fatalf("throwaway op failed: %v", err)
}
}
// Test that a Sync that is canceled during a successful MD put works.
func TestKBFSOpsConcurCanceledSyncSucceeds(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config, StallableMDAfterPut, 1)
// Use the smallest possible block size.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
data := make([]byte, 30)
for i := 0; i < 30; i++ {
data[i] = 1
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
unpauseDeleting := make(chan struct{})
ops.fbm.blocksToDeletePauseChan <- unpauseDeleting
// start the sync
errChan := make(chan error)
cancelCtx, cancel := context.WithCancel(putCtx)
go func() {
errChan <- kbfsOps.Sync(cancelCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onPutStalledCh
cancel()
close(putUnstallCh)
// We expect a canceled error
err = <-errChan
if err != context.Canceled {
t.Fatalf("No expected canceled error: %v", err)
}
// Flush the file. This will result in conflict resolution, and
// an extra copy of the file, but that's ok for now.
if err := kbfsOps.Sync(ctx, fileNode); err != nil {
t.Fatalf("Couldn't sync: %v", err)
}
if len(ops.fbm.blocksToDeleteChan) == 0 {
t.Fatalf("No blocks to delete after error")
}
unpauseDeleting <- struct{}{}
ops.fbm.waitForDeletingBlocks(ctx)
if len(ops.fbm.blocksToDeleteChan) > 0 {
t.Fatalf("Blocks left to delete after sync")
}
// The first put actually succeeded, so call
// SyncFromServerForTesting and make sure it worked.
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
gotData := make([]byte, 30)
nr, err := kbfsOps.Read(ctx, fileNode, gotData, 0)
if err != nil {
t.Errorf("Couldn't read data: %v", err)
}
if nr != int64(len(gotData)) {
t.Errorf("Only read %d bytes", nr)
}
if !bytes.Equal(data, gotData) {
t.Errorf("Read wrong data. Expected %v, got %v", data, gotData)
}
}
// Test that when a Sync that is canceled during a successful MD put,
// and then another Sync hits a conflict but then is also canceled,
// and finally a Sync succeeds (as a conflict), the TLF is left in a
// reasonable state where CR can succeed. Regression for KBFS-1569.
func TestKBFSOpsConcurCanceledSyncFailsAfterCanceledSyncSucceeds(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config, StallableMDAfterPut, 1)
// Use the smallest possible block size.
bsplitter, err := NewBlockSplitterSimple(20, 8*1024, config.Codec())
if err != nil {
t.Fatalf("Couldn't create block splitter: %v", err)
}
config.SetBlockSplitter(bsplitter)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
data := make([]byte, 30)
for i := 0; i < 30; i++ {
data[i] = 1
}
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// start the sync
errChan := make(chan error)
cancelCtx, cancel := context.WithCancel(putCtx)
go func() {
errChan <- kbfsOps.Sync(cancelCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onPutStalledCh
cancel()
close(putUnstallCh)
// We expect a canceled error
err = <-errChan
if err != context.Canceled {
t.Fatalf("No expected canceled error: %v", err)
}
// Cancel this one before it succeeds.
onUnmergedPutStalledCh, unmergedPutUnstallCh, putUnmergedCtx :=
StallMDOp(ctx, config, StallableMDPutUnmerged, 1)
// Flush the file again, which will result in an unmerged put,
// which we will also cancel.
cancelCtx, cancel = context.WithCancel(putUnmergedCtx)
go func() {
errChan <- kbfsOps.Sync(cancelCtx, fileNode)
}()
// wait until Sync gets stuck at MDOps.Put()
<-onUnmergedPutStalledCh
cancel()
close(unmergedPutUnstallCh)
// We expect a canceled error
err = <-errChan
if err != context.Canceled {
t.Fatalf("No expected canceled error: %v", err)
}
// Now finally flush the file again, which will result in a
// conflict file.
if err := kbfsOps.Sync(ctx, fileNode); err != nil {
t.Fatalf("Couldn't sync: %v", err)
}
// Wait for all the deletes to go through.
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
ops.fbm.waitForDeletingBlocks(ctx)
if len(ops.fbm.blocksToDeleteChan) > 0 {
t.Fatalf("Blocks left to delete after sync")
}
// Wait for CR to finish
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
}
// Test that truncating a block to a zero-contents block, for which a
// duplicate has previously been archived, works correctly after a
// cancel. Regression test for KBFS-727.
func TestKBFSOpsTruncateWithDupBlockCanceled(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Remove that file, and wait for the archiving to complete
err = kbfsOps.RemoveEntry(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %v", err)
}
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
fileNode2, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
var data []byte
// Write some data
for i := 0; i < 30; i++ {
data = append(data, byte(i))
}
err = kbfsOps.Write(ctx, fileNode2, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
err = kbfsOps.Sync(ctx, fileNode2)
if err != nil {
t.Fatalf("First sync failed: %v", err)
}
// Now truncate and sync, canceling during the block puts
err = kbfsOps.Truncate(ctx, fileNode2, 0)
if err != nil {
t.Errorf("Couldn't truncate file: %v", err)
}
// Sync the initial two data blocks
errChan := make(chan error)
// start the sync
cancelCtx, cancel := context.WithCancel(ctx)
oldBServer := config.BlockServer()
defer config.SetBlockServer(oldBServer)
onSyncStalledCh, syncUnstallCh, ctxStallSync :=
StallBlockOp(cancelCtx, config, StallableBlockPut, 1)
go func() {
errChan <- kbfsOps.Sync(ctxStallSync, fileNode2)
}()
<-onSyncStalledCh
cancel()
// Unstall the sync.
close(syncUnstallCh)
err = <-errChan
if err != context.Canceled {
t.Errorf("Sync got wrong error: %v", err)
}
// Final sync
err = kbfsOps.Sync(ctx, fileNode2)
if err != nil {
t.Fatalf("Final sync failed: %v", err)
}
}
type blockOpsOverQuota struct {
BlockOps
}
func (booq *blockOpsOverQuota) Put(ctx context.Context, tlfID tlf.ID,
blockPtr BlockPointer, readyBlockData ReadyBlockData) error {
return kbfsblock.BServerErrorOverQuota{
Throttled: true,
}
}
// Test that a quota error causes deferred writes to error.
// Regression test for KBFS-751.
func TestKBFSOpsErrorOnBlockedWriteDuringSync(t *testing.T) {
t.Skip("Broken pending KBFS-1261")
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
// create and write to a file
rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", false)
kbfsOps := config.KBFSOps()
fileNode, _, err := kbfsOps.CreateFile(ctx, rootNode, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// Write over the dirty amount of data. TODO: make this
// configurable for a speedier test.
dbcs := config.DirtyBlockCache().(*DirtyBlockCacheStandard)
data := make([]byte, dbcs.minSyncBufCap+1)
err = kbfsOps.Write(ctx, fileNode, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
realBlockOps := config.BlockOps()
config.SetBlockOps(&blockOpsOverQuota{BlockOps: config.BlockOps()})
onSyncStalledCh, syncUnstallCh, ctxStallSync :=
StallBlockOp(ctx, config, StallableBlockPut, 1)
// Block the Sync
// Sync the initial two data blocks
syncErrCh := make(chan error)
go func() {
syncErrCh <- kbfsOps.Sync(ctxStallSync, fileNode)
}()
<-onSyncStalledCh
// Write more data which should get accepted but deferred.
moreData := make([]byte, dbcs.minSyncBufCap*2+1)
err = kbfsOps.Write(ctx, fileNode, moreData, int64(len(data)))
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
// Now write more data which should get blocked
newData := make([]byte, 1)
writeErrCh := make(chan error)
go func() {
writeErrCh <- kbfsOps.Write(ctx, fileNode, newData,
int64(len(data)+len(moreData)))
}()
// Wait until the second write is blocked
ops := getOps(config, rootNode.GetFolderBranch().Tlf)
func() {
lState := makeFBOLockState()
filePath := ops.nodeCache.PathFromNode(fileNode)
ops.blocks.blockLock.Lock(lState)
defer ops.blocks.blockLock.Unlock(lState)
df := ops.blocks.getOrCreateDirtyFileLocked(lState, filePath)
// TODO: locking
for len(df.errListeners) != 3 {
ops.blocks.blockLock.Unlock(lState)
runtime.Gosched()
ops.blocks.blockLock.Lock(lState)
}
}()
// Unblock the sync
close(syncUnstallCh)
// Both errors should be an OverQuota error
syncErr := <-syncErrCh
writeErr := <-writeErrCh
if _, ok := syncErr.(kbfsblock.BServerErrorOverQuota); !ok {
t.Fatalf("Unexpected sync err: %v", syncErr)
}
if writeErr != syncErr {
t.Fatalf("Unexpected write err: %v", writeErr)
}
// Finish the sync to clear out the byte counts
config.SetBlockOps(realBlockOps)
if err := kbfsOps.Sync(ctx, fileNode); err != nil {
t.Fatalf("Couldn't finish sync: %v", err)
}
}
func TestKBFSOpsCancelGetFavorites(t *testing.T) {
config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
defer kbfsConcurTestShutdown(t, config, ctx, cancel)
serverConn, conn := rpc.MakeConnectionForTest(t)
daemon := newKeybaseDaemonRPCWithClient(
nil,
conn.GetClient(),
config.MakeLogger(""))
config.SetKeybaseService(daemon)
f := func(ctx context.Context) error {
_, err := config.KBFSOps().GetFavorites(ctx)
return err
}
testRPCWithCanceledContext(t, serverConn, f)
}
type stallingNodeCache struct {
NodeCache
doStallUpdate <-chan struct{}
unstallUpdate <-chan struct{}
beforePathsCalled chan<- struct{}
afterPathCalled chan<- struct{}
}
func (snc *stallingNodeCache) UpdatePointer(
oldRef BlockRef, newPtr BlockPointer) {
select {
case <-snc.doStallUpdate:
<-snc.unstallUpdate
default:
}
snc.NodeCache.UpdatePointer(oldRef, newPtr)
}
func (snc *stallingNodeCache) PathFromNode(node Node) path {
snc.beforePathsCalled <- struct{}{}
p := snc.NodeCache.PathFromNode(node)
snc.afterPathCalled <- struct{}{}
return p
}
// Test that a lookup that straddles a sync from the same file doesn't
// have any races. Regression test for KBFS-1717.
func TestKBFSOpsLookupSyncRace(t *testing.T) {
var userName1, userName2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, userName1, userName2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
config2 := ConfigAsUser(config1, userName2)
defer CheckConfigAndShutdown(ctx, t, config2)
name := userName1.String() + "," + userName2.String()
rootNode2 := GetRootNodeOrBust(ctx, t, config2, name, false)
kbfsOps2 := config2.KBFSOps()
ops2 := getOps(config2, rootNode2.GetFolderBranch().Tlf)
doStallUpdate := make(chan struct{}, 1)
unstallUpdate := make(chan struct{})
beforePathsCalled := make(chan struct{})
afterPathCalled := make(chan struct{})
snc := &stallingNodeCache{
NodeCache: ops2.nodeCache,
doStallUpdate: doStallUpdate,
unstallUpdate: unstallUpdate,
beforePathsCalled: beforePathsCalled,
afterPathCalled: afterPathCalled,
}
ops2.nodeCache = snc
ops2.blocks.nodeCache = snc
defer func() {
ops2.nodeCache = snc.NodeCache
ops2.blocks.nodeCache = snc.NodeCache
}()
// u1 creates a file.
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
fileNodeA1, _, err := kbfsOps1.CreateFile(
ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// u2 syncs and then disables updates.
if err := kbfsOps2.SyncFromServerForTesting(
ctx, rootNode2.GetFolderBranch()); err != nil {
t.Fatal("Couldn't sync user 2 from server")
}
_, err = DisableUpdatesForTesting(config2, rootNode2.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't disable updates: %v", err)
}
// u1 writes to the file.
data := []byte{1, 2, 3}
err = kbfsOps1.Write(ctx, fileNodeA1, data, 0)
if err != nil {
t.Errorf("Couldn't write file: %v", err)
}
if err := kbfsOps1.Sync(ctx, fileNodeA1); err != nil {
t.Fatalf("Couldn't finish sync: %v", err)
}
// u2 tries to lookup the file, which will block until we drain
// the afterPathCalled channel.
var wg sync.WaitGroup
wg.Add(1)
var fileNodeA2 Node
go func() {
defer wg.Done()
var err error
fileNodeA2, _, err = kbfsOps2.Lookup(ctx, rootNode2, "a")
if err != nil {
t.Errorf("Couldn't lookup a: %v", err)
}
}()
// Wait for the lookup to block.
select {
case <-beforePathsCalled:
case <-ctx.Done():
t.Fatal("Timeout while waiting for lookup to block")
}
// u2 starts to sync but the sync is stalled while holding the
// block lock.
doStallUpdate <- struct{}{}
wg.Add(1)
go func() {
defer wg.Done()
if err := kbfsOps2.SyncFromServerForTesting(
ctx, rootNode2.GetFolderBranch()); err != nil {
t.Errorf("Couldn't sync user 2 from server: %v", err)
}
}()
// Unblock the lookup.
select {
case <-afterPathCalled:
case <-ctx.Done():
t.Fatal("Timeout while waiting for afterPathCalled")
}
// Wait for the sync to block and let the sync succeed (which will
// let the lookup succeed). NOTE: To repro KBFS-1717, this call
// needs to go before we unblock the paths lookup. However, with
// the fix for KBFS-1717, the test will hang if we do that since
// the Lookup holds blockLock while it gets the path. So as is,
// this isn't a direct repro but it's still a test worth having
// around.
select {
case unstallUpdate <- struct{}{}:
case <-ctx.Done():
t.Fatal("Timeout while waiting for sync to block")
}
wg.Wait()
// Now u2 reads using the node it just looked up, and should see
// the right data.
gotData := make([]byte, len(data))
go func() { <-beforePathsCalled; <-afterPathCalled }() // Read needs a path lookup too.
nr, err := kbfsOps2.Read(ctx, fileNodeA2, gotData, 0)
if err != nil {
t.Errorf("Couldn't read data: %v", err)
}
if nr != int64(len(gotData)) {
t.Errorf("Only read %d bytes", nr)
}
if !bytes.Equal(data, gotData) {
t.Errorf("Read wrong data. Expected %v, got %v", data, gotData)
}
}
 | 1 | 15281 | If you feel like it, may as well apply all the suggestions below to this test too. | keybase-kbfs | go |