patch (string, lengths 17 to 31.2k) | y (int64, 1 to 1) | oldf (string, lengths 0 to 2.21M) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, lengths 8 to 843) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---
@@ -38,13 +38,15 @@ var PATH_SEPARATOR = process.platform === 'win32' ? ';' : ':';
exports.rmDir = function(path) {
return new promise.Promise(function(fulfill, reject) {
var numAttempts = 0;
+ var maxAttempts = 5;
+ var attemptTimeout = 250;
attemptRm();
function attemptRm() {
numAttempts += 1;
rimraf(path, function(err) {
if (err) {
- if (err.code === 'ENOTEMPTY' && numAttempts < 2) {
- attemptRm();
+ if (err.code === 'ENOTEMPTY' && numAttempts <= maxAttempts) {
+ setTimeout(attemptRm, attemptTimeout);
return;
}
      reject(err);
| 1 |
// Copyright 2013 Selenium committers
// Copyright 2013 Software Freedom Conservancy
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
var fs = require('fs'),
path = require('path'),
rimraf = require('rimraf'),
tmp = require('tmp');
var promise = require('..').promise;
var PATH_SEPARATOR = process.platform === 'win32' ? ';' : ':';
// PUBLIC API
/**
* Recursively removes a directory and all of its contents. This is equivalent
* to {@code rm -rf} on a POSIX system.
* @param {string} path Path to the directory to remove.
* @return {!promise.Promise} A promise to be resolved when the operation has
* completed.
*/
exports.rmDir = function(path) {
return new promise.Promise(function(fulfill, reject) {
var numAttempts = 0;
attemptRm();
function attemptRm() {
numAttempts += 1;
rimraf(path, function(err) {
if (err) {
if (err.code === 'ENOTEMPTY' && numAttempts < 2) {
attemptRm();
return;
}
reject(err);
} else {
fulfill();
}
});
}
});
};
/**
* Copies one file to another.
* @param {string} src The source file.
* @param {string} dst The destination file.
* @return {!promise.Promise.<string>} A promise for the copied file's path.
*/
exports.copy = function(src, dst) {
var copied = promise.defer();
var rs = fs.createReadStream(src);
rs.on('error', copied.reject);
rs.on('end', function() {
copied.fulfill(dst);
});
var ws = fs.createWriteStream(dst);
ws.on('error', copied.reject);
rs.pipe(ws);
return copied.promise;
};
/**
* Recursively copies the contents of one directory to another.
* @param {string} src The source directory to copy.
* @param {string} dst The directory to copy into.
* @param {(RegEx|function(string): boolean)=} opt_exclude An exclusion filter
* as either a regex or predicate function. All files matching this filter
* will not be copied.
* @return {!promise.Promise.<string>} A promise for the destination
* directory's path once all files have been copied.
*/
exports.copyDir = function(src, dst, opt_exclude) {
var predicate = opt_exclude;
if (opt_exclude && typeof opt_exclude !== 'function') {
predicate = function(p) {
return !opt_exclude.test(p);
};
}
// TODO(jleyba): Make this function completely async.
if (!fs.existsSync(dst)) {
fs.mkdirSync(dst);
}
var files = fs.readdirSync(src);
files = files.map(function(file) {
return path.join(src, file);
});
if (predicate) {
files = files.filter(predicate);
}
var results = [];
files.forEach(function(file) {
var stats = fs.statSync(file);
var target = path.join(dst, path.basename(file));
if (stats.isDirectory()) {
if (!fs.existsSync(target)) {
fs.mkdirSync(target, stats.mode);
}
results.push(exports.copyDir(file, target, predicate));
} else {
results.push(exports.copy(file, target));
}
});
return promise.all(results).then(function() {
return dst;
});
};
/**
* Tests if a file path exists.
* @param {string} path The path to test.
* @return {!promise.Promise.<boolean>} A promise for whether the file exists.
*/
exports.exists = function(path) {
var result = promise.defer();
fs.exists(path, result.fulfill);
return result.promise;
};
/**
* Deletes a name from the filesystem and possibly the file it refers to. Has
* no effect if the file does not exist.
* @param {string} path The path to remove.
* @return {!promise.Promise} A promise for when the file has been removed.
*/
exports.unlink = function(path) {
return new promise.Promise(function(fulfill, reject) {
fs.exists(path, function(exists) {
if (exists) {
fs.unlink(path, function(err) {
err && reject(err) || fulfill();
});
} else {
fulfill();
}
});
});
};
/**
* @return {!promise.Promise.<string>} A promise for the path to a temporary
* directory.
* @see https://www.npmjs.org/package/tmp
*/
exports.tmpDir = function() {
return promise.checkedNodeCall(tmp.dir);
};
/**
* @param {{postfix: string}=} opt_options Temporary file options.
* @return {!promise.Promise.<string>} A promise for the path to a temporary
* file.
* @see https://www.npmjs.org/package/tmp
*/
exports.tmpFile = function(opt_options) {
// |tmp.file| checks arguments length to detect options rather than doing a
// truthy check, so we must only pass options if there are some to pass.
return opt_options ?
promise.checkedNodeCall(tmp.file, opt_options) :
promise.checkedNodeCall(tmp.file);
};
/**
* Searches the {@code PATH} environment variable for the given file.
* @param {string} file The file to locate on the PATH.
* @param {boolean=} opt_checkCwd Whether to always start with the search with
* the current working directory, regardless of whether it is explicitly
* listed on the PATH.
* @return {?string} Path to the located file, or {@code null} if it could
* not be found.
*/
exports.findInPath = function(file, opt_checkCwd) {
if (opt_checkCwd) {
var tmp = path.join(process.cwd(), file);
if (fs.existsSync(tmp)) {
return tmp;
}
}
var dirs = process.env['PATH'].split(PATH_SEPARATOR);
var found = null;
dirs.forEach(function(dir) {
var tmp = path.join(dir, file);
if (!found && fs.existsSync(tmp)) {
found = tmp;
}
});
return found;
};
| 1 | 11,567 | Don't penalize everyone with a 250ms delay because some machines have problems. | SeleniumHQ-selenium | java |
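A minimal sketch of what the reviewer seems to be asking for: retry immediately on the first `ENOTEMPTY` (the common case on healthy machines) and only apply the 250ms back-off on later attempts. It uses a plain `Promise` and the callback-style `rimraf` API shown in the file above; the function name, constants, and structure are illustrative assumptions, not the actual follow-up change.

```javascript
var rimraf = require('rimraf');

// Hypothetical variant of rmDir: the first retry is immediate, so machines
// that only need one quick retry never pay the delay; subsequent retries
// wait attemptTimeout milliseconds before trying again.
function rmDirWithBackoff(path) {
  return new Promise(function(fulfill, reject) {
    var numAttempts = 0;
    var maxAttempts = 5;
    var attemptTimeout = 250;
    attemptRm();
    function attemptRm() {
      numAttempts += 1;
      rimraf(path, function(err) {
        if (err) {
          if (err.code === 'ENOTEMPTY' && numAttempts <= maxAttempts) {
            if (numAttempts === 1) {
              attemptRm();                             // immediate first retry
            } else {
              setTimeout(attemptRm, attemptTimeout);   // delayed later retries
            }
            return;
          }
          reject(err);
        } else {
          fulfill();
        }
      });
    }
  });
}
```

An alternative in the same spirit would be to grow the delay between attempts or make `attemptTimeout` configurable, so only machines that keep failing pay any waiting cost.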
@@ -130,7 +130,11 @@ public final class UserUtils {
// Match!
// reparse the config file
log.info("Modification detected, reloading config file " + filename);
- configFileMap.get(filename).parseConfigFile();
+ try {
+ configFileMap.get(filename).parseConfigFile();
+ } catch (Exception e) {
+ log.error("Failed parsing config file after update: " + filename, e);
+ }
break;
}
 }
| 1 |
package azkaban.user;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.sun.nio.file.SensitivityWatchEventModifier;
import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class UserUtils {
private static final Logger log = LoggerFactory.getLogger(UserUtils.class);
private UserUtils() {
}
/**
* @return - Returns true if the given user is an ADMIN, or if user has the required permission
* for the action requested.
*/
public static boolean hasPermissionforAction(final UserManager userManager, final User user,
final Permission.Type type) {
for (final String roleName : user.getRoles()) {
final Role role = userManager.getRole(roleName);
final Permission perm = role.getPermission();
if (perm.isPermissionSet(Permission.Type.ADMIN) || perm.isPermissionSet(type)) {
return true;
}
}
return false;
}
/**
* Creates a watch thread which listens to specified files' modification and reloads
* configurations
*/
static void setupWatch(final Map<String, ParseConfigFile> configFileMap) throws IOException {
Preconditions.checkNotNull(configFileMap);
Preconditions.checkArgument(configFileMap.size() > 0);
final WatchService watchService;
try {
watchService = FileSystems.getDefault().newWatchService();
} catch (IOException e) {
log.warn(" Failed to create WatchService " + e.getMessage());
throw e;
}
// Map to store WatchKey to Dir mapping
final Map<WatchKey, Path> keys = new HashMap<>();
// A directory to config files multimap
final Multimap<Path, String> dirToFilesMap = HashMultimap.create();
// Iterate over each file.
for (Map.Entry<String, ParseConfigFile> entry : configFileMap.entrySet()) {
String fileName = entry.getKey();
ParseConfigFile parser = entry.getValue();
Preconditions.checkNotNull(fileName);
Preconditions.checkNotNull(parser);
final File file = new File(fileName);
if (!file.exists()) {
log.warn("Failed to setup watch service, user provided file " + fileName + " does not "
+ "exist.");
continue;
}
try {
Path dir = Paths.get(fileName).getParent();
if (!dirToFilesMap.containsKey(dir)) {
// There is not entry for this directory, create a watchkey
WatchKey watchKey = dir.register(watchService,
new WatchEvent.Kind[]{StandardWatchEventKinds.ENTRY_MODIFY},
SensitivityWatchEventModifier.HIGH);
keys.put(watchKey, dir);
}
// Add the config file to dir map
dirToFilesMap.put(dir, fileName);
} catch (IOException e) {
// Ignore the IOException
log.warn("IOException while setting up watch on conf " + fileName + ". "
+ e.getMessage());
}
}
// Return if WatchService is not initialized
if (keys.size() == 0) {
log.warn("Watchservice was not setup for any config file(s).");
try {
watchService.close();
} catch (IOException e) {
log.warn("IOException while closing watchService. " + e.getMessage());
}
return;
}
Runnable runnable = () -> {
// Watchservice is established, now listen for the events till eternity!
for (;; ) {
WatchKey watchKey;
try {
watchKey = watchService.take();
} catch (InterruptedException ie) {
log.warn(ie.getMessage());
Thread.currentThread().interrupt();
return;
}
// Get the directory for which watch service event triggered.
Path dir = keys.get(watchKey);
for (WatchEvent<?> event : watchKey.pollEvents()) {
// Make sure the modification happened to user config file
@SuppressWarnings("unchecked")
final Path name = ((WatchEvent<Path>) event).context();
final String filename = dir.resolve(name).toString();
// Lookup the file in dirToFilesMap
if (dirToFilesMap.containsEntry(dir, filename)) {
// Match!
// reparse the config file
log.info("Modification detected, reloading config file " + filename);
configFileMap.get(filename).parseConfigFile();
break;
}
}
watchKey.reset();
}
};
final Thread thread = new Thread(runnable);
log.info("Starting configuration watching thread.");
thread.start();
}
}
| 1 | 18,037 | This try-catch should be here so that the watcher thread doesn't just exit in case of an exception. This change alone would probably be enough to fix the error as well, assuming that there's another `ENTRY_MODIFY` event when the file write is finalized. But of course it's not a perfect fix, because it doesn't protect against possibly reading a non-empty but only partially written file? | azkaban-azkaban | java |
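To illustrate the pattern the reviewer describes, here is a small, self-contained sketch (not Azkaban's actual API): the watch loop catches per-event exceptions so the thread survives a failed reload, on the assumption that a later `ENTRY_MODIFY` event, fired once the write completes, will trigger a successful re-parse. The `Reloadable` interface and the class name are made up for the example.

```java
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;

public final class ConfigWatchSketch {

  /** Hypothetical stand-in for a config parser such as ParseConfigFile. */
  interface Reloadable {
    void reload(Path file) throws Exception;
  }

  /** Watches one file's parent directory and reloads it on modification. */
  static void watch(Path dir, Path file, Reloadable target) throws IOException {
    final WatchService ws = FileSystems.getDefault().newWatchService();
    dir.register(ws, StandardWatchEventKinds.ENTRY_MODIFY);
    final Thread t = new Thread(() -> {
      for (;;) {
        final WatchKey key;
        try {
          key = ws.take();
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return;
        }
        for (WatchEvent<?> ev : key.pollEvents()) {
          final Path changed = dir.resolve((Path) ev.context());
          if (changed.equals(file)) {
            try {
              // May run while the file is only partially written.
              target.reload(changed);
            } catch (Exception e) {
              // Log and keep the thread alive; a later ENTRY_MODIFY event,
              // fired when the write completes, should reload successfully.
              System.err.println("Failed reloading " + changed + ": " + e);
            }
          }
        }
        key.reset();
      }
    });
    t.setDaemon(true);
    t.start();
  }
}
```

Guarding against the partially-written-file case would need something more, for example validating the parsed result before swapping it in or re-parsing on the next event; this sketch deliberately leaves that out.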
@@ -417,6 +417,9 @@ func (s *Server) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface
// Locked version of checking if events system running. Also checks server.
func (s *Server) eventsRunning() bool {
+ if s == nil {
+ return false
+ }
s.mu.Lock()
er := s.running && s.eventsEnabled()
   s.mu.Unlock()
| 1 |
// Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats-server/v2/server/pse"
)
const (
accLookupReqTokens = 6
accLookupReqSubj = "$SYS.REQ.ACCOUNT.%s.CLAIMS.LOOKUP"
accPackReqSubj = "$SYS.REQ.CLAIMS.PACK"
accListReqSubj = "$SYS.REQ.CLAIMS.LIST"
accClaimsReqSubj = "$SYS.REQ.CLAIMS.UPDATE"
accDeleteReqSubj = "$SYS.REQ.CLAIMS.DELETE"
connectEventSubj = "$SYS.ACCOUNT.%s.CONNECT"
disconnectEventSubj = "$SYS.ACCOUNT.%s.DISCONNECT"
accReqSubj = "$SYS.REQ.ACCOUNT.%s.%s"
// kept for backward compatibility when using http resolver
// this overlaps with the names for events but you'd have to have the operator private key in order to succeed.
accUpdateEventSubjOld = "$SYS.ACCOUNT.%s.CLAIMS.UPDATE"
accUpdateEventSubjNew = "$SYS.REQ.ACCOUNT.%s.CLAIMS.UPDATE"
connsRespSubj = "$SYS._INBOX_.%s"
accConnsEventSubjNew = "$SYS.ACCOUNT.%s.SERVER.CONNS"
accConnsEventSubjOld = "$SYS.SERVER.ACCOUNT.%s.CONNS" // kept for backward compatibility
shutdownEventSubj = "$SYS.SERVER.%s.SHUTDOWN"
authErrorEventSubj = "$SYS.SERVER.%s.CLIENT.AUTH.ERR"
serverStatsSubj = "$SYS.SERVER.%s.STATSZ"
serverDirectReqSubj = "$SYS.REQ.SERVER.%s.%s"
serverPingReqSubj = "$SYS.REQ.SERVER.PING.%s"
serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING" // use $SYS.REQ.SERVER.PING.STATSZ instead
leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT"
remoteLatencyEventSubj = "$SYS.LATENCY.M2.%s"
inboxRespSubj = "$SYS._INBOX.%s.%s"
// FIXME(dlc) - Should account scope, even with wc for now, but later on
// we can then shard as needed.
accNumSubsReqSubj = "$SYS.REQ.ACCOUNT.NSUBS"
// These are for exported debug services. These are local to this server only.
accSubsSubj = "$SYS.DEBUG.SUBSCRIBERS"
shutdownEventTokens = 4
serverSubjectIndex = 2
accUpdateTokensNew = 6
accUpdateTokensOld = 5
accUpdateAccIdxOld = 2
accReqTokens = 5
accReqAccIndex = 3
)
// FIXME(dlc) - make configurable.
var eventsHBInterval = 30 * time.Second
// Used to send and receive messages from inside the server.
type internal struct {
account *Account
client *client
seq uint64
sid int
servers map[string]*serverUpdate
sweeper *time.Timer
stmr *time.Timer
replies map[string]msgHandler
sendq chan *pubMsg
resetCh chan struct{}
wg sync.WaitGroup
orphMax time.Duration
chkOrph time.Duration
statsz time.Duration
shash string
inboxPre string
}
// ServerStatsMsg is sent periodically with stats updates.
type ServerStatsMsg struct {
Server ServerInfo `json:"server"`
Stats ServerStats `json:"statsz"`
}
// ConnectEventMsg is sent when a new connection is made that is part of an account.
type ConnectEventMsg struct {
TypedEvent
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
}
// ConnectEventMsgType is the schema type for ConnectEventMsg
const ConnectEventMsgType = "io.nats.server.advisory.v1.client_connect"
// DisconnectEventMsg is sent when a new connection previously defined from a
// ConnectEventMsg is closed.
type DisconnectEventMsg struct {
TypedEvent
Server ServerInfo `json:"server"`
Client ClientInfo `json:"client"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Reason string `json:"reason"`
}
// DisconnectEventMsgType is the schema type for DisconnectEventMsg
const DisconnectEventMsgType = "io.nats.server.advisory.v1.client_disconnect"
// AccountNumConns is an event that will be sent from a server that is tracking
// a given account when the number of connections changes. It will also HB
// updates in the absence of any changes.
type AccountNumConns struct {
TypedEvent
Server ServerInfo `json:"server"`
Account string `json:"acc"`
Conns int `json:"conns"`
LeafNodes int `json:"leafnodes"`
TotalConns int `json:"total_conns"`
}
const AccountNumConnsMsgType = "io.nats.server.advisory.v1.account_connections"
// accNumConnsReq is sent when we are starting to track an account for the first
// time. We will request others send info to us about their local state.
type accNumConnsReq struct {
Server ServerInfo `json:"server"`
Account string `json:"acc"`
}
// ServerInfo identifies remote servers.
type ServerInfo struct {
Name string `json:"name"`
Host string `json:"host"`
ID string `json:"id"`
Cluster string `json:"cluster,omitempty"`
Version string `json:"ver"`
Seq uint64 `json:"seq"`
JetStream bool `json:"jetstream"`
Time time.Time `json:"time"`
}
// ClientInfo is detailed information about the client forming a connection.
type ClientInfo struct {
Start *time.Time `json:"start,omitempty"`
Host string `json:"host,omitempty"`
ID uint64 `json:"id,omitempty"`
Account string `json:"acc"`
User string `json:"user,omitempty"`
Name string `json:"name,omitempty"`
Lang string `json:"lang,omitempty"`
Version string `json:"ver,omitempty"`
RTT time.Duration `json:"rtt,omitempty"`
Server string `json:"server,omitempty"`
Stop *time.Time `json:"stop,omitempty"`
Jwt string `json:"jwt,omitempty"`
IssuerKey string `json:"issuer_key,omitempty"`
NameTag string `json:"name_tag,omitempty"`
Tags jwt.TagList `json:"tags,omitempty"`
}
// ServerStats hold various statistics that we will periodically send out.
type ServerStats struct {
Start time.Time `json:"start"`
Mem int64 `json:"mem"`
Cores int `json:"cores"`
CPU float64 `json:"cpu"`
Connections int `json:"connections"`
TotalConnections uint64 `json:"total_connections"`
ActiveAccounts int `json:"active_accounts"`
NumSubs uint32 `json:"subscriptions"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
SlowConsumers int64 `json:"slow_consumers"`
Routes []*RouteStat `json:"routes,omitempty"`
Gateways []*GatewayStat `json:"gateways,omitempty"`
}
// RouteStat holds route statistics.
type RouteStat struct {
ID uint64 `json:"rid"`
Name string `json:"name,omitempty"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
Pending int `json:"pending"`
}
// GatewayStat holds gateway statistics.
type GatewayStat struct {
ID uint64 `json:"gwid"`
Name string `json:"name"`
Sent DataStats `json:"sent"`
Received DataStats `json:"received"`
NumInbound int `json:"inbound_connections"`
}
// DataStats reports how may msg and bytes. Applicable for both sent and received.
type DataStats struct {
Msgs int64 `json:"msgs"`
Bytes int64 `json:"bytes"`
}
// Used for internally queueing up messages that the server wants to send.
type pubMsg struct {
c *client
sub string
rply string
si *ServerInfo
msg interface{}
last bool
}
// Used to track server updates.
type serverUpdate struct {
seq uint64
ltime time.Time
}
// TypedEvent is a event or advisory sent by the server that has nats type hints
// typically used for events that might be consumed by 3rd party event systems
type TypedEvent struct {
Type string `json:"type"`
ID string `json:"id"`
Time time.Time `json:"timestamp"`
}
// internalSendLoop will be responsible for serializing all messages that
// a server wants to send.
func (s *Server) internalSendLoop(wg *sync.WaitGroup) {
defer wg.Done()
RESET:
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
sysc := s.sys.client
resetCh := s.sys.resetCh
sendq := s.sys.sendq
id := s.info.ID
host := s.info.Host
servername := s.info.Name
seqp := &s.sys.seq
js := s.js != nil
cluster := s.info.Cluster
if s.gateway.enabled {
cluster = s.getGatewayName()
}
s.mu.Unlock()
// Warn when internal send queue is backed up past 75%
warnThresh := 3 * internalSendQLen / 4
warnFreq := time.Second
last := time.Now().Add(-warnFreq)
for s.eventsRunning() {
// Setup information for next message
if len(sendq) > warnThresh && time.Since(last) >= warnFreq {
s.Warnf("Internal system send queue > 75%%")
last = time.Now()
}
select {
case pm := <-sendq:
if pm.si != nil {
pm.si.Name = servername
pm.si.Host = host
pm.si.Cluster = cluster
pm.si.ID = id
pm.si.Seq = atomic.AddUint64(seqp, 1)
pm.si.Version = VERSION
pm.si.Time = time.Now()
pm.si.JetStream = js
}
var b []byte
if pm.msg != nil {
switch v := pm.msg.(type) {
case string:
b = []byte(v)
case []byte:
b = v
default:
b, _ = json.MarshalIndent(pm.msg, _EMPTY_, " ")
}
}
// Setup our client. If the user wants to use a non-system account use our internal
// account scoped here so that we are not changing out accounts for the system client.
var c *client
if pm.c != nil {
c = pm.c
} else {
c = sysc
}
// Grab client lock.
c.mu.Lock()
// Prep internal structures needed to send message.
c.pa.subject = []byte(pm.sub)
c.pa.size = len(b)
c.pa.szb = []byte(strconv.FormatInt(int64(len(b)), 10))
c.pa.reply = []byte(pm.rply)
trace := c.trace
c.mu.Unlock()
// Add in NL
b = append(b, _CRLF_...)
if trace {
c.traceInOp(fmt.Sprintf("PUB %s %s %d", c.pa.subject, c.pa.reply, c.pa.size), nil)
c.traceMsg(b)
}
// Process like a normal inbound msg.
c.processInboundClientMsg(b)
// See if we are doing graceful shutdown.
if !pm.last {
c.flushClients(0) // Never spend time in place.
} else {
// For the Shutdown event, we need to send in place otherwise
// there is a chance that the process will exit before the
// writeLoop has a chance to send it.
c.flushClients(time.Second)
return
}
case <-resetCh:
goto RESET
case <-s.quitCh:
return
}
}
}
// Will send a shutdown message.
func (s *Server) sendShutdownEvent() {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return
}
subj := fmt.Sprintf(shutdownEventSubj, s.info.ID)
sendq := s.sys.sendq
// Stop any more messages from queueing up.
s.sys.sendq = nil
// Unhook all msgHandlers. Normal client cleanup will deal with subs, etc.
s.sys.replies = nil
s.mu.Unlock()
// Send to the internal queue and mark as last.
sendq <- &pubMsg{nil, subj, _EMPTY_, nil, nil, true}
}
// Used to send an internal message to an arbitrary account.
func (s *Server) sendInternalAccountMsg(a *Account, subject string, msg interface{}) error {
s.mu.Lock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.Unlock()
return ErrNoSysAccount
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
c := s.sys.client
s.mu.Unlock()
// Replace our client with the account's internal client.
if a != nil {
c = a.internalClient()
}
sendq <- &pubMsg{c, subject, _EMPTY_, nil, msg, false}
return nil
}
// This will queue up a message to be sent.
// Lock should not be held.
func (s *Server) sendInternalMsgLocked(sub, rply string, si *ServerInfo, msg interface{}) {
s.mu.Lock()
s.sendInternalMsg(sub, rply, si, msg)
s.mu.Unlock()
}
// This will queue up a message to be sent.
// Assumes lock is held on entry.
func (s *Server) sendInternalMsg(sub, rply string, si *ServerInfo, msg interface{}) {
if s.sys == nil || s.sys.sendq == nil {
return
}
sendq := s.sys.sendq
// Don't hold lock while placing on the channel.
s.mu.Unlock()
sendq <- &pubMsg{nil, sub, rply, si, msg, false}
s.mu.Lock()
}
// Locked version of checking if events system running. Also checks server.
func (s *Server) eventsRunning() bool {
s.mu.Lock()
er := s.running && s.eventsEnabled()
s.mu.Unlock()
return er
}
// EventsEnabled will report if the server has internal events enabled via
// a defined system account.
func (s *Server) EventsEnabled() bool {
s.mu.Lock()
ee := s.eventsEnabled()
s.mu.Unlock()
return ee
}
// eventsEnabled will report if events are enabled.
// Lock should be held.
func (s *Server) eventsEnabled() bool {
return s.sys != nil && s.sys.client != nil && s.sys.account != nil
}
// TrackedRemoteServers returns how many remote servers we are tracking
// from a system events perspective.
func (s *Server) TrackedRemoteServers() int {
s.mu.Lock()
if !s.running || !s.eventsEnabled() {
return -1
}
ns := len(s.sys.servers)
s.mu.Unlock()
return ns
}
// Check for orphan servers who may have gone away without notification.
// This should be wrapChk() to setup common locking.
func (s *Server) checkRemoteServers() {
now := time.Now()
for sid, su := range s.sys.servers {
if now.Sub(su.ltime) > s.sys.orphMax {
s.Debugf("Detected orphan remote server: %q", sid)
// Simulate it going away.
s.processRemoteServerShutdown(sid)
delete(s.sys.servers, sid)
}
}
if s.sys.sweeper != nil {
s.sys.sweeper.Reset(s.sys.chkOrph)
}
}
// Grab RSS and PCPU
func updateServerUsage(v *ServerStats) {
var rss, vss int64
var pcpu float64
pse.ProcUsage(&pcpu, &rss, &vss)
v.Mem = rss
v.CPU = pcpu
v.Cores = numCores
}
// Generate a route stat for our statz update.
func routeStat(r *client) *RouteStat {
if r == nil {
return nil
}
r.mu.Lock()
rs := &RouteStat{
ID: r.cid,
Sent: DataStats{
Msgs: atomic.LoadInt64(&r.outMsgs),
Bytes: atomic.LoadInt64(&r.outBytes),
},
Received: DataStats{
Msgs: atomic.LoadInt64(&r.inMsgs),
Bytes: atomic.LoadInt64(&r.inBytes),
},
Pending: int(r.out.pb),
}
if r.route != nil {
rs.Name = r.route.remoteName
}
r.mu.Unlock()
return rs
}
// Actual send method for statz updates.
// Lock should be held.
func (s *Server) sendStatsz(subj string) {
m := ServerStatsMsg{}
updateServerUsage(&m.Stats)
m.Stats.Start = s.start
m.Stats.Connections = len(s.clients)
m.Stats.TotalConnections = s.totalClients
m.Stats.ActiveAccounts = int(atomic.LoadInt32(&s.activeAccounts))
m.Stats.Received.Msgs = atomic.LoadInt64(&s.inMsgs)
m.Stats.Received.Bytes = atomic.LoadInt64(&s.inBytes)
m.Stats.Sent.Msgs = atomic.LoadInt64(&s.outMsgs)
m.Stats.Sent.Bytes = atomic.LoadInt64(&s.outBytes)
m.Stats.SlowConsumers = atomic.LoadInt64(&s.slowConsumers)
m.Stats.NumSubs = s.numSubscriptions()
for _, r := range s.routes {
m.Stats.Routes = append(m.Stats.Routes, routeStat(r))
}
if s.gateway.enabled {
gw := s.gateway
gw.RLock()
for name, c := range gw.out {
gs := &GatewayStat{Name: name}
c.mu.Lock()
gs.ID = c.cid
gs.Sent = DataStats{
Msgs: atomic.LoadInt64(&c.outMsgs),
Bytes: atomic.LoadInt64(&c.outBytes),
}
c.mu.Unlock()
// Gather matching inbound connections
gs.Received = DataStats{}
for _, c := range gw.in {
c.mu.Lock()
if c.gw.name == name {
gs.Received.Msgs += atomic.LoadInt64(&c.inMsgs)
gs.Received.Bytes += atomic.LoadInt64(&c.inBytes)
gs.NumInbound++
}
c.mu.Unlock()
}
m.Stats.Gateways = append(m.Stats.Gateways, gs)
}
gw.RUnlock()
}
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
}
// Send out our statz update.
// This should be wrapChk() to setup common locking.
func (s *Server) heartbeatStatsz() {
if s.sys.stmr != nil {
s.sys.stmr.Reset(s.sys.statsz)
}
s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID))
}
// This should be wrapChk() to setup common locking.
func (s *Server) startStatszTimer() {
s.sys.stmr = time.AfterFunc(s.sys.statsz, s.wrapChk(s.heartbeatStatsz))
}
// Start a ticker that will fire periodically and check for orphaned servers.
// This should be wrapChk() to setup common locking.
func (s *Server) startRemoteServerSweepTimer() {
s.sys.sweeper = time.AfterFunc(s.sys.chkOrph, s.wrapChk(s.checkRemoteServers))
}
// Length of our system hash used for server targeted messages.
const sysHashLen = 8
// Computes a hash of 8 characters for the name.
func getHash(name string) []byte {
return getHashSize(name, sysHashLen)
}
// Returns the node name for this server which is a hash of the server name.
func (s *Server) Node() string {
s.mu.Lock()
defer s.mu.Unlock()
if s.sys != nil {
return s.sys.shash
}
return _EMPTY_
}
// This will setup our system wide tracking subs.
// For now we will setup one wildcard subscription to
// monitor all accounts for changes in number of connections.
// We can make this on a per account tracking basis if needed.
// Tradeoff is subscription and interest graph events vs connect and
// disconnect events, etc.
func (s *Server) initEventTracking() {
if !s.EventsEnabled() {
return
}
// Create a system hash which we use for other servers to target us specifically.
s.sys.shash = string(getHash(s.info.Name))
// This will be for all inbox responses.
subject := fmt.Sprintf(inboxRespSubj, s.sys.shash, "*")
if _, err := s.sysSubscribe(subject, s.inboxReply); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
s.sys.inboxPre = subject
// This is for remote updates for connection accounting.
subject = fmt.Sprintf(accConnsEventSubjOld, "*")
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking for %s: %v", subject, err)
}
// This will be for responses for account info that we send out.
subject = fmt.Sprintf(connsRespSubj, s.info.ID)
if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for broad requests to respond with number of subscriptions for a given subject.
if _, err := s.sysSubscribe(accNumSubsReqSubj, s.nsubsRequest); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for all server shutdowns.
subject = fmt.Sprintf(shutdownEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.remoteServerShutdown); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// Listen for account claims updates.
subscribeToUpdate := true
if s.accResolver != nil {
subscribeToUpdate = !s.accResolver.IsTrackingUpdate()
}
if subscribeToUpdate {
for _, sub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} {
if _, err := s.sysSubscribe(fmt.Sprintf(sub, "*"), s.accountClaimUpdate); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
}
// Listen for ping messages that will be sent to all servers for statsz.
// This subscription is kept for backwards compatibility. Got replaced by ...PING.STATZ from below
if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.statszReq); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
monSrvc := map[string]msgHandler{
"STATSZ": s.statszReq,
"VARZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &VarzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) })
},
"SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &SubszEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) })
},
"CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &ConnzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) })
},
"ROUTEZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &RoutezEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) })
},
"GATEWAYZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &GatewayzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) })
},
"LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &LeafzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) })
},
"ACCOUNTZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &AccountzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) })
},
}
for name, req := range monSrvc {
subject = fmt.Sprintf(serverDirectReqSubj, s.info.ID, name)
if _, err := s.sysSubscribe(subject, req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
subject = fmt.Sprintf(serverPingReqSubj, name)
if _, err := s.sysSubscribe(subject, req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
extractAccount := func(subject string) (string, error) {
if tk := strings.Split(subject, tsep); len(tk) != accReqTokens {
return "", fmt.Errorf("subject %q is malformed", subject)
} else {
return tk[accReqAccIndex], nil
}
}
monAccSrvc := map[string]msgHandler{
"SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &SubszEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.SubszOptions.Subscriptions = true
optz.SubszOptions.Account = acc
return s.Subsz(&optz.SubszOptions)
}
})
},
"CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &ConnzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.ConnzOptions.Account = acc
return s.Connz(&optz.ConnzOptions)
}
})
},
"LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &LeafzEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
optz.LeafzOptions.Account = acc
return s.Leafz(&optz.LeafzOptions)
}
})
},
"INFO": func(sub *subscription, _ *client, subject, reply string, msg []byte) {
optz := &AccInfoEventOptions{}
s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) {
if acc, err := extractAccount(subject); err != nil {
return nil, err
} else {
return s.accountInfo(acc)
}
})
},
"CONNS": s.connsRequest,
}
for name, req := range monAccSrvc {
if _, err := s.sysSubscribe(fmt.Sprintf(accReqSubj, "*", name), req); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
}
// Listen for updates when leaf nodes connect for a given account. This will
// force any gateway connections to move to `modeInterestOnly`
subject = fmt.Sprintf(leafNodeConnectEventSubj, "*")
if _, err := s.sysSubscribe(subject, s.leafNodeConnected); err != nil {
s.Errorf("Error setting up internal tracking: %v", err)
}
// For tracking remote latency measurements.
subject = fmt.Sprintf(remoteLatencyEventSubj, s.sys.shash)
if _, err := s.sysSubscribe(subject, s.remoteLatencyUpdate); err != nil {
s.Errorf("Error setting up internal latency tracking: %v", err)
}
// This is for simple debugging of number of subscribers that exist in the system.
if _, err := s.sysSubscribeInternal(accSubsSubj, s.debugSubscribers); err != nil {
s.Errorf("Error setting up internal debug service for subscribers: %v", err)
}
}
// add all exports a system account will need
func (s *Server) addSystemAccountExports(sacc *Account) {
if !s.EventsEnabled() {
return
}
if err := sacc.AddServiceExport(accSubsSubj, nil); err != nil {
s.Errorf("Error adding system service export for %q: %v", accSubsSubj, err)
}
}
// accountClaimUpdate will receive claim updates for accounts.
func (s *Server) accountClaimUpdate(sub *subscription, _ *client, subject, resp string, msg []byte) {
if !s.EventsEnabled() {
return
}
pubKey := ""
toks := strings.Split(subject, tsep)
if len(toks) == accUpdateTokensNew {
pubKey = toks[accReqAccIndex]
} else if len(toks) == accUpdateTokensOld {
pubKey = toks[accUpdateAccIdxOld]
} else {
s.Debugf("Received account claims update on bad subject %q", subject)
return
}
if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if claim.Subject != pubKey {
err := errors.New("subject does not match jwt content")
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else if v, ok := s.accounts.Load(pubKey); !ok {
respondToUpdate(s, resp, pubKey, "jwt update skipped", nil)
} else if err := s.updateAccountWithClaimJWT(v.(*Account), string(msg)); err != nil {
respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err)
} else {
respondToUpdate(s, resp, pubKey, "jwt updated", nil)
}
}
// processRemoteServerShutdown will update any affected accounts.
// Will update the remote count for clients.
// Lock assume held.
func (s *Server) processRemoteServerShutdown(sid string) {
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).removeRemoteServer(sid)
return true
})
}
// remoteServerShutdownEvent is called when we get an event from another server shutting down.
func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, reply string, msg []byte) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() {
return
}
toks := strings.Split(subject, tsep)
if len(toks) < shutdownEventTokens {
s.Debugf("Received remote server shutdown on bad subject %q", subject)
return
}
sid := toks[serverSubjectIndex]
su := s.sys.servers[sid]
if su != nil {
s.processRemoteServerShutdown(sid)
}
}
// updateRemoteServer is called when we have an update from a remote server.
// This allows us to track remote servers, respond to shutdown messages properly,
// make sure that messages are ordered, and allow us to prune dead servers.
// Lock should be held upon entry.
func (s *Server) updateRemoteServer(ms *ServerInfo) {
su := s.sys.servers[ms.ID]
if su == nil {
s.sys.servers[ms.ID] = &serverUpdate{ms.Seq, time.Now()}
s.processNewServer(ms)
} else {
// Should always be going up.
if ms.Seq <= su.seq {
s.Errorf("Received out of order remote server update from: %q", ms.ID)
return
}
su.seq = ms.Seq
su.ltime = time.Now()
}
}
// processNewServer will hold any logic we want to use when we discover a new server.
// Lock should be held upon entry.
func (s *Server) processNewServer(ms *ServerInfo) {
// Right now we only check if we have leafnode servers and if so send another
// connect update to make sure they switch this account to interest only mode.
s.ensureGWsInterestOnlyForLeafNodes()
// Add to our nodeToName
s.nodeToName[string(getHash(ms.Name))] = ms.Name
}
// If GW is enabled on this server and there are any leaf node connections,
// this function will send a LeafNode connect system event to the super cluster
// to ensure that the GWs are in interest-only mode for this account.
// Lock should be held upon entry.
// TODO(dlc) - this will cause this account to be loaded on all servers. Need a better
// way with GW2.
func (s *Server) ensureGWsInterestOnlyForLeafNodes() {
if !s.gateway.enabled || len(s.leafs) == 0 {
return
}
sent := make(map[*Account]bool, len(s.leafs))
for _, c := range s.leafs {
if !sent[c.acc] {
s.sendLeafNodeConnectMsg(c.acc.Name)
sent[c.acc] = true
}
}
}
// shutdownEventing will clean up all eventing state.
func (s *Server) shutdownEventing() {
if !s.eventsRunning() {
return
}
s.mu.Lock()
clearTimer(&s.sys.sweeper)
clearTimer(&s.sys.stmr)
s.mu.Unlock()
// We will queue up a shutdown event and wait for the
// internal send loop to exit.
s.sendShutdownEvent()
s.sys.wg.Wait()
close(s.sys.resetCh)
s.mu.Lock()
defer s.mu.Unlock()
// Whip through all accounts.
s.accounts.Range(func(k, v interface{}) bool {
v.(*Account).clearEventing()
return true
})
// Turn everything off here.
s.sys = nil
}
// Request for our local connection count.
func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
tk := strings.Split(subject, tsep)
if len(tk) != accReqTokens {
s.sys.client.Errorf("Bad subject account connections request message")
return
}
a := tk[accReqAccIndex]
m := accNumConnsReq{Account: a}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
if m.Account != a {
s.sys.client.Errorf("Error unmarshalled account does not match subject")
return
}
// Here we really only want to lookup the account if its local. We do not want to fetch this
// account if we have no interest in it.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
if acc == nil {
return
}
// We know this is a local connection.
if nlc := acc.NumLocalConnections(); nlc > 0 {
s.mu.Lock()
s.sendAccConnsUpdate(acc, reply)
s.mu.Unlock()
}
}
// leafNodeConnected is an event we will receive when a leaf node for a given account connects.
func (s *Server) leafNodeConnected(sub *subscription, _ *client, subject, reply string, msg []byte) {
m := accNumConnsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err)
return
}
s.mu.Lock()
na := m.Account == "" || !s.eventsEnabled() || !s.gateway.enabled
s.mu.Unlock()
if na {
return
}
if acc, _ := s.lookupAccount(m.Account); acc != nil {
s.switchAccountToInterestMode(acc.Name)
}
}
// Common filter options for system requests STATSZ VARZ SUBSZ CONNZ ROUTEZ GATEWAYZ LEAFZ
type EventFilterOptions struct {
Name string `json:"server_name,omitempty"` // filter by server name
Cluster string `json:"cluster,omitempty"` // filter by cluster name
Host string `json:"host,omitempty"` // filter by host name
Tags []string `json:"tags,omitempty"` // filter by tags (must match all tags)
}
// StatszEventOptions are options passed to Statsz
type StatszEventOptions struct {
// No actual options yet
EventFilterOptions
}
// Options for account Info
type AccInfoEventOptions struct {
// No actual options yet
EventFilterOptions
}
// In the context of system events, ConnzEventOptions are options passed to Connz
type ConnzEventOptions struct {
ConnzOptions
EventFilterOptions
}
// In the context of system events, RoutezEventOptions are options passed to Routez
type RoutezEventOptions struct {
RoutezOptions
EventFilterOptions
}
// In the context of system events, SubzEventOptions are options passed to Subz
type SubszEventOptions struct {
SubszOptions
EventFilterOptions
}
// In the context of system events, VarzEventOptions are options passed to Varz
type VarzEventOptions struct {
VarzOptions
EventFilterOptions
}
// In the context of system events, GatewayzEventOptions are options passed to Gatewayz
type GatewayzEventOptions struct {
GatewayzOptions
EventFilterOptions
}
// In the context of system events, LeafzEventOptions are options passed to Leafz
type LeafzEventOptions struct {
LeafzOptions
EventFilterOptions
}
// In the context of system events, AccountzEventOptions are options passed to Accountz
type AccountzEventOptions struct {
AccountzOptions
EventFilterOptions
}
// returns true if the request does NOT apply to this server and can be ignored.
// DO NOT hold the server lock when
func (s *Server) filterRequest(fOpts *EventFilterOptions) bool {
if fOpts.Name != "" && !strings.Contains(s.info.Name, fOpts.Name) {
return true
}
if fOpts.Host != "" && !strings.Contains(s.info.Host, fOpts.Host) {
return true
}
if fOpts.Cluster != "" {
s.mu.Lock()
cluster := s.info.Cluster
s.mu.Unlock()
if !strings.Contains(cluster, fOpts.Cluster) {
return true
}
}
if len(fOpts.Tags) > 0 {
opts := s.getOpts()
for _, t := range fOpts.Tags {
if !opts.Tags.Contains(t) {
return true
}
}
}
return false
}
// statszReq is a request for us to respond with current statsz.
func (s *Server) statszReq(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.EventsEnabled() || reply == _EMPTY_ {
return
}
opts := StatszEventOptions{}
if len(msg) != 0 {
if err := json.Unmarshal(msg, &opts); err != nil {
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
response["error"] = map[string]interface{}{
"code": http.StatusBadRequest,
"description": err.Error(),
}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
return
} else if ignore := s.filterRequest(&opts.EventFilterOptions); ignore {
return
}
}
s.mu.Lock()
s.sendStatsz(reply)
s.mu.Unlock()
}
func (s *Server) zReq(reply string, msg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) {
if !s.EventsEnabled() || reply == _EMPTY_ {
return
}
server := &ServerInfo{}
response := map[string]interface{}{"server": server}
var err error
status := 0
if len(msg) != 0 {
if err = json.Unmarshal(msg, optz); err != nil {
status = http.StatusBadRequest // status is only included on error, so record how far execution got
} else if s.filterRequest(fOpts) {
return
}
}
if err == nil {
response["data"], err = respf()
status = http.StatusInternalServerError
}
if err != nil {
response["error"] = map[string]interface{}{
"code": status,
"description": err.Error(),
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, server, response)
}
// remoteConnsUpdate gets called when we receive a remote update from another server.
func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := AccountNumConns{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account connection event message: %v", err)
return
}
// See if we have the account registered, if not drop it.
// Make sure this does not force us to load this account here.
var acc *Account
if v, ok := s.accounts.Load(m.Account); ok {
acc = v.(*Account)
}
// Silently ignore these if we do not have local interest in the account.
if acc == nil {
return
}
s.mu.Lock()
// check again here if we have been shutdown.
if !s.running || !s.eventsEnabled() {
s.mu.Unlock()
return
}
// Double check that this is not us, should never happen, so error if it does.
if m.Server.ID == s.info.ID {
s.sys.client.Errorf("Processing our own account connection event message: ignored")
s.mu.Unlock()
return
}
// If we are here we have interest in tracking this account. Update our accounting.
clients := acc.updateRemoteServer(&m)
s.updateRemoteServer(&m.Server)
s.mu.Unlock()
// Need to close clients outside of server lock
for _, c := range clients {
c.maxAccountConnExceeded()
}
}
// Setup tracking for this account. This allows us to track global account activity.
// Lock should be held on entry.
func (s *Server) enableAccountTracking(a *Account) {
if a == nil || !s.eventsEnabled() {
return
}
// TODO(ik): Generate payload although message may not be sent.
// May need to ensure we do so only if there is a known interest.
// This can get complicated with gateways.
subj := fmt.Sprintf(accReqSubj, a.Name, "CONNS")
reply := fmt.Sprintf(connsRespSubj, s.info.ID)
m := accNumConnsReq{Account: a.Name}
s.sendInternalMsg(subj, reply, &m.Server, &m)
}
// Event on leaf node connect.
// Lock should NOT be held on entry.
func (s *Server) sendLeafNodeConnect(a *Account) {
s.mu.Lock()
// If we are not in operator mode, or do not have any gateways defined, this should also be a no-op.
if a == nil || !s.eventsEnabled() || !s.gateway.enabled {
s.mu.Unlock()
return
}
s.sendLeafNodeConnectMsg(a.Name)
s.mu.Unlock()
s.switchAccountToInterestMode(a.Name)
}
// Send the leafnode connect message.
// Lock should be held.
func (s *Server) sendLeafNodeConnectMsg(accName string) {
subj := fmt.Sprintf(leafNodeConnectEventSubj, accName)
m := accNumConnsReq{Account: accName}
s.sendInternalMsg(subj, "", &m.Server, &m)
}
// sendAccConnsUpdate is called to send out our information on the
// account's local connections.
// Lock should be held on entry.
func (s *Server) sendAccConnsUpdate(a *Account, subj ...string) {
if !s.eventsEnabled() || a == nil {
return
}
sendQ := s.sys.sendq
if sendQ == nil {
return
}
// Build event with account name and number of local clients and leafnodes.
eid := s.nextEventID()
a.mu.Lock()
s.mu.Unlock()
localConns := a.numLocalConnections()
m := &AccountNumConns{
TypedEvent: TypedEvent{
Type: AccountNumConnsMsgType,
ID: eid,
Time: time.Now().UTC(),
},
Account: a.Name,
Conns: localConns,
LeafNodes: a.numLocalLeafNodes(),
TotalConns: localConns + a.numLocalLeafNodes(),
}
// Set timer to fire again unless we are at zero.
if localConns == 0 {
clearTimer(&a.ctmr)
} else {
// Check to see if we have an HB running and update.
if a.ctmr == nil {
a.ctmr = time.AfterFunc(eventsHBInterval, func() { s.accConnsUpdate(a) })
} else {
a.ctmr.Reset(eventsHBInterval)
}
}
for _, sub := range subj {
msg := &pubMsg{nil, sub, _EMPTY_, &m.Server, &m, false}
select {
case sendQ <- msg:
default:
a.mu.Unlock()
sendQ <- msg
a.mu.Lock()
}
}
a.mu.Unlock()
s.mu.Lock()
}
// accConnsUpdate is called whenever there is a change to the account's
// number of active connections, or during a heartbeat.
func (s *Server) accConnsUpdate(a *Account) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.eventsEnabled() || a == nil {
return
}
s.sendAccConnsUpdate(a, fmt.Sprintf(accConnsEventSubjOld, a.Name), fmt.Sprintf(accConnsEventSubjNew, a.Name))
}
// server lock should be held
func (s *Server) nextEventID() string {
return s.eventIds.Next()
}
// accountConnectEvent will send an account client connect event if there is interest.
// This is a billing event.
func (s *Server) accountConnectEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
gacc := s.gacc
eid := s.nextEventID()
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := ConnectEventMsg{
TypedEvent: TypedEvent{
Type: ConnectEventMsgType,
ID: eid,
Time: time.Now().UTC(),
},
Client: ClientInfo{
Start: &c.start,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
Jwt: c.opts.JWT,
IssuerKey: issuerForClient(c),
Tags: c.tags,
NameTag: c.nameTag,
},
}
c.mu.Unlock()
subj := fmt.Sprintf(connectEventSubj, c.acc.Name)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
// accountDisconnectEvent will send an account client disconnect event if there is interest.
// This is a billing event.
func (s *Server) accountDisconnectEvent(c *client, now time.Time, reason string) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
gacc := s.gacc
eid := s.nextEventID()
s.mu.Unlock()
c.mu.Lock()
// Ignore global account activity
if c.acc == nil || c.acc == gacc {
c.mu.Unlock()
return
}
m := DisconnectEventMsg{
TypedEvent: TypedEvent{
Type: DisconnectEventMsgType,
ID: eid,
Time: now.UTC(),
},
Client: ClientInfo{
Start: &c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
Jwt: c.opts.JWT,
IssuerKey: issuerForClient(c),
Tags: c.tags,
NameTag: c.nameTag,
},
Sent: DataStats{
Msgs: atomic.LoadInt64(&c.inMsgs),
Bytes: atomic.LoadInt64(&c.inBytes),
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: reason,
}
accName := c.acc.Name
c.mu.Unlock()
subj := fmt.Sprintf(disconnectEventSubj, accName)
s.sendInternalMsgLocked(subj, _EMPTY_, &m.Server, &m)
}
func (s *Server) sendAuthErrorEvent(c *client) {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
eid := s.nextEventID()
s.mu.Unlock()
now := time.Now()
c.mu.Lock()
m := DisconnectEventMsg{
TypedEvent: TypedEvent{
Type: DisconnectEventMsgType,
ID: eid,
Time: now.UTC(),
},
Client: ClientInfo{
Start: &c.start,
Stop: &now,
Host: c.host,
ID: c.cid,
Account: accForClient(c),
User: c.getRawAuthUser(),
Name: c.opts.Name,
Lang: c.opts.Lang,
Version: c.opts.Version,
RTT: c.getRTT(),
Jwt: c.opts.JWT,
IssuerKey: issuerForClient(c),
Tags: c.tags,
NameTag: c.nameTag,
},
Sent: DataStats{
Msgs: c.inMsgs,
Bytes: c.inBytes,
},
Received: DataStats{
Msgs: c.outMsgs,
Bytes: c.outBytes,
},
Reason: AuthenticationViolation.String(),
}
c.mu.Unlock()
s.mu.Lock()
subj := fmt.Sprintf(authErrorEventSubj, s.info.ID)
s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m)
s.mu.Unlock()
}
// Internal message callback. If the msg is needed past the callback it is
// required to be copied.
type msgHandler func(sub *subscription, client *client, subject, reply string, msg []byte)
// Create an internal subscription. sysSubscribeQ for queue groups.
func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, _EMPTY_, false, nil, cb)
}
// Create an internal subscription with queue
func (s *Server) sysSubscribeQ(subject, queue string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, queue, false, nil, cb)
}
// Create an internal subscription but do not forward interest.
func (s *Server) sysSubscribeInternal(subject string, cb msgHandler) (*subscription, error) {
return s.systemSubscribe(subject, _EMPTY_, true, nil, cb)
}
func (s *Server) systemSubscribe(subject, queue string, internalOnly bool, c *client, cb msgHandler) (*subscription, error) {
if !s.eventsEnabled() {
return nil, ErrNoSysAccount
}
if cb == nil {
return nil, fmt.Errorf("undefined message handler")
}
s.mu.Lock()
if c == nil {
c = s.sys.client
}
trace := c.trace
s.sys.sid++
sid := strconv.Itoa(s.sys.sid)
s.mu.Unlock()
// Now create the subscription
if trace {
c.traceInOp("SUB", []byte(subject+" "+queue+" "+sid))
}
var q []byte
if queue != "" {
q = []byte(queue)
}
// Now create the subscription
return c.processSub([]byte(subject), q, []byte(sid), cb, internalOnly)
}
func (s *Server) sysUnsubscribe(sub *subscription) {
if sub == nil {
return
}
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
acc := s.sys.account
c := s.sys.client
s.mu.Unlock()
c.unsubscribe(acc, sub, true, true)
}
// This will generate the tracking subject for remote latency from the response subject.
func remoteLatencySubjectForResponse(subject []byte) string {
if !isTrackedReply(subject) {
return ""
}
toks := bytes.Split(subject, []byte(tsep))
// FIXME(dlc) - Sprintf may become a performance concern at some point.
return fmt.Sprintf(remoteLatencyEventSubj, toks[len(toks)-2])
}
// remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services.
func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, subject, _ string, msg []byte) {
if !s.eventsRunning() {
return
}
rl := remoteLatency{}
if err := json.Unmarshal(msg, &rl); err != nil {
s.Errorf("Error unmarshalling remote latency measurement: %v", err)
return
}
// Now we need to look up the responseServiceImport associated with this measurement.
acc, err := s.LookupAccount(rl.Account)
if err != nil {
s.Warnf("Could not lookup account %q for latency measurement", rl.Account)
return
}
// Now get the request id / reply. We need to see if we have a GW prefix and if so strip that off.
reply := rl.ReqId
if gwPrefix, old := isGWRoutedSubjectAndIsOldPrefix([]byte(reply)); gwPrefix {
reply = string(getSubjectFromGWRoutedReply([]byte(reply), old))
}
acc.mu.RLock()
si := acc.exports.responses[reply]
if si == nil {
acc.mu.RUnlock()
return
}
m1 := si.m1
m2 := rl.M2
lsub := si.latency.subject
acc.mu.RUnlock()
// So we have not processed the response tracking measurement yet.
if m1 == nil {
si.acc.mu.Lock()
// Double check since could have slipped in.
m1 = si.m1
if m1 == nil {
// Store our value there for them to pick up.
si.m1 = &m2
}
si.acc.mu.Unlock()
if m1 == nil {
return
}
}
// Calculate the correct latencies given M1 and M2.
m1.merge(&m2)
// Clear the requesting client since we send the result here.
acc.mu.Lock()
si.rc = nil
acc.mu.Unlock()
// Make sure we remove the entry here.
acc.removeServiceImport(si.from)
// Send the metrics
s.sendInternalAccountMsg(acc, lsub, m1)
}
// This is used for all inbox replies so that we do not send supercluster wide interest
// updates for every request. Same trick used in modern NATS clients.
func (s *Server) inboxReply(sub *subscription, c *client, subject, reply string, msg []byte) {
s.mu.Lock()
if !s.eventsEnabled() || s.sys.replies == nil {
s.mu.Unlock()
return
}
cb, ok := s.sys.replies[subject]
s.mu.Unlock()
if ok && cb != nil {
cb(sub, c, subject, reply, msg)
}
}
// Copied from go client.
// We could use serviceReply here instead to save some code.
// I prefer these semantics for the moment, when tracing you know what this is.
const (
InboxPrefix = "$SYS._INBOX."
inboxPrefixLen = len(InboxPrefix)
respInboxPrefixLen = inboxPrefixLen + sysHashLen + 1
replySuffixLen = 8 // Gives us 62^8
)
// Creates an internal inbox used for replies that will be processed by the global wc handler.
func (s *Server) newRespInbox() string {
var b [respInboxPrefixLen + replySuffixLen]byte
pres := b[:respInboxPrefixLen]
copy(pres, s.sys.inboxPre)
rn := rand.Int63()
for i, l := respInboxPrefixLen, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
return string(b[:])
}
// accNumSubsReq is sent when we need to gather remote info on subs.
type accNumSubsReq struct {
Account string `json:"acc"`
Subject string `json:"subject"`
Queue []byte `json:"queue,omitempty"`
}
// helper function to total information from results to count subs.
func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) {
if rr == nil {
return
}
checkSub := func(sub *subscription) {
// TODO(dlc) - This could be smarter.
if qg != nil && !bytes.Equal(qg, sub.queue) {
return
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
if qg == nil {
for _, sub := range rr.psubs {
checkSub(sub)
}
}
for _, qsub := range rr.qsubs {
for _, sub := range qsub {
checkSub(sub)
}
}
return
}
// Allows users of large systems to debug active subscribers for a given subject.
// Payload should be the subject of interest.
func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply string, rmsg []byte) {
// Even though this is an internal only subscription, meaning interest was not forwarded, we could
// get one here from a GW in optimistic mode. Ignore for now.
// FIXME(dlc) - Should we send no interest here back to the GW?
if c.kind != CLIENT {
return
}
_, acc, _, msg, err := s.getRequestInfo(c, rmsg)
if err != nil {
return
}
// We could have a single subject or we could have a subject and a wildcard separated by whitespace.
args := strings.Split(strings.TrimSpace(string(msg)), " ")
if len(args) == 0 {
s.sendInternalAccountMsg(acc, reply, 0)
return
}
tsubj := args[0]
var qgroup []byte
if len(args) > 1 {
qgroup = []byte(args[1])
}
var nsubs int32
if subjectIsLiteral(tsubj) {
// We will look up subscribers locally first then determine if we need to solicit other servers.
rr := acc.sl.Match(tsubj)
nsubs = totalSubs(rr, qgroup)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if subjectIsSubsetMatch(string(sub.subject), tsubj) {
if qgroup != nil && !bytes.Equal(qgroup, sub.queue) {
continue
}
if sub.client.kind == CLIENT || sub.client.isHubLeafNode() {
nsubs++
}
}
}
}
// We should have an idea of how many responses to expect from remote servers.
var expected = acc.expectedRemoteResponses()
// If we are only local, go ahead and return.
if expected == 0 {
s.sendInternalAccountMsg(nil, reply, nsubs)
return
}
// We need to solicit from others.
// To track status.
responses := int32(0)
done := make(chan (bool))
s.mu.Lock()
// Create direct reply inbox that we multiplex under the WC replies.
replySubj := s.newRespInbox()
// Store our handler.
s.sys.replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
if n, err := strconv.Atoi(string(msg)); err == nil {
atomic.AddInt32(&nsubs, int32(n))
}
if atomic.AddInt32(&responses, 1) >= expected {
select {
case done <- true:
default:
}
}
}
// Send the request to the other servers.
request := &accNumSubsReq{
Account: acc.Name,
Subject: tsubj,
Queue: qgroup,
}
s.sendInternalMsg(accNumSubsReqSubj, replySubj, nil, request)
s.mu.Unlock()
// FIXME(dlc) - We should rate limit here instead of blind Go routine.
go func() {
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
// Cleanup the WC entry.
var sendResponse bool
s.mu.Lock()
if s.sys != nil && s.sys.replies != nil {
delete(s.sys.replies, replySubj)
sendResponse = true
}
s.mu.Unlock()
if sendResponse {
// Send the response.
s.sendInternalAccountMsg(nil, reply, atomic.LoadInt32(&nsubs))
}
}()
}
// Request for our local subscription count. This will come from a remote origin server
// that received the initial request.
func (s *Server) nsubsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) {
if !s.eventsRunning() {
return
}
m := accNumSubsReq{}
if err := json.Unmarshal(msg, &m); err != nil {
s.sys.client.Errorf("Error unmarshalling account nsubs request message: %v", err)
return
}
// Grab account.
acc, _ := s.lookupAccount(m.Account)
if acc == nil || acc.numLocalAndLeafConnections() == 0 {
return
}
// We will look up subscribers locally first then determine if we need to solicit other servers.
var nsubs int32
if subjectIsLiteral(m.Subject) {
rr := acc.sl.Match(m.Subject)
nsubs = totalSubs(rr, m.Queue)
} else {
// We have a wildcard, so this is a bit slower path.
var _subs [32]*subscription
subs := _subs[:0]
acc.sl.All(&subs)
for _, sub := range subs {
if (sub.client.kind == CLIENT || sub.client.isHubLeafNode()) && subjectIsSubsetMatch(string(sub.subject), m.Subject) {
if m.Queue != nil && !bytes.Equal(m.Queue, sub.queue) {
continue
}
nsubs++
}
}
}
s.sendInternalMsgLocked(reply, _EMPTY_, nil, nsubs)
}
// Helper to grab account name for a client.
func accForClient(c *client) string {
if c.acc != nil {
return c.acc.Name
}
return "N/A"
}
// Helper to grab issuer for a client.
func issuerForClient(c *client) (issuerKey string) {
if c == nil || c.user == nil {
return
}
issuerKey = c.user.SigningKey
if issuerKey == "" && c.user.Account != nil {
issuerKey = c.user.Account.Name
}
return
}
// Helper to clear timers.
func clearTimer(tp **time.Timer) {
if t := *tp; t != nil {
t.Stop()
*tp = nil
}
}
// Helper function to wrap callbacks with a common preamble that locks the
// server and returns early if events are not enabled.
func (s *Server) wrapChk(f func()) func() {
return func() {
s.mu.Lock()
if !s.eventsEnabled() {
s.mu.Unlock()
return
}
f()
s.mu.Unlock()
}
}
| 1 | 12,377 | Weird that we have to check for `s == nil` here... I would instead have fixed the call stack to find out when this gets invoked with a nil server. | nats-io-nats-server | go |
@@ -228,7 +228,7 @@ export default Controller.extend({
if (erroredEmails.length > 0) {
invitationsString = erroredEmails.length > 1 ? ' invitations: ' : ' invitation: ';
message = `Failed to send ${erroredEmails.length} ${invitationsString}`;
- message += erroredEmails.join(', ');
+ message += Ember.Handlebars.Utils.escapeExpression(erroredEmails.join(', '));
message += '. Please check your email configuration, see <a href=\'https://docs.ghost.org/docs/mail-config\' target=\'_blank\'>https://docs.ghost.org/v1.0.0/docs/mail-config</a> for instructions';
message = htmlSafe(message); | 1 | /* eslint-disable ghost/ember/alias-model-in-controller */
import Controller, {inject as controller} from '@ember/controller';
import DS from 'ember-data';
import RSVP from 'rsvp';
import validator from 'npm:validator';
import {alias} from '@ember/object/computed';
import {computed} from '@ember/object';
import {A as emberA} from '@ember/array';
import {htmlSafe} from '@ember/string';
import {isInvalidError} from 'ember-ajax/errors';
import {run} from '@ember/runloop';
import {inject as service} from '@ember/service';
import {task, timeout} from 'ember-concurrency';
const {Errors} = DS;
export default Controller.extend({
two: controller('setup/two'),
notifications: service(),
users: '',
errors: Errors.create(),
hasValidated: emberA(),
ownerEmail: alias('two.email'),
usersArray: computed('users', function () {
let errors = this.get('errors');
let users = this.get('users').split('\n').filter(function (email) {
return email.trim().length > 0;
});
// remove "no users to invite" error if we have users
if (users.uniq().length > 0 && errors.get('users.length') === 1) {
if (errors.get('users.firstObject').message.match(/no users/i)) {
errors.remove('users');
}
}
return users.uniq();
}),
validUsersArray: computed('usersArray', 'ownerEmail', function () {
let ownerEmail = this.get('ownerEmail');
return this.get('usersArray').filter(function (user) {
return validator.isEmail(user || '') && user !== ownerEmail;
});
}),
invalidUsersArray: computed('usersArray', 'ownerEmail', function () {
let ownerEmail = this.get('ownerEmail');
return this.get('usersArray').reject(user => validator.isEmail(user || '') || user === ownerEmail);
}),
validationResult: computed('invalidUsersArray', function () {
let errors = [];
this.get('invalidUsersArray').forEach((user) => {
errors.push({
user,
error: 'email'
});
});
if (errors.length === 0) {
// ensure we aren't highlighting fields when everything is fine
this.get('errors').clear();
return true;
} else {
return errors;
}
}),
buttonText: computed('errors.users', 'validUsersArray', 'invalidUsersArray', function () {
let usersError = this.get('errors.users.firstObject.message');
let validNum = this.get('validUsersArray').length;
let invalidNum = this.get('invalidUsersArray').length;
let userCount;
if (usersError && usersError.match(/no users/i)) {
return usersError;
}
if (invalidNum > 0) {
userCount = invalidNum === 1 ? 'email address' : 'email addresses';
return `${invalidNum} invalid ${userCount}`;
}
if (validNum > 0) {
userCount = validNum === 1 ? 'user' : 'users';
userCount = `${validNum} ${userCount}`;
} else {
userCount = 'some users';
}
return `Invite ${userCount}`;
}),
buttonClass: computed('validationResult', 'usersArray.length', function () {
if (this.get('validationResult') === true && this.get('usersArray.length') > 0) {
return 'gh-btn-green';
} else {
return 'gh-btn-minor';
}
}),
authorRole: computed(function () {
return this.store.findAll('role', {reload: true}).then(roles => roles.findBy('name', 'Author'));
}),
actions: {
validate() {
this.validate();
},
invite() {
this.get('invite').perform();
},
skipInvite() {
this.send('loadServerNotifications');
this.transitionToRoute('posts.index');
}
},
validate() {
let errors = this.get('errors');
let validationResult = this.get('validationResult');
let property = 'users';
errors.clear();
// If property isn't in the `hasValidated` array, add it to mark that this field can show a validation result
this.get('hasValidated').addObject(property);
if (validationResult === true) {
return true;
}
validationResult.forEach((error) => {
// Only one error type here so far, but one day the errors might be more detailed
switch (error.error) {
case 'email':
errors.add(property, `${error.user} is not a valid email.`);
}
});
return false;
},
_transitionAfterSubmission() {
if (!this._hasTransitioned) {
this._hasTransitioned = true;
this.transitionToRoute('posts.index');
}
},
invite: task(function* () {
let users = this.get('validUsersArray');
if (this.validate() && users.length > 0) {
this._hasTransitioned = false;
this.get('_slowSubmissionTimeout').perform();
let authorRole = yield this.get('authorRole');
let invites = yield this._saveInvites(authorRole);
this.get('_slowSubmissionTimeout').cancelAll();
this._showNotifications(invites);
run.schedule('actions', this, function () {
this.send('loadServerNotifications');
this._transitionAfterSubmission();
});
} else if (users.length === 0) {
this.get('errors').add('users', 'No users to invite');
}
}).drop(),
_slowSubmissionTimeout: task(function* () {
yield timeout(4000);
this._transitionAfterSubmission();
}).drop(),
_saveInvites(authorRole) {
let users = this.get('validUsersArray');
return RSVP.Promise.all(
users.map((user) => {
let invite = this.store.createRecord('invite', {
email: user,
role: authorRole
});
return invite.save().then(() => ({
email: user,
success: invite.get('status') === 'sent'
})).catch(error => ({
error,
email: user,
success: false
}));
})
);
},
_showNotifications(invites) {
let notifications = this.get('notifications');
let erroredEmails = [];
let successCount = 0;
let invitationsString, message;
invites.forEach((invite) => {
if (invite.success) {
successCount += 1;
} else if (isInvalidError(invite.error)) {
message = `${invite.email} was invalid: ${invite.error.payload.errors[0].message}`;
notifications.showAlert(message, {type: 'error', delayed: true, key: `signup.send-invitations.${invite.email}`});
} else {
erroredEmails.push(invite.email);
}
});
if (erroredEmails.length > 0) {
invitationsString = erroredEmails.length > 1 ? ' invitations: ' : ' invitation: ';
message = `Failed to send ${erroredEmails.length} ${invitationsString}`;
message += erroredEmails.join(', ');
message += '. Please check your email configuration, see <a href=\'https://docs.ghost.org/docs/mail-config\' target=\'_blank\'>https://docs.ghost.org/v1.0.0/docs/mail-config</a> for instructions';
message = htmlSafe(message);
notifications.showAlert(message, {type: 'error', delayed: successCount > 0, key: 'signup.send-invitations.failed'});
}
if (successCount > 0) {
// pluralize
invitationsString = successCount > 1 ? 'invitations' : 'invitation';
notifications.showAlert(`${successCount} ${invitationsString} sent!`, {type: 'success', delayed: true, key: 'signup.send-invitations.success'});
}
}
});
| 1 | 9,077 | The `import` statement for `Ember` is missing in this file. | TryGhost-Admin | js |
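A minimal sketch of the fix the reviewer is pointing at (assuming the classic `Ember` global import is the intended route rather than a scoped `escapeExpression` import):
import Ember from 'ember';
// ...then, where the alert message is assembled:
message += Ember.Handlebars.Utils.escapeExpression(erroredEmails.join(', '));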
@@ -90,7 +90,7 @@ export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessD
let s = c._nextState || c.state;
if (newType.getDerivedStateFromProps!=null) {
oldState = assign({}, c.state);
- if (s===c.state) s = assign({}, s);
+ if (s===c.state) s = c._nextState = assign({}, s);
assign(s, newType.getDerivedStateFromProps(newVNode.props, s));
}
| 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component, enqueueRender } from '../component';
import { coerceToVNode, Fragment } from '../create-element';
import { diffChildren } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement | Text} dom The DOM element representing
* the virtual nodes under diff
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode | null} newVNode The new virtual node
* @param {import('../internal').VNode | null} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} mounts A list of newly
* mounted components
* @param {import('../internal').Component | null} ancestorComponent The direct
* parent component
*/
export function diff(dom, parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent, force) {
// If the previous type doesn't match the new type we drop the whole subtree
if (oldVNode==null || newVNode==null || oldVNode.type!==newVNode.type) {
if (oldVNode!=null) unmount(oldVNode, ancestorComponent);
if (newVNode==null) return null;
dom = null;
oldVNode = EMPTY_OBJ;
}
if (options.diff) options.diff(newVNode);
let c, p, isNew = false, oldProps, oldState, oldContext,
newType = newVNode.type;
/** @type {import('../internal').Component | null} */
let clearProcessingException;
try {
outer: if (oldVNode.type===Fragment || newType===Fragment) {
diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, c);
if (newVNode._children.length) {
dom = newVNode._children[0]._dom;
newVNode._lastDomChild = newVNode._children[newVNode._children.length - 1]._dom;
}
}
else if (typeof newType==='function') {
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
let cxType = newType.contextType;
let provider = cxType && context[cxType._id];
let cctx = cxType != null ? (provider ? provider.props.value : cxType._defaultValue) : context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException;
}
else {
isNew = true;
// Instantiate the new component
if (newType.prototype && newType.prototype.render) {
newVNode._component = c = new newType(newVNode.props, cctx); // eslint-disable-line new-cap
}
else {
newVNode._component = c = new Component(newVNode.props, cctx);
c.constructor = newType;
c.render = doRender;
}
c._ancestorComponent = ancestorComponent;
if (provider) provider.sub(c);
c.props = newVNode.props;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
c._dirty = true;
c._renderCallbacks = [];
}
c._vnode = newVNode;
// Invoke getDerivedStateFromProps
let s = c._nextState || c.state;
if (newType.getDerivedStateFromProps!=null) {
oldState = assign({}, c.state);
if (s===c.state) s = assign({}, s);
assign(s, newType.getDerivedStateFromProps(newVNode.props, s));
}
// Invoke pre-render lifecycle methods
if (isNew) {
if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount();
if (c.componentDidMount!=null) mounts.push(c);
}
else {
if (newType.getDerivedStateFromProps==null && force==null && c.componentWillReceiveProps!=null) {
c.componentWillReceiveProps(newVNode.props, cctx);
s = c._nextState || c.state;
}
if (!force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newVNode.props, s, cctx)===false) {
c.props = newVNode.props;
c.state = s;
c._dirty = false;
break outer;
}
if (c.componentWillUpdate!=null) {
c.componentWillUpdate(newVNode.props, s, cctx);
}
}
oldProps = c.props;
if (!oldState) oldState = c.state;
oldContext = c.context = cctx;
c.props = newVNode.props;
c.state = s;
if (options.render) options.render(newVNode);
let prev = c._prevVNode;
let vnode = c._prevVNode = coerceToVNode(c.render(c.props, c.state, c.context));
c._dirty = false;
if (c.getChildContext!=null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate!=null) {
oldContext = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
c.base = dom = diff(dom, parentDom, vnode, prev, context, isSvg, excessDomChildren, mounts, c, null);
if (vnode!=null) {
// If this component returns a Fragment (or another component that
// returns a Fragment), then _lastDomChild will be non-null,
				// informing `diffChildren` to diff this component's VNode like a Fragment
newVNode._lastDomChild = vnode._lastDomChild;
}
c._parentDom = parentDom;
if (newVNode.ref) applyRef(newVNode.ref, c, ancestorComponent);
}
else {
dom = diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent);
if (newVNode.ref && (oldVNode.ref !== newVNode.ref)) {
applyRef(newVNode.ref, dom, ancestorComponent);
}
}
newVNode._dom = dom;
if (c!=null) {
while (p=c._renderCallbacks.pop()) p.call(c);
// Don't call componentDidUpdate on mount or when we bailed out via
// `shouldComponentUpdate`
if (!isNew && oldProps!=null && c.componentDidUpdate!=null) {
c.componentDidUpdate(oldProps, oldState, oldContext);
}
}
if (clearProcessingException) {
c._processingException = null;
}
if (options.diffed) options.diffed(newVNode);
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
return dom;
}
export function commitRoot(mounts, root) {
let c;
while ((c = mounts.pop())) {
try {
c.componentDidMount();
}
catch (e) {
catchErrorInComponent(e, c._ancestorComponent);
}
}
if (options.commit) options.commit(root);
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} mounts An array of newly
* mounted components
* @param {import('../internal').Component} ancestorComponent The parent
* component to the ones being diffed
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent) {
let d = dom;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type==='svg' || isSvg;
if (dom==null && excessDomChildren!=null) {
for (let i=0; i<excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom==null) {
dom = newVNode.type===null ? document.createTextNode(newVNode.text) : isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
newVNode._dom = dom;
if (newVNode.type===null) {
if ((d===null || dom===d) && newVNode.text!==oldVNode.text) {
dom.data = newVNode.text;
}
}
else {
if (excessDomChildren!=null && dom.childNodes!=null) {
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
if (newVNode!==oldVNode) {
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// if we're hydrating, use the element's attributes as its current props:
if (oldProps==null) {
oldProps = {};
if (excessDomChildren!=null) {
let name;
for (let i=0; i<dom.attributes.length; i++) {
name = dom.attributes[i].name;
oldProps[name=='class' && newProps.className ? 'className' : name] = dom.attributes[i].value;
}
}
}
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
if (newHtml || oldHtml) {
				// Avoid re-applying the same '__html' if it did not change between re-renders
if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) {
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
if (newProps.multiple) {
dom.multiple = newProps.multiple;
}
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, ancestorComponent);
diffProps(dom, newProps, oldProps, isSvg);
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} [ref=null]
* @param {any} [value]
*/
export function applyRef(ref, value, ancestorComponent) {
try {
if (typeof ref=='function') ref(value);
else ref.current = value;
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
}
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').Component} ancestorComponent The parent
* component to this virtual node
* @param {boolean} skipRemove Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, ancestorComponent, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if (r = vnode.ref) {
applyRef(r, null, ancestorComponent);
}
if (!skipRemove && vnode._lastDomChild==null && (skipRemove = ((r = vnode._dom)!=null))) removeNode(r);
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component)!=null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
}
catch (e) {
catchErrorInComponent(e, ancestorComponent);
}
}
r.base = r._parentDom = null;
if (r = r._prevVNode) unmount(r, ancestorComponent, skipRemove);
}
else if (r = vnode._children) {
for (let i = 0; i < r.length; i++) {
unmount(r[i], ancestorComponent, skipRemove);
}
}
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
/**
* Find the closest error boundary to a thrown error and call it
* @param {object} error The thrown value
* @param {import('../internal').Component} component The first ancestor
* component check for error boundary behaviors
*/
function catchErrorInComponent(error, component) {
for (; component; component = component._ancestorComponent) {
if (!component._processingException) {
try {
if (component.constructor.getDerivedStateFromError!=null) {
component.setState(component.constructor.getDerivedStateFromError(error));
}
else if (component.componentDidCatch!=null) {
component.componentDidCatch(error);
}
else {
continue;
}
return enqueueRender(component._processingException = component);
}
catch (e) {
error = e;
}
}
}
throw error;
}
| 1 | 12,771 | Awesome :tada: I'd love to have a test case for this so that we don't regress on this feature in any future refactorings :+1: | preactjs-preact | js |
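For context, a minimal sketch of a component that exercises the `getDerivedStateFromProps` path this diff touches (plain Preact API usage, not a test taken from this repo):
import { h, render, Component } from 'preact';
class Price extends Component {
	constructor(props) {
		super(props);
		this.state = { label: '' };
	}
	// Runs before every render; with the change above its result is staged
	// in _nextState so it merges cleanly with any pending setState updates.
	static getDerivedStateFromProps(props) {
		return { label: '$' + (props.cents / 100).toFixed(2) };
	}
	render(props, state) {
		return h('span', null, state.label);
	}
}
render(h(Price, { cents: 1999 }), document.body);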
@@ -28,7 +28,7 @@ class SeriesDateTimeTest(ReusedSQLTestCase, SQLTestUtils):
@property
def pdf1(self):
- date1 = pd.Series(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M'))
+ date1 = pd.Series(pd.date_range('2012-1-1 12:45:31', periods=3, freq='M'))
date2 = pd.Series(pd.date_range('2013-3-11 21:45:00', periods=3, freq='W'))
return pd.DataFrame(dict(start_date=date1, end_date=date2))
| 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
import pandas as pd
import pandas.testing as mt
from databricks import koalas as ks
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
class SeriesDateTimeTest(ReusedSQLTestCase, SQLTestUtils):
@property
def pdf1(self):
date1 = pd.Series(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M'))
date2 = pd.Series(pd.date_range('2013-3-11 21:45:00', periods=3, freq='W'))
return pd.DataFrame(dict(start_date=date1, end_date=date2))
@property
def pd_start_date(self):
return self.pdf1['start_date']
@property
def ks_start_date(self):
return ks.from_pandas(self.pd_start_date)
def check_func(self, func):
# import pdb; pdb.set_trace()
mt.assert_series_equal(
func(self.ks_start_date).to_pandas(),
func(self.pd_start_date),
check_names=False
)
@unittest.skip(
"It fails in certain OSs presumably due to different "
"timezone behaviours inherited from C library.")
def test_subtraction(self):
pdf = self.pdf1
kdf = ks.from_pandas(pdf)
kdf['diff_seconds'] = kdf['end_date'] - kdf['start_date'] - 1
self.assertEqual(list(kdf['diff_seconds'].toPandas()), [35545499, 33644699, 31571099])
def test_div(self):
pdf = self.pdf1
kdf = ks.from_pandas(pdf)
for u in 'D', 's', 'ms':
duration = np.timedelta64(1, u)
self.assert_eq(
(kdf['end_date'] - kdf['start_date']) / duration,
(pdf['end_date'] - pdf['start_date']) / duration)
@unittest.skip("It is currently failed probably for the same reason in 'test_subtraction'")
def test_date(self):
self.check_func(lambda x: x.dt.date)
def test_time(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.time)
def test_timetz(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.timetz)
def test_year(self):
self.check_func(lambda x: x.dt.year)
def test_month(self):
self.check_func(lambda x: x.dt.month)
def test_day(self):
self.check_func(lambda x: x.dt.day)
def test_hour(self):
self.check_func(lambda x: x.dt.hour)
def test_minute(self):
self.check_func(lambda x: x.dt.minute)
def test_second(self):
self.check_func(lambda x: x.dt.second)
def test_microsecond(self):
self.check_func(lambda x: x.dt.microsecond)
def test_nanosecond(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.nanosecond)
def test_week(self):
self.check_func(lambda x: x.dt.week)
def test_weekofyear(self):
self.check_func(lambda x: x.dt.weekofyear)
def test_dayofweek(self):
self.check_func(lambda x: x.dt.dayofweek)
def test_strftime(self):
self.check_func(lambda x: x.dt.strftime('%Y-%m-%d'))
def test_unsupported_type(self):
self.assertRaisesRegex(ValueError,
'Cannot call DatetimeMethods on type LongType',
lambda: ks.Series([0]).dt)
| 1 | 10,173 | For testing some of the rounding functions | databricks-koalas | py |
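A sketch of the rounding tests this fixture change makes meaningful (hypothetical additions in the style of the methods above, assuming Koalas mirrors pandas' `Series.dt.round`/`floor`/`ceil` accessors):
    def test_round(self):
        self.check_func(lambda x: x.dt.round(freq='min'))
    def test_floor(self):
        self.check_func(lambda x: x.dt.floor(freq='min'))
    def test_ceil(self):
        self.check_func(lambda x: x.dt.ceil(freq='min'))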
@@ -56,6 +56,10 @@ class CurrencyDataFixture extends AbstractReferenceFixture
* @see \Shopsys\FrameworkBundle\Migrations\Version20180603135342
*/
$currencyCzk = $this->currencyFacade->getById(1);
+ $currencyData = $this->currencyDataFactory->createFromCurrency($currencyCzk);
+ $currencyData->minFractionDigits = Currency::DEFAULT_MIN_FRACTION_DIGITS;
+ $currencyData->roundingType = Currency::ROUNDING_TYPE_INTEGER;
+ $currencyCzk = $this->currencyFacade->edit($currencyCzk->getId(), $currencyData);
$this->addReference(self::CURRENCY_CZK, $currencyCzk);
if (count($this->domain->getAll()) > 1) { | 1 | <?php
declare(strict_types=1);
namespace Shopsys\ShopBundle\DataFixtures\Demo;
use Doctrine\Common\Persistence\ObjectManager;
use Shopsys\FrameworkBundle\Component\DataFixture\AbstractReferenceFixture;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency;
use Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyDataFactoryInterface;
use Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyFacade;
class CurrencyDataFixture extends AbstractReferenceFixture
{
public const CURRENCY_CZK = 'currency_czk';
public const CURRENCY_EUR = 'currency_eur';
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyFacade
*/
protected $currencyFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyDataFactoryInterface
*/
protected $currencyDataFactory;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
protected $domain;
/**
* @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyFacade $currencyFacade
* @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyDataFactoryInterface $currencyDataFactory
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
*/
public function __construct(
CurrencyFacade $currencyFacade,
CurrencyDataFactoryInterface $currencyDataFactory,
Domain $domain
) {
$this->currencyFacade = $currencyFacade;
$this->currencyDataFactory = $currencyDataFactory;
$this->domain = $domain;
}
/**
* @param \Doctrine\Common\Persistence\ObjectManager $manager
*/
public function load(ObjectManager $manager)
{
/**
* The "CZK" currency is created in database migration.
* @see \Shopsys\FrameworkBundle\Migrations\Version20180603135342
*/
$currencyCzk = $this->currencyFacade->getById(1);
$this->addReference(self::CURRENCY_CZK, $currencyCzk);
if (count($this->domain->getAll()) > 1) {
$currencyData = $this->currencyDataFactory->create();
$currencyData->name = 'Euro';
$currencyData->code = Currency::CODE_EUR;
$currencyData->exchangeRate = '25';
$currencyEuro = $this->currencyFacade->create($currencyData);
$this->addReference(self::CURRENCY_EUR, $currencyEuro);
}
}
}
| 1 | 19,743 | This should be present in the UPGRADE notes. Also, for currently running projects, you should suggest that users update these settings. | shopsys-shopsys | php |
@@ -1053,10 +1053,9 @@ func (bdl *backpressureDiskLimiter) reserveBytes(
if count < 0 {
return count, nil
}
- // We calculate the total free bytes by adding the reported free bytes and
- // the non-`tracker` used bytes.
- tracker.updateFree(freeBytes + bdl.overallByteTracker.used -
- tracker.usedResources())
+ // TODO: verify there's not any other kind of bytes we care about
+ // subtracting too.
+ tracker.updateFree(freeBytes)
count = tracker.tryReserve(blockBytes)
if count < 0 { | 1 | // Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"math"
"sync"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfssync"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// backpressureTracker keeps track of the variables used to calculate
// backpressure. It keeps track of a generic resource (which can be
// either bytes or files).
//
// Let U be the (approximate) resource usage of the journal and F be
// the free resources. Then we want to enforce
//
// U <= min(k(U+F), L),
//
// where 0 < k <= 1 is some fraction, and L > 0 is the absolute
// resource usage limit. But in addition to that, we want to set
// thresholds 0 <= m <= M <= 1 such that we apply proportional
// backpressure (with a given maximum delay) when
//
// m <= max(U/(k(U+F)), U/L) <= M,
//
// which is equivalent to
//
// m <= U/min(k(U+F), L) <= M.
//
// Note that this type doesn't do any locking, so it's the caller's
// responsibility to do so.
type backpressureTracker struct {
// minThreshold is m in the above.
minThreshold float64
// maxThreshold is M in the above.
maxThreshold float64
// limitFrac is k in the above.
limitFrac float64
// limit is L in the above.
limit int64
// used is U in the above.
used int64
// free is F in the above.
free int64
// semaphoreMax is the last calculated value of currLimit(),
// which is min(k(U+F), L).
semaphoreMax int64
// The count of the semaphore is semaphoreMax - U - I, where I
// is the resource count that is currently "in-flight",
// i.e. between beforeBlockPut() and afterBlockPut() calls.
semaphore *kbfssync.Semaphore
}
func newBackpressureTracker(minThreshold, maxThreshold, limitFrac float64,
limit, initialFree int64) (*backpressureTracker, error) {
if minThreshold < 0.0 {
return nil, errors.Errorf("minThreshold=%f < 0.0",
minThreshold)
}
if maxThreshold < minThreshold {
return nil, errors.Errorf(
"maxThreshold=%f < minThreshold=%f",
maxThreshold, minThreshold)
}
if 1.0 < maxThreshold {
return nil, errors.Errorf("1.0 < maxThreshold=%f",
maxThreshold)
}
if limitFrac < 0.01 {
return nil, errors.Errorf("limitFrac=%f < 0.01", limitFrac)
}
if limitFrac > 1.0 {
return nil, errors.Errorf("limitFrac=%f > 1.0", limitFrac)
}
if limit < 0 {
return nil, errors.Errorf("limit=%d < 0", limit)
}
if initialFree < 0 {
return nil, errors.Errorf("initialFree=%d < 0", initialFree)
}
bt := &backpressureTracker{
minThreshold, maxThreshold, limitFrac, limit,
0, initialFree, 0, kbfssync.NewSemaphore(),
}
bt.updateSemaphoreMax()
return bt, nil
}
// currLimit returns the resource limit, taking into account the
// amount of free resources left. This is min(k(U+F), L).
func (bt backpressureTracker) currLimit() float64 {
// Calculate k(U+F), converting to float64 first to avoid
// overflow, although losing some precision in the process.
usedFloat := float64(bt.used)
freeFloat := float64(bt.free)
limit := bt.limitFrac * (usedFloat + freeFloat)
minLimit := math.Min(limit, float64(bt.limit))
// Based on local tests, the magic number of 512 gets us past overflow
// issues at the limit due to floating point precision.
maxFloatForInt64 := float64(math.MaxInt64 - 512)
if minLimit > maxFloatForInt64 {
minLimit = maxFloatForInt64
}
return minLimit
}
func (bt backpressureTracker) usedFrac() float64 {
return float64(bt.used) / bt.currLimit()
}
func (bt backpressureTracker) usedResources() int64 {
return bt.used
}
// delayScale returns a number between 0 and 1, which should be
// multiplied with the maximum delay to get the backpressure delay to
// apply.
func (bt backpressureTracker) delayScale() float64 {
usedFrac := bt.usedFrac()
// We want the delay to be 0 if usedFrac <= m and the max
// delay if usedFrac >= M, so linearly interpolate the delay
// scale.
m := bt.minThreshold
M := bt.maxThreshold
return math.Min(1.0, math.Max(0.0, (usedFrac-m)/(M-m)))
}
// updateSemaphoreMax must be called whenever bt.used or bt.free
// changes.
func (bt *backpressureTracker) updateSemaphoreMax() {
newMax := int64(bt.currLimit())
delta := newMax - bt.semaphoreMax
// These operations are adjusting the *maximum* value of
// bt.semaphore.
if delta > 0 {
bt.semaphore.Release(delta)
} else if delta < 0 {
bt.semaphore.ForceAcquire(-delta)
}
bt.semaphoreMax = newMax
}
func (bt *backpressureTracker) onEnable(usedResources int64) (
availableResources int64) {
bt.used += usedResources
bt.updateSemaphoreMax()
if usedResources == 0 {
return bt.semaphore.Count()
}
return bt.semaphore.ForceAcquire(usedResources)
}
func (bt *backpressureTracker) onDisable(usedResources int64) {
bt.used -= usedResources
bt.updateSemaphoreMax()
if usedResources > 0 {
bt.semaphore.Release(usedResources)
}
}
func (bt *backpressureTracker) updateFree(freeResources int64) {
bt.free = freeResources
bt.updateSemaphoreMax()
}
func (bt *backpressureTracker) reserve(
ctx context.Context, blockResources int64) (
availableResources int64, err error) {
return bt.semaphore.Acquire(ctx, blockResources)
}
func (bt *backpressureTracker) commit(blockResources int64) {
bt.used += blockResources
bt.updateSemaphoreMax()
}
func (bt *backpressureTracker) rollback(blockResources int64) {
bt.semaphore.Release(blockResources)
}
func (bt *backpressureTracker) commitOrRollback(
blockResources int64, shouldCommit bool) {
if shouldCommit {
bt.commit(blockResources)
} else {
bt.rollback(blockResources)
}
}
func (bt *backpressureTracker) release(blockResources int64) {
if blockResources == 0 {
return
}
bt.semaphore.Release(blockResources)
bt.used -= blockResources
bt.updateSemaphoreMax()
}
func (bt *backpressureTracker) tryReserve(blockResources int64) (
availableResources int64) {
return bt.semaphore.TryAcquire(blockResources)
}
func (bt *backpressureTracker) getLimitInfo() (used int64, limit float64) {
return bt.used, bt.currLimit()
}
type backpressureTrackerStatus struct {
// Derived numbers.
UsedFrac float64
DelayScale float64
// Constants.
MinThreshold float64
MaxThreshold float64
LimitFrac float64
Limit int64
// Raw numbers.
Used int64
Free int64
Max int64
Count int64
}
func (bt *backpressureTracker) getStatus() backpressureTrackerStatus {
return backpressureTrackerStatus{
UsedFrac: bt.usedFrac(),
DelayScale: bt.delayScale(),
MinThreshold: bt.minThreshold,
MaxThreshold: bt.maxThreshold,
LimitFrac: bt.limitFrac,
Limit: bt.limit,
Used: bt.used,
Free: bt.free,
Max: bt.semaphoreMax,
Count: bt.semaphore.Count(),
}
}
// quotaBackpressureTracker keeps track of the variables used to
// calculate quota-related backpressure.
//
// Let U be the (approximate) unflushed bytes in the journal, R be the
// remote quota usage, and Q be the quota. Then we want to set
// thresholds 0 <= m <= M such that we apply proportional backpressure
// (with a given maximum delay) when
//
// m <= (U+R)/Q <= M.
//
// Note that this type doesn't do any locking, so it's the caller's
// responsibility to do so.
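//
// Illustrative numbers only: with U=1 GiB unflushed, R=9.5 GiB used
// remotely and Q=10 GiB of quota, (U+R)/Q = 1.05, which falls between the
// default thresholds m=1.0 and M=1.2 and so applies a partial delay.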
type quotaBackpressureTracker struct {
// minThreshold is m in the above.
minThreshold float64
// maxThreshold is M in the above.
maxThreshold float64
// unflushedBytes is U in the above.
unflushedBytes int64
// remoteUsedBytes is R in the above.
remoteUsedBytes int64
// quotaBytes is Q in the above.
quotaBytes int64
}
func newQuotaBackpressureTracker(minThreshold, maxThreshold float64) (
*quotaBackpressureTracker, error) {
if minThreshold < 0.0 {
return nil, errors.Errorf("minThreshold=%f < 0.0",
minThreshold)
}
if maxThreshold < minThreshold {
return nil, errors.Errorf(
"maxThreshold=%f < minThreshold=%f",
maxThreshold, minThreshold)
}
qbt := "aBackpressureTracker{
minThreshold, maxThreshold, 0, 0, math.MaxInt64,
}
return qbt, nil
}
func (qbt quotaBackpressureTracker) usedFrac() float64 {
return (float64(qbt.unflushedBytes) + float64(qbt.remoteUsedBytes)) /
float64(qbt.quotaBytes)
}
// delayScale returns a number between 0 and 1, which should be
// multiplied with the maximum delay to get the backpressure delay to
// apply.
func (qbt quotaBackpressureTracker) delayScale() float64 {
usedFrac := qbt.usedFrac()
// We want the delay to be 0 if usedFrac <= m and the max
// delay if usedFrac >= M, so linearly interpolate the delay
// scale.
m := qbt.minThreshold
M := qbt.maxThreshold
return math.Min(1.0, math.Max(0.0, (usedFrac-m)/(M-m)))
}
func (qbt quotaBackpressureTracker) getQuotaInfo() (
usedQuotaBytes, quotaBytes int64) {
usedQuotaBytes = qbt.unflushedBytes + qbt.remoteUsedBytes
quotaBytes = qbt.quotaBytes
return usedQuotaBytes, quotaBytes
}
func (qbt *quotaBackpressureTracker) onJournalEnable(unflushedBytes int64) {
qbt.unflushedBytes += unflushedBytes
}
func (qbt *quotaBackpressureTracker) onJournalDisable(unflushedBytes int64) {
qbt.unflushedBytes -= unflushedBytes
}
func (qbt *quotaBackpressureTracker) updateRemote(
remoteUsedBytes, quotaBytes int64) {
qbt.remoteUsedBytes = remoteUsedBytes
qbt.quotaBytes = quotaBytes
}
func (qbt *quotaBackpressureTracker) afterBlockPut(
blockBytes int64, putData bool) {
if putData {
qbt.unflushedBytes += blockBytes
}
}
func (qbt *quotaBackpressureTracker) onBlocksFlush(blockBytes int64) {
qbt.unflushedBytes -= blockBytes
}
type quotaBackpressureTrackerStatus struct {
// Derived numbers.
UsedFrac float64
DelayScale float64
// Constants.
MinThreshold float64
MaxThreshold float64
// Raw numbers.
UnflushedBytes int64
RemoteUsedBytes int64
QuotaBytes int64
}
func (qbt *quotaBackpressureTracker) getStatus() quotaBackpressureTrackerStatus {
return quotaBackpressureTrackerStatus{
UsedFrac: qbt.usedFrac(),
DelayScale: qbt.delayScale(),
MinThreshold: qbt.minThreshold,
MaxThreshold: qbt.maxThreshold,
UnflushedBytes: qbt.unflushedBytes,
RemoteUsedBytes: qbt.remoteUsedBytes,
QuotaBytes: qbt.quotaBytes,
}
}
// journalTracker aggregates all the journal trackers. This type also
// doesn't do any locking, so it's the caller's responsibility to do
// so.
type journalTracker struct {
byte, file *backpressureTracker
quota map[keybase1.UserOrTeamID]*quotaBackpressureTracker
quotaMinThreshold float64
quotaMaxThreshold float64
}
func newJournalTracker(
minThreshold, maxThreshold, quotaMinThreshold, quotaMaxThreshold, journalFrac float64,
byteLimit, fileLimit, freeBytes, freeFiles int64) (
journalTracker, error) {
// byteLimit and fileLimit must be scaled by the proportion of
// the limit that the journal should consume. Add 0.5 to round
// up.
journalByteLimit := int64((float64(byteLimit) * journalFrac) + 0.5)
byteTracker, err := newBackpressureTracker(
minThreshold, maxThreshold, journalFrac, journalByteLimit,
freeBytes)
if err != nil {
return journalTracker{}, err
}
	// The fileLimit is only used by the journal, so in theory we
// don't have to scale it by journalFrac, but in the interest
// of consistency with how we treat the byteLimit, we do so
// anyway. Add 0.5 to round up.
journalFileLimit := int64((float64(fileLimit) * journalFrac) + 0.5)
fileTracker, err := newBackpressureTracker(
minThreshold, maxThreshold, journalFrac, journalFileLimit,
freeFiles)
if err != nil {
return journalTracker{}, err
}
// Test quota parameters -- actual quota trackers will be created
// on a per-chargedTo-ID basis.
_, err = newQuotaBackpressureTracker(quotaMinThreshold, quotaMaxThreshold)
if err != nil {
return journalTracker{}, err
}
return journalTracker{
byte: byteTracker,
file: fileTracker,
quota: make(
map[keybase1.UserOrTeamID]*quotaBackpressureTracker),
quotaMinThreshold: quotaMinThreshold,
quotaMaxThreshold: quotaMaxThreshold,
}, nil
}
func (jt journalTracker) getQuotaTracker(
chargedTo keybase1.UserOrTeamID) *quotaBackpressureTracker {
quota, ok := jt.quota[chargedTo]
if !ok {
var err error
quota, err = newQuotaBackpressureTracker(
jt.quotaMinThreshold, jt.quotaMaxThreshold)
if err != nil {
// We already tested the parameters, so this shouldn't
// ever happen.
panic(err)
}
jt.quota[chargedTo] = quota
}
return quota
}
type jtSnapshot struct {
used int64
free int64
max int64
count int64
}
func (jt journalTracker) getSnapshotsForTest(chargedTo keybase1.UserOrTeamID) (
byteSnapshot, fileSnapshot, quotaSnapshot jtSnapshot) {
byteSnapshot = jtSnapshot{jt.byte.used, jt.byte.free,
jt.byte.semaphoreMax, jt.byte.semaphore.Count()}
fileSnapshot = jtSnapshot{jt.file.used, jt.file.free,
jt.file.semaphoreMax, jt.file.semaphore.Count()}
usedQuotaBytes, quotaBytes := jt.getQuotaTracker(chargedTo).getQuotaInfo()
free := quotaBytes - usedQuotaBytes
quotaSnapshot = jtSnapshot{usedQuotaBytes, free, 0, 0}
return byteSnapshot, fileSnapshot, quotaSnapshot
}
func (jt journalTracker) onEnable(storedBytes, unflushedBytes, files int64,
chargedTo keybase1.UserOrTeamID) (availableBytes, availableFiles int64) {
// storedBytes should be >= unflushedBytes. But it's not too
// bad to let it go through.
availableBytes = jt.byte.onEnable(storedBytes)
availableFiles = jt.file.onEnable(files)
jt.getQuotaTracker(chargedTo).onJournalEnable(unflushedBytes)
return availableBytes, availableFiles
}
func (jt journalTracker) onDisable(storedBytes, unflushedBytes, files int64,
chargedTo keybase1.UserOrTeamID) {
// As above, storedBytes should be >= unflushedBytes. Let it
// go through here, too.
jt.byte.onDisable(storedBytes)
jt.file.onDisable(files)
jt.getQuotaTracker(chargedTo).onJournalDisable(unflushedBytes)
}
func (jt journalTracker) getDelayScale(
chargedTo keybase1.UserOrTeamID) float64 {
byteDelayScale := jt.byte.delayScale()
fileDelayScale := jt.file.delayScale()
quotaDelayScale := jt.getQuotaTracker(chargedTo).delayScale()
delayScale := math.Max(
math.Max(byteDelayScale, fileDelayScale), quotaDelayScale)
return delayScale
}
func (jt journalTracker) updateFree(
freeBytes, overallUsedBytes, freeFiles int64) {
// We calculate the total free bytes by adding the reported free bytes and
// the non-journal used bytes.
jt.byte.updateFree(freeBytes + overallUsedBytes - jt.byte.used)
jt.file.updateFree(freeFiles)
}
func (jt journalTracker) updateRemote(remoteUsedBytes, quotaBytes int64,
chargedTo keybase1.UserOrTeamID) {
jt.getQuotaTracker(chargedTo).updateRemote(remoteUsedBytes, quotaBytes)
}
func (jt journalTracker) getSemaphoreCounts() (byteCount, fileCount int64) {
return jt.byte.semaphore.Count(), jt.file.semaphore.Count()
}
func (jt journalTracker) reserve(
ctx context.Context, blockBytes, blockFiles int64) (
availableBytes, availableFiles int64, err error) {
availableBytes, err = jt.byte.reserve(ctx, blockBytes)
if err != nil {
return availableBytes, jt.file.semaphore.Count(), err
}
defer func() {
if err != nil {
jt.byte.rollback(blockBytes)
availableBytes = jt.byte.semaphore.Count()
}
}()
availableFiles, err = jt.file.reserve(ctx, blockFiles)
if err != nil {
return availableBytes, availableFiles, err
}
return availableBytes, availableFiles, nil
}
func (jt journalTracker) commitOrRollback(
blockBytes, blockFiles int64, putData bool,
chargedTo keybase1.UserOrTeamID) {
jt.byte.commitOrRollback(blockBytes, putData)
jt.file.commitOrRollback(blockFiles, putData)
jt.getQuotaTracker(chargedTo).afterBlockPut(blockBytes, putData)
}
func (jt journalTracker) onBlocksFlush(
blockBytes int64, chargedTo keybase1.UserOrTeamID) {
jt.getQuotaTracker(chargedTo).onBlocksFlush(blockBytes)
}
func (jt journalTracker) release(blockBytes, blockFiles int64) {
jt.byte.release(blockBytes)
jt.file.release(blockFiles)
}
func (jt journalTracker) getUsedBytes() int64 {
return jt.byte.used
}
func (jt journalTracker) getStatusLine(chargedTo keybase1.UserOrTeamID) string {
quota := jt.getQuotaTracker(chargedTo)
return fmt.Sprintf("journalBytes=%d, freeBytes=%d, "+
"journalFiles=%d, freeFiles=%d, "+
"quotaUnflushedBytes=%d, quotaRemoteUsedBytes=%d, "+
"quotaBytes=%d",
jt.byte.used, jt.byte.free,
jt.file.used, jt.file.free,
quota.unflushedBytes, quota.remoteUsedBytes, quota.quotaBytes)
}
func (jt journalTracker) getQuotaInfo(chargedTo keybase1.UserOrTeamID) (
usedQuotaBytes, quotaBytes int64) {
return jt.getQuotaTracker(chargedTo).getQuotaInfo()
}
func (jt journalTracker) getDiskLimitInfo() (
usedBytes int64, limitBytes float64, usedFiles int64, limitFiles float64) {
usedBytes, limitBytes = jt.byte.getLimitInfo()
usedFiles, limitFiles = jt.file.getLimitInfo()
return usedBytes, limitBytes, usedFiles, limitFiles
}
type journalTrackerStatus struct {
ByteStatus backpressureTrackerStatus
FileStatus backpressureTrackerStatus
QuotaStatus quotaBackpressureTrackerStatus
}
func (jt journalTracker) getStatus(
chargedTo keybase1.UserOrTeamID) journalTrackerStatus {
return journalTrackerStatus{
ByteStatus: jt.byte.getStatus(),
FileStatus: jt.file.getStatus(),
QuotaStatus: jt.getQuotaTracker(chargedTo).getStatus(),
}
}
type diskLimiterQuotaFn func(
ctx context.Context, chargedTo keybase1.UserOrTeamID) (int64, int64)
// backpressureDiskLimiter is an implementation of diskLimiter that
// uses backpressure to slow down block puts before they hit the disk
// limits.
type backpressureDiskLimiter struct {
log logger.Logger
maxDelay time.Duration
delayFn func(context.Context, time.Duration) error
freeBytesAndFilesFn func() (int64, int64, error)
quotaFn diskLimiterQuotaFn
// lock protects everything in journalTracker and
// diskCacheByteTracker, including the (implicit) maximum
// values of the semaphores, but not the actual semaphores
// themselves.
lock sync.RWMutex
// overallByteTracker tracks the overall number of bytes used by Keybase.
overallByteTracker *backpressureTracker
// journalTracker tracks the journal bytes and files used.
journalTracker journalTracker
// diskCacheByteTracker tracks the disk cache bytes used.
diskCacheByteTracker *backpressureTracker
// syncCacheByteTracker tracks the sync cache bytes used.
syncCacheByteTracker *backpressureTracker
}
var _ DiskLimiter = (*backpressureDiskLimiter)(nil)
type backpressureDiskLimiterParams struct {
// minThreshold is the fraction of the free bytes/files at
// which we start to apply backpressure.
minThreshold float64
// maxThreshold is the fraction of the free bytes/files at
// which we max out on backpressure.
maxThreshold float64
// quotaMinThreshold is the fraction of used quota at which we
// start to apply backpressure.
quotaMinThreshold float64
// quotaMaxThreshold is the fraction of used quota at which we
// max out on backpressure.
quotaMaxThreshold float64
	// journalFrac is the fraction of the free bytes/files that the
// journal is allowed to use.
journalFrac float64
// diskCacheFrac is the fraction of the free bytes that the
// disk cache is allowed to use. The disk cache doesn't store
// individual files.
diskCacheFrac float64
// syncCacheFrac is the fraction of the free bytes that the
// sync cache is allowed to use.
syncCacheFrac float64
// byteLimit is the total cap for free bytes. The journal will
// be allowed to use at most journalFrac*byteLimit, and the
// disk cache will be allowed to use at most
// diskCacheFrac*byteLimit.
byteLimit int64
	// fileLimit is the cap for free files. The journal will be
// allowed to use at most journalFrac*fileLimit. This limit
// doesn't apply to the disk cache, since it doesn't store
// individual files.
fileLimit int64
// maxDelay is the maximum delay used for backpressure.
maxDelay time.Duration
// delayFn is a function that takes a context and a duration
// and returns after sleeping for that duration, or if the
// context is cancelled. Overridable for testing.
delayFn func(context.Context, time.Duration) error
// freeBytesAndFilesFn is a function that returns the current
// free bytes and files on the disk containing the
// journal/disk cache directory. Overridable for testing.
freeBytesAndFilesFn func() (int64, int64, error)
// quotaFn is a function that returns the current used and
// total quota bytes. Overridable for testing.
quotaFn diskLimiterQuotaFn
}
// defaultDiskLimitMaxDelay is the maximum amount to delay a block
// put. Exposed as a constant as it is used by
// tlfJournalConfigAdapter.
const defaultDiskLimitMaxDelay = 10 * time.Second
type quotaUsageGetter func(
chargedTo keybase1.UserOrTeamID) *EventuallyConsistentQuotaUsage
func makeDefaultBackpressureDiskLimiterParams(
storageRoot string,
quotaUsage quotaUsageGetter, diskCacheFrac float64, syncCacheFrac float64) backpressureDiskLimiterParams {
return backpressureDiskLimiterParams{
// Start backpressure when 50% of free bytes or files
// are used...
minThreshold: 0.5,
// ...and max it out at 95% (slightly less than 100%
// to allow for inaccuracies in estimates).
maxThreshold: 0.95,
// Start backpressure when we've used 100% of our quota...
quotaMinThreshold: 1.0,
// ...and max it out at 120% of quota.
quotaMaxThreshold: 1.2,
// Cap journal usage to 85% of free bytes and files...
journalFrac: 0.85,
// ...and cap disk cache usage as specified. The
// disk cache doesn't store individual files.
diskCacheFrac: diskCacheFrac,
// Also cap the sync cache usage for offline files.
syncCacheFrac: syncCacheFrac,
// Set the byte limit to 200 GiB, which translates to
// having the journal take up at most 170 GiB, and the
// disk cache to take up at most 20 GiB.
byteLimit: 200 * 1024 * 1024 * 1024,
// Set the file limit to 6 million files, which
// translates to having the journal take up at most
// 900k files.
fileLimit: 6000000,
maxDelay: defaultDiskLimitMaxDelay,
delayFn: defaultDoDelay,
freeBytesAndFilesFn: func() (int64, int64, error) {
return defaultGetFreeBytesAndFiles(storageRoot)
},
quotaFn: func(ctx context.Context, chargedTo keybase1.UserOrTeamID) (
int64, int64) {
timestamp, usageBytes, _, limitBytes, err :=
quotaUsage(chargedTo).Get(ctx, 1*time.Minute, math.MaxInt64)
if err != nil {
return 0, math.MaxInt64
}
if timestamp.IsZero() {
return 0, math.MaxInt64
}
return usageBytes, limitBytes
},
}
}
// newBackpressureDiskLimiter constructs a new backpressureDiskLimiter
// with the given params.
func newBackpressureDiskLimiter(
log logger.Logger, params backpressureDiskLimiterParams) (
*backpressureDiskLimiter, error) {
freeBytes, freeFiles, err := params.freeBytesAndFilesFn()
if err != nil {
return nil, err
}
journalTracker, err := newJournalTracker(
params.minThreshold, params.maxThreshold,
params.quotaMinThreshold, params.quotaMaxThreshold,
params.journalFrac, params.byteLimit, params.fileLimit,
freeBytes, freeFiles)
if err != nil {
return nil, err
}
// byteLimit must be scaled by the proportion of the limit
// that the disk cache should consume. Add 0.5 for rounding.
diskCacheByteLimit := int64(
(float64(params.byteLimit) * params.diskCacheFrac) + 0.5)
syncCacheByteLimit := int64(
(float64(params.byteLimit) * params.syncCacheFrac) + 0.5)
overallByteTracker, err := newBackpressureTracker(
1.0, 1.0, 1.0, params.byteLimit, freeBytes)
if err != nil {
return nil, err
}
diskCacheByteTracker, err := newBackpressureTracker(
1.0, 1.0, params.diskCacheFrac, diskCacheByteLimit, freeBytes)
if err != nil {
return nil, err
}
syncCacheByteTracker, err := newBackpressureTracker(
		1.0, 1.0, params.syncCacheFrac, syncCacheByteLimit, freeBytes)
if err != nil {
return nil, err
}
bdl := &backpressureDiskLimiter{
log: log,
maxDelay: params.maxDelay,
delayFn: params.delayFn,
freeBytesAndFilesFn: params.freeBytesAndFilesFn,
quotaFn: params.quotaFn,
lock: sync.RWMutex{},
overallByteTracker: overallByteTracker,
journalTracker: journalTracker,
diskCacheByteTracker: diskCacheByteTracker,
syncCacheByteTracker: syncCacheByteTracker,
}
return bdl, nil
}
// defaultDoDelay uses a timer to delay by the given duration.
func defaultDoDelay(ctx context.Context, delay time.Duration) error {
if delay == 0 {
return nil
}
timer := time.NewTimer(delay)
select {
case <-timer.C:
return nil
case <-ctx.Done():
timer.Stop()
return errors.WithStack(ctx.Err())
}
}
func defaultGetFreeBytesAndFiles(path string) (int64, int64, error) {
// getDiskLimits returns availableBytes and availableFiles,
// but we want to avoid confusing that with availBytes and
// availFiles in the sense of the semaphore value.
freeBytes, freeFiles, err := getDiskLimits(path)
if err != nil {
return 0, 0, err
}
if freeBytes > uint64(math.MaxInt64) {
freeBytes = math.MaxInt64
}
if freeFiles > uint64(math.MaxInt64) {
freeFiles = math.MaxInt64
}
return int64(freeBytes), int64(freeFiles), nil
}
func (bdl *backpressureDiskLimiter) simpleByteTrackerFromType(typ diskLimitTrackerType) (
tracker simpleResourceTracker, err error) {
switch typ {
case workingSetCacheLimitTrackerType:
return bdl.diskCacheByteTracker, nil
case syncCacheLimitTrackerType:
return bdl.syncCacheByteTracker, nil
default:
return nil, unknownTrackerTypeError{typ}
}
}
func (bdl *backpressureDiskLimiter) getJournalSnapshotsForTest(
chargedTo keybase1.UserOrTeamID) (
byteSnapshot, fileSnapshot, quotaSnapshot jtSnapshot) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
return bdl.journalTracker.getSnapshotsForTest(chargedTo)
}
func (bdl *backpressureDiskLimiter) onJournalEnable(
ctx context.Context,
journalStoredBytes, journalUnflushedBytes, journalFiles int64,
chargedTo keybase1.UserOrTeamID) (
availableBytes, availableFiles int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.overallByteTracker.onEnable(journalStoredBytes)
return bdl.journalTracker.onEnable(
journalStoredBytes, journalUnflushedBytes, journalFiles, chargedTo)
}
func (bdl *backpressureDiskLimiter) onJournalDisable(
ctx context.Context,
journalStoredBytes, journalUnflushedBytes, journalFiles int64,
chargedTo keybase1.UserOrTeamID) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.journalTracker.onDisable(
journalStoredBytes, journalUnflushedBytes, journalFiles, chargedTo)
bdl.overallByteTracker.onDisable(journalStoredBytes)
}
func (bdl *backpressureDiskLimiter) onSimpleByteTrackerEnable(ctx context.Context,
typ diskLimitTrackerType, diskCacheBytes int64) {
tracker, err := bdl.simpleByteTrackerFromType(typ)
if err != nil {
panic("Invalid tracker type passed to onByteTrackerEnable")
}
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.overallByteTracker.onEnable(diskCacheBytes)
tracker.onEnable(diskCacheBytes)
}
func (bdl *backpressureDiskLimiter) onSimpleByteTrackerDisable(ctx context.Context,
typ diskLimitTrackerType, diskCacheBytes int64) {
tracker, err := bdl.simpleByteTrackerFromType(typ)
if err != nil {
panic("Invalid tracker type passed to onByteTrackerDisable")
}
bdl.lock.Lock()
defer bdl.lock.Unlock()
tracker.onDisable(diskCacheBytes)
bdl.overallByteTracker.onDisable(diskCacheBytes)
}
func (bdl *backpressureDiskLimiter) getDelayLocked(
ctx context.Context, now time.Time,
chargedTo keybase1.UserOrTeamID) time.Duration {
delayScale := bdl.journalTracker.getDelayScale(chargedTo)
// Set maxDelay to min(bdl.maxDelay, time until deadline - 1s).
maxDelay := bdl.maxDelay
if deadline, ok := ctx.Deadline(); ok {
// Subtract a second to allow for some slack.
remainingTime := deadline.Sub(now) - time.Second
if remainingTime < maxDelay {
maxDelay = remainingTime
}
}
return time.Duration(delayScale * float64(maxDelay))
}
func (bdl *backpressureDiskLimiter) reserveError(err error) (
availableBytes, availableFiles int64, _ error) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
availableBytes, availableFiles =
bdl.journalTracker.getSemaphoreCounts()
return availableBytes, availableFiles, err
}
func (bdl *backpressureDiskLimiter) reserveWithBackpressure(
ctx context.Context, typ diskLimitTrackerType, blockBytes, blockFiles int64,
chargedTo keybase1.UserOrTeamID) (availableBytes, availableFiles int64,
err error) {
// TODO: if other backpressure consumers are introduced, remove this check.
if typ != journalLimitTrackerType {
return bdl.reserveError(errors.New(
"reserveWithBackpressure called with " +
"non-journal tracker type."))
}
if blockBytes == 0 {
// Better to return an error than to panic in Acquire.
return bdl.reserveError(errors.New(
"reserveWithBackpressure called with 0 blockBytes"))
}
if blockFiles == 0 {
// Better to return an error than to panic in Acquire.
return bdl.reserveError(errors.New(
"reserveWithBackpressure called with 0 blockFiles"))
}
delay, err := func() (time.Duration, error) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
// Call this under lock to avoid problems with its
// return values going stale while blocking on
// bdl.lock.
freeBytes, freeFiles, err := bdl.freeBytesAndFilesFn()
if err != nil {
return 0, err
}
bdl.overallByteTracker.updateFree(freeBytes)
bdl.journalTracker.updateFree(freeBytes, bdl.overallByteTracker.used,
freeFiles)
remoteUsedBytes, quotaBytes := bdl.quotaFn(ctx, chargedTo)
bdl.journalTracker.updateRemote(remoteUsedBytes, quotaBytes, chargedTo)
delay := bdl.getDelayLocked(ctx, time.Now(), chargedTo)
if delay > 0 {
bdl.log.CDebugf(ctx, "Delaying block put of %d bytes and %d "+
"files by %f s (%s)", blockBytes, blockFiles, delay.Seconds(),
bdl.journalTracker.getStatusLine(chargedTo))
}
return delay, nil
}()
if err != nil {
return bdl.reserveError(err)
}
// TODO: Update delay if any variables change (i.e., we suddenly free up a
// lot of space).
err = bdl.delayFn(ctx, delay)
if err != nil {
return bdl.reserveError(err)
}
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.overallByteTracker.reserve(ctx, blockBytes)
return bdl.journalTracker.reserve(ctx, blockBytes, blockFiles)
}
func (bdl *backpressureDiskLimiter) commitOrRollback(ctx context.Context,
typ diskLimitTrackerType, blockBytes, blockFiles int64, shouldCommit bool,
chargedTo keybase1.UserOrTeamID) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
switch typ {
case journalLimitTrackerType:
bdl.journalTracker.commitOrRollback(blockBytes, blockFiles,
shouldCommit, chargedTo)
default:
tracker, err := bdl.simpleByteTrackerFromType(typ)
if err != nil {
panic("Bad tracker type for commitOrRollback")
}
tracker.commitOrRollback(blockBytes, shouldCommit)
}
bdl.overallByteTracker.commitOrRollback(blockBytes, shouldCommit)
}
func (bdl *backpressureDiskLimiter) onBlocksFlush(
ctx context.Context, blockBytes int64, chargedTo keybase1.UserOrTeamID) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
bdl.journalTracker.onBlocksFlush(blockBytes, chargedTo)
}
func (bdl *backpressureDiskLimiter) release(ctx context.Context,
typ diskLimitTrackerType, blockBytes, blockFiles int64) {
bdl.lock.Lock()
defer bdl.lock.Unlock()
switch typ {
case journalLimitTrackerType:
bdl.journalTracker.release(blockBytes, blockFiles)
default:
tracker, err := bdl.simpleByteTrackerFromType(typ)
if err != nil {
panic("Bad tracker type for commitOrRollback")
}
tracker.release(blockBytes)
}
bdl.overallByteTracker.release(blockBytes)
}
func (bdl *backpressureDiskLimiter) reserveBytes(
ctx context.Context, typ diskLimitTrackerType, blockBytes int64) (
availableBytes int64, err error) {
if blockBytes == 0 {
// Better to return an error than to panic in ForceAcquire.
return 0, errors.New("reserve called with 0 blockBytes")
}
tracker, err := bdl.simpleByteTrackerFromType(typ)
if err != nil {
return 0, err
}
bdl.lock.Lock()
defer bdl.lock.Unlock()
// Call this under lock to avoid problems with its return
// values going stale while blocking on bdl.lock.
freeBytes, _, err := bdl.freeBytesAndFilesFn()
if err != nil {
return 0, err
}
bdl.overallByteTracker.updateFree(freeBytes)
count := bdl.overallByteTracker.tryReserve(blockBytes)
if count < 0 {
return count, nil
}
// We calculate the total free bytes by adding the reported free bytes and
// the non-`tracker` used bytes.
tracker.updateFree(freeBytes + bdl.overallByteTracker.used -
tracker.usedResources())
count = tracker.tryReserve(blockBytes)
if count < 0 {
bdl.overallByteTracker.rollback(blockBytes)
}
return count, nil
}
func (bdl *backpressureDiskLimiter) getQuotaInfo(
chargedTo keybase1.UserOrTeamID) (usedQuotaBytes, quotaBytes int64) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
return bdl.journalTracker.getQuotaInfo(chargedTo)
}
func (bdl *backpressureDiskLimiter) getDiskLimitInfo() (
usedBytes int64, limitBytes float64, usedFiles int64, limitFiles float64) {
bdl.lock.RLock()
defer bdl.lock.RUnlock()
return bdl.journalTracker.getDiskLimitInfo()
}
type backpressureDiskLimiterStatus struct {
Type string
// Derived stats.
CurrentDelaySec float64
JournalTrackerStatus journalTrackerStatus
DiskCacheByteStatus backpressureTrackerStatus
SyncCacheByteStatus backpressureTrackerStatus
}
func (bdl *backpressureDiskLimiter) getStatus(
ctx context.Context, chargedTo keybase1.UserOrTeamID) interface{} {
bdl.lock.Lock()
defer bdl.lock.Unlock()
currentDelay := bdl.getDelayLocked(
context.Background(), time.Now(), chargedTo)
jStatus := bdl.journalTracker.getStatus(chargedTo)
// If we haven't updated the quota limit yet, update it now.
if jStatus.QuotaStatus.QuotaBytes == math.MaxInt64 {
remoteUsedBytes, quotaBytes := bdl.quotaFn(ctx, chargedTo)
bdl.journalTracker.updateRemote(remoteUsedBytes, quotaBytes, chargedTo)
jStatus = bdl.journalTracker.getStatus(chargedTo)
}
return backpressureDiskLimiterStatus{
Type: "BackpressureDiskLimiter",
CurrentDelaySec: currentDelay.Seconds(),
JournalTrackerStatus: jStatus,
DiskCacheByteStatus: bdl.diskCacheByteTracker.getStatus(),
SyncCacheByteStatus: bdl.syncCacheByteTracker.getStatus(),
}
}
| 1 | 20,508 | Are you sure you want to do this? This will affect the working set block cache too, such that it'll now be tracking global free space. It throws off the fraction calculation. | keybase-kbfs | go |
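The concern in the comment above comes down to what the overall byte tracker counts: anything enabled on it becomes part of `overallByteTracker.used`, and `reserveBytes` folds that figure into the working-set cache tracker's view of free space (`tracker.updateFree(freeBytes + bdl.overallByteTracker.used - tracker.usedResources())` above). The following toy Python sketch uses invented numbers and a deliberately simplified fraction rule (it is not the real backpressureDiskLimiter arithmetic), just to show how counting journal bytes in the overall tracker inflates the cache's budget.

# Toy sketch of the reviewer's concern. The numbers and the fraction rule are
# invented; this is NOT the real backpressureDiskLimiter arithmetic.
def cache_budget(free_bytes, overall_used, cache_used, cache_frac):
    # Mirrors the shape of reserveBytes above: the cache tracker's "free" view
    # is the reported free bytes plus bytes used by other trackers that were
    # folded into the overall tracker.
    effective_free = free_bytes + overall_used - cache_used
    return cache_frac * (effective_free + cache_used)

free_bytes = 100 * 1024**3    # 100 GiB reported free on disk
cache_used = 10 * 1024**3     # 10 GiB already in the working-set cache
journal_used = 30 * 1024**3   # 30 GiB of journal data
cache_frac = 0.25             # cache may use 25% of its view of the disk

# Overall tracker only counts cache bytes:
print(cache_budget(free_bytes, cache_used, cache_used, cache_frac) / 1024**3)                 # 27.5 GiB
# Journal bytes are also enabled on the overall tracker, so the budget grows:
print(cache_budget(free_bytes, cache_used + journal_used, cache_used, cache_frac) / 1024**3)  # 35.0 GiB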
@@ -44,6 +44,7 @@ bool CompoundReaderListener::hasReaderAttached(){
return false;
}
ReaderListener* CompoundReaderListener::getAttachedListener(){
+ std::lock_guard<std::mutex> lock(attached_listener_mutex);
if(attached_listener != nullptr)
return attached_listener;
return nullptr; | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
/******
Implementation of functions for CompoundReaderListener
*********/
#include "CompoundReaderListener.h"
namespace eprosima{
namespace fastrtps{
namespace rtps{
void CompoundReaderListener::attachListener(ReaderListener *secondary_listener){
attached_listener_mutex.lock();
attached_listener = secondary_listener;
attached_listener_mutex.unlock();
}
void CompoundReaderListener::detachListener(){
attached_listener_mutex.lock();
attached_listener = nullptr;
attached_listener_mutex.unlock();
}
bool CompoundReaderListener::hasReaderAttached(){
if(attached_listener != nullptr)
return true;
return false;
}
ReaderListener* CompoundReaderListener::getAttachedListener(){
if(attached_listener != nullptr)
return attached_listener;
return nullptr;
}
//Namespace ends
}}}
| 1 | 12,774 | Why would the read access need to be mutexed? The caller might still get a wrong answer since the value might change immediately after the mutex is unlocked before the caller receives the value. | eProsima-Fast-DDS | cpp |
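The point in the comment above is the classic time-of-check versus time-of-use observation: a lock taken inside a getter only makes that one read atomic, and the returned value can already be stale by the time the caller acts on it. A small self-contained Python analogue (illustrative only, not Fast-DDS code):

# Locking inside get() protects the read itself, but the caller's copy can be
# stale the moment get() returns, which is the reviewer's point.
import threading
import time

class Holder:
    def __init__(self):
        self._lock = threading.Lock()
        self._listener = "listener-A"

    def detach(self):
        with self._lock:
            self._listener = None

    def get(self):
        with self._lock:           # atomic read...
            return self._listener  # ...but may be outdated right after return

holder = Holder()

def caller():
    value = holder.get()
    time.sleep(0.01)               # window between reading and using the value
    print("caller saw:", value, "| current:", holder.get())

t1 = threading.Thread(target=caller)
t2 = threading.Thread(target=holder.detach)
t1.start()
t2.start()
t1.join()
t2.join()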
@@ -631,6 +631,16 @@ class TabbedBrowser(tabwidget.TabWidget):
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(icon)
+ @pyqtSlot(usertypes.KeyMode)
+ def on_mode_entered(self, mode):
+ """Save input mode when tabs.mode_on_change = restore."""
+ input_modes = [usertypes.KeyMode.insert, usertypes.KeyMode.passthrough]
+ if (mode in input_modes and
+ config.val.tabs.mode_on_change == 'restore'):
+ tab = self.currentWidget()
+ if tab and tab is not None:
+ tab.data.input_mode = mode
+
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left.""" | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import functools
import attr
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl
from PyQt5.QtGui import QIcon
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget, mainwindow
from qutebrowser.browser import signalfilter, browsertab
from qutebrowser.utils import (log, usertypes, utils, qtutils, objreg,
urlutils, message, jinja)
@attr.s
class UndoEntry:
"""Information needed for :undo."""
url = attr.ib()
history = attr.ib()
index = attr.ib()
pinned = attr.ib()
class TabDeletedError(Exception):
"""Exception raised when _tab_index is called for a deleted tab."""
class TabbedBrowser(tabwidget.TabWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
emitted if the signal occurred in the current tab.
Attributes:
search_text/search_options: Search parameters which are shared between
all tabs.
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabs.new_tab_position set to 'prev'.
_tab_insert_idx_right: Same as above, for 'next'.
_undo_stack: List of lists of UndoEntry objects of closed tabs.
shutting_down: Whether we're currently shutting down.
_local_marks: Jump markers local to each page
_global_marks: Jump markers used across all pages
default_window_icon: The qutebrowser window icon
private: Whether private browsing is on for this window.
Signals:
cur_progress: Progress of the current tab changed (load_progress).
cur_load_started: Current tab started loading (load_started)
cur_load_finished: Current tab finished loading (load_finished)
cur_url_changed: Current URL changed.
cur_link_hovered: Link hovered in current tab (link_hovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted tab.
new_tab: Emits the new WebView and its index when a new tab is opened.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_url_changed = pyqtSignal(QUrl)
cur_link_hovered = pyqtSignal(str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(str)
cur_fullscreen_requested = pyqtSignal(bool)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
current_tab_changed = pyqtSignal(browsertab.AbstractTab)
new_tab = pyqtSignal(browsertab.AbstractTab, int)
def __init__(self, *, win_id, private, parent=None):
super().__init__(win_id, parent)
self._win_id = win_id
self._tab_insert_idx_left = 0
self._tab_insert_idx_right = -1
self.shutting_down = False
self.tabCloseRequested.connect(self.on_tab_close_requested)
self.new_tab_requested.connect(self.tabopen)
self.currentChanged.connect(self.on_current_changed)
self.cur_load_started.connect(self.on_cur_load_started)
self.cur_fullscreen_requested.connect(self.tabBar().maybe_hide)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self._undo_stack = []
self._filter = signalfilter.SignalFilter(win_id, self)
self._now_focused = None
self.search_text = None
self.search_options = {}
self._local_marks = {}
self._global_marks = {}
self.default_window_icon = self.window().windowIcon()
self.private = private
config.instance.changed.connect(self._on_config_changed)
def __repr__(self):
return utils.get_repr(self, count=self.count())
@pyqtSlot(str)
def _on_config_changed(self, option):
if option == 'tabs.favicons.show':
self._update_favicons()
elif option == 'window.title_format':
self._update_window_title()
elif option in ['tabs.title.format', 'tabs.title.format_pinned']:
self._update_tab_titles()
def _tab_index(self, tab):
"""Get the index of a given tab.
Raises TabDeletedError if the tab doesn't exist anymore.
"""
try:
idx = self.indexOf(tab)
except RuntimeError as e:
log.webview.debug("Got invalid tab ({})!".format(e))
raise TabDeletedError(e)
if idx == -1:
log.webview.debug("Got invalid tab (index is -1)!")
raise TabDeletedError("index is -1!")
return idx
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
widgets = []
for i in range(self.count()):
widget = self.widget(i)
if widget is None:
log.webview.debug("Got None-widget in tabbedbrowser!")
else:
widgets.append(widget)
return widgets
def _update_window_title(self, field=None):
"""Change the window title to match the current tab.
Args:
idx: The tab index to update.
field: A field name which was updated. If given, the title
is only set if the given field is in the template.
"""
title_format = config.val.window.title_format
if field is not None and ('{' + field + '}') not in title_format:
return
idx = self.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating window title because index is -1")
return
fields = self.get_tab_fields(idx)
fields['id'] = self._win_id
title = title_format.format(**fields)
self.window().setWindowTitle(title)
def _connect_tab_signals(self, tab):
"""Set up the needed signals for tab."""
# filtered signals
tab.link_hovered.connect(
self._filter.create(self.cur_link_hovered, tab))
tab.load_progress.connect(
self._filter.create(self.cur_progress, tab))
tab.load_finished.connect(
self._filter.create(self.cur_load_finished, tab))
tab.load_started.connect(
self._filter.create(self.cur_load_started, tab))
tab.scroller.perc_changed.connect(
self._filter.create(self.cur_scroll_perc_changed, tab))
tab.url_changed.connect(
self._filter.create(self.cur_url_changed, tab))
tab.load_status_changed.connect(
self._filter.create(self.cur_load_status_changed, tab))
tab.fullscreen_requested.connect(
self._filter.create(self.cur_fullscreen_requested, tab))
# misc
tab.scroller.perc_changed.connect(self.on_scroll_pos_changed)
tab.url_changed.connect(
functools.partial(self.on_url_changed, tab))
tab.title_changed.connect(
functools.partial(self.on_title_changed, tab))
tab.icon_changed.connect(
functools.partial(self.on_icon_changed, tab))
tab.load_progress.connect(
functools.partial(self.on_load_progress, tab))
tab.load_finished.connect(
functools.partial(self.on_load_finished, tab))
tab.load_started.connect(
functools.partial(self.on_load_started, tab))
tab.window_close_requested.connect(
functools.partial(self.on_window_close_requested, tab))
tab.renderer_process_terminated.connect(
functools.partial(self._on_renderer_process_terminated, tab))
tab.new_tab_requested.connect(self.tabopen)
if not self.private:
web_history = objreg.get('web-history')
tab.add_history_item.connect(web_history.add_from_tab)
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
idx = self.currentIndex()
return super().tab_url(idx)
def shutdown(self):
"""Try to shut down all tabs cleanly."""
self.shutting_down = True
        # Reverse tabs so we don't have to recalculate tab titles over and over
# Removing first causes [2..-1] to be recomputed
# Removing the last causes nothing to be recomputed
for tab in reversed(self.widgets()):
self._remove_tab(tab)
def tab_close_prompt_if_pinned(
self, tab, force, yes_action,
text="Are you sure you want to close a pinned tab?"):
"""Helper method for tab_close.
If tab is pinned, prompt. If not, run yes_action.
If tab is destroyed, abort question.
"""
if tab.data.pinned and not force:
message.confirm_async(
title='Pinned Tab',
text=text,
yes_action=yes_action, default=False, abort_on=[tab.destroyed])
else:
yes_action()
def close_tab(self, tab, *, add_undo=True, new_undo=True):
"""Close a tab.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
"""
last_close = config.val.tabs.last_close
count = self.count()
if last_close == 'ignore' and count == 1:
return
self._remove_tab(tab, add_undo=add_undo, new_undo=new_undo)
if count == 1: # We just closed the last tab above.
if last_close == 'close':
self.close_window.emit()
elif last_close == 'blank':
self.openurl(QUrl('about:blank'), newtab=True)
elif last_close == 'startpage':
for url in config.val.url.start_pages:
self.openurl(url, newtab=True)
elif last_close == 'default-page':
self.openurl(config.val.url.default_page, newtab=True)
def _remove_tab(self, tab, *, add_undo=True, new_undo=True, crashed=False):
"""Remove a tab from the tab list and delete it properly.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
crashed: Whether we're closing a tab with crashed renderer process.
"""
idx = self.indexOf(tab)
if idx == -1:
if crashed:
return
raise TabDeletedError("tab {} is not contained in "
"TabbedWidget!".format(tab))
if tab is self._now_focused:
self._now_focused = None
if tab is objreg.get('last-focused-tab', None, scope='window',
window=self._win_id):
objreg.delete('last-focused-tab', scope='window',
window=self._win_id)
if tab.url().isEmpty():
# There are some good reasons why a URL could be empty
# (target="_blank" with a download, see [1]), so we silently ignore
# this.
# [1] https://github.com/qutebrowser/qutebrowser/issues/163
pass
elif not tab.url().isValid():
# We display a warning for URLs which are not empty but invalid -
# but we don't return here because we want the tab to close either
# way.
urlutils.invalid_url_error(tab.url(), "saving tab")
elif add_undo:
try:
history_data = tab.history.serialize()
except browsertab.WebTabError:
pass # special URL
else:
entry = UndoEntry(tab.url(), history_data, idx,
tab.data.pinned)
if new_undo or not self._undo_stack:
self._undo_stack.append([entry])
else:
self._undo_stack[-1].append(entry)
tab.shutdown()
self.removeTab(idx)
if not crashed:
# WORKAROUND for a segfault when we delete the crashed tab.
# see https://bugreports.qt.io/browse/QTBUG-58698
tab.layout().unwrap()
tab.deleteLater()
def undo(self):
"""Undo removing of a tab or tabs."""
# Remove unused tab which may be created after the last tab is closed
last_close = config.val.tabs.last_close
use_current_tab = False
if last_close in ['blank', 'startpage', 'default-page']:
only_one_tab_open = self.count() == 1
no_history = len(self.widget(0).history) == 1
urls = {
'blank': QUrl('about:blank'),
'startpage': config.val.url.start_pages[0],
'default-page': config.val.url.default_page,
}
first_tab_url = self.widget(0).url()
last_close_urlstr = urls[last_close].toString().rstrip('/')
first_tab_urlstr = first_tab_url.toString().rstrip('/')
last_close_url_used = first_tab_urlstr == last_close_urlstr
use_current_tab = (only_one_tab_open and no_history and
last_close_url_used)
for entry in reversed(self._undo_stack.pop()):
if use_current_tab:
self.openurl(entry.url, newtab=False)
newtab = self.widget(0)
use_current_tab = False
else:
newtab = self.tabopen(entry.url, background=False,
idx=entry.index)
newtab.history.deserialize(entry.history)
self.set_tab_pinned(newtab, entry.pinned)
@pyqtSlot('QUrl', bool)
def openurl(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab or self.currentWidget() is None:
self.tabopen(url, background=False)
else:
self.currentWidget().openurl(url)
@pyqtSlot(int)
def on_tab_close_requested(self, idx):
"""Close a tab via an index."""
tab = self.widget(idx)
if tab is None:
log.webview.debug("Got invalid tab {} for index {}!".format(
tab, idx))
return
self.tab_close_prompt_if_pinned(
tab, False, lambda: self.close_tab(tab))
@pyqtSlot(browsertab.AbstractTab)
def on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
try:
self.close_tab(widget)
except TabDeletedError:
log.webview.debug("Requested to close {!r} which does not "
"exist!".format(widget))
@pyqtSlot('QUrl')
@pyqtSlot('QUrl', bool)
@pyqtSlot('QUrl', bool, bool)
def tabopen(self, url=None, background=None, related=True, idx=None, *,
ignore_tabs_are_windows=False):
"""Open a new tab with a given URL.
Inner logic for open-tab and open-tab-bg.
Also connect all the signals we need to _filter_signals.
Args:
url: The URL to open as QUrl or None for an empty tab.
background: Whether to open the tab in the background.
                        if None, the `tabs.background` setting decides.
related: Whether the tab was opened from another existing tab.
If this is set, the new position might be different. With
the default settings we handle it like Chromium does:
- Tabs from clicked links etc. are to the right of
the current (related=True).
- Explicitly opened tabs are at the very right
(related=False)
idx: The index where the new tab should be opened.
ignore_tabs_are_windows: If given, never open a new window, even
with tabs.tabs_are_windows set.
Return:
The opened WebView instance.
"""
if url is not None:
qtutils.ensure_valid(url)
log.webview.debug("Creating new tab with URL {}, background {}, "
"related {}, idx {}".format(
url, background, related, idx))
if (config.val.tabs.tabs_are_windows and self.count() > 0 and
not ignore_tabs_are_windows):
window = mainwindow.MainWindow(private=self.private)
window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window.win_id)
return tabbed_browser.tabopen(url=url, background=background,
related=related)
tab = browsertab.create(win_id=self._win_id, private=self.private,
parent=self)
self._connect_tab_signals(tab)
if idx is None:
idx = self._get_new_tab_idx(related)
self.insertTab(idx, tab, "")
if url is not None:
tab.openurl(url)
if background is None:
background = config.val.tabs.background
if background:
# Make sure the background tab has the correct initial size.
# With a foreground tab, it's going to be resized correctly by the
# layout anyways.
tab.resize(self.currentWidget().size())
self.tab_index_changed.emit(self.currentIndex(), self.count())
else:
self.setCurrentWidget(tab)
tab.show()
self.new_tab.emit(tab, idx)
return tab
def _get_new_tab_idx(self, related):
"""Get the index of a tab to insert.
Args:
related: Whether the tab was opened from another tab (as a "child")
Return:
The index of the new tab.
"""
if related:
pos = config.val.tabs.new_position.related
else:
pos = config.val.tabs.new_position.unrelated
if pos == 'prev':
idx = self._tab_insert_idx_left
# On first sight, we'd think we have to decrement
# self._tab_insert_idx_left here, as we want the next tab to be
# *before* the one we just opened. However, since we opened a tab
# *before* the currently focused tab, indices will shift by
# 1 automatically.
elif pos == 'next':
idx = self._tab_insert_idx_right
self._tab_insert_idx_right += 1
elif pos == 'first':
idx = 0
elif pos == 'last':
idx = -1
else:
raise ValueError("Invalid tabs.new_position '{}'.".format(pos))
log.webview.debug("tabs.new_position {} -> opening new tab at {}, "
"next left: {} / right: {}".format(
pos, idx, self._tab_insert_idx_left,
self._tab_insert_idx_right))
return idx
def _update_favicons(self):
"""Update favicons when config was changed."""
for i, tab in enumerate(self.widgets()):
if config.val.tabs.favicons.show:
self.setTabIcon(i, tab.icon())
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(tab.icon())
else:
self.setTabIcon(i, QIcon())
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(self.default_window_icon)
@pyqtSlot()
def on_load_started(self, tab):
"""Clear icon and update title when a tab started loading.
Args:
tab: The tab where the signal belongs to.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self._update_tab_title(idx)
if tab.data.keep_icon:
tab.data.keep_icon = False
else:
self.setTabIcon(idx, QIcon())
if (config.val.tabs.tabs_are_windows and
config.val.tabs.favicons.show):
self.window().setWindowIcon(self.default_window_icon)
if idx == self.currentIndex():
self._update_window_title()
@pyqtSlot()
def on_cur_load_started(self):
"""Leave insert/hint mode when loading started."""
modeman.leave(self._win_id, usertypes.KeyMode.insert, 'load started',
maybe=True)
modeman.leave(self._win_id, usertypes.KeyMode.hint, 'load started',
maybe=True)
@pyqtSlot(browsertab.AbstractTab, str)
def on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the title_changed signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
self.set_page_title(idx, text)
if idx == self.currentIndex():
self._update_window_title()
@pyqtSlot(browsertab.AbstractTab, QUrl)
def on_url_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
            tab: The WebView where the URL was changed.
url: The new URL.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if not self.page_title(idx):
self.set_page_title(idx, url.toDisplayString())
@pyqtSlot(browsertab.AbstractTab, QIcon)
def on_icon_changed(self, tab, icon):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
            tab: The WebView where the icon was changed.
icon: The new icon
"""
if not config.val.tabs.favicons.show:
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.setTabIcon(idx, icon)
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(icon)
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
if mode in [usertypes.KeyMode.command, usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno]:
widget = self.currentWidget()
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
if widget is None:
return
widget.setFocus()
@pyqtSlot(int)
def on_current_changed(self, idx):
"""Set last-focused-tab and leave hinting mode when focus changed."""
mode_on_change = config.val.tabs.mode_on_change
modes_to_save = [usertypes.KeyMode.insert,
usertypes.KeyMode.passthrough]
if idx == -1 or self.shutting_down:
# closing the last tab (before quitting) or shutting down
return
tab = self.widget(idx)
if tab is None:
log.webview.debug("on_current_changed got called with invalid "
"index {}".format(idx))
return
if self._now_focused is not None and mode_on_change == 'restore':
current_mode = modeman.instance(self._win_id).mode
if current_mode not in modes_to_save:
current_mode = usertypes.KeyMode.normal
self._now_focused.data.input_mode = current_mode
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
modes_to_leave = [usertypes.KeyMode.hint, usertypes.KeyMode.caret]
if mode_on_change != 'persist':
modes_to_leave += modes_to_save
for mode in modes_to_leave:
modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
if mode_on_change == 'restore':
modeman.enter(self._win_id, tab.data.input_mode,
'restore input mode for tab')
if self._now_focused is not None:
objreg.register('last-focused-tab', self._now_focused, update=True,
scope='window', window=self._win_id)
self._now_focused = tab
self.current_tab_changed.emit(tab)
QTimer.singleShot(0, self._update_window_title)
self._tab_insert_idx_left = self.currentIndex()
self._tab_insert_idx_right = self.currentIndex() + 1
@pyqtSlot()
def on_cmd_return_pressed(self):
"""Set focus when the commandline closes."""
log.modes.debug("Commandline closed, focusing {!r}".format(self))
def on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
start = config.val.colors.tabs.indicator.start
stop = config.val.colors.tabs.indicator.stop
system = config.val.colors.tabs.indicator.system
color = utils.interpolate_color(start, stop, perc, system)
self.set_tab_indicator_color(idx, color)
self._update_tab_title(idx)
if idx == self.currentIndex():
self._update_window_title()
def on_load_finished(self, tab, ok):
"""Adjust tab indicator when loading finished."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if ok:
start = config.val.colors.tabs.indicator.start
stop = config.val.colors.tabs.indicator.stop
system = config.val.colors.tabs.indicator.system
color = utils.interpolate_color(start, stop, 100, system)
else:
color = config.val.colors.tabs.indicator.error
self.set_tab_indicator_color(idx, color)
self._update_tab_title(idx)
if idx == self.currentIndex():
self._update_window_title()
tab.handle_auto_insert_mode(ok)
@pyqtSlot()
def on_scroll_pos_changed(self):
"""Update tab and window title when scroll position changed."""
idx = self.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating scroll position because index is "
"-1")
return
self._update_window_title('scroll_pos')
self._update_tab_title(idx, 'scroll_pos')
def _on_renderer_process_terminated(self, tab, status, code):
"""Show an error when a renderer process terminated."""
if status == browsertab.TerminationStatus.normal:
return
messages = {
browsertab.TerminationStatus.abnormal:
"Renderer process exited with status {}".format(code),
browsertab.TerminationStatus.crashed:
"Renderer process crashed",
browsertab.TerminationStatus.killed:
"Renderer process was killed",
browsertab.TerminationStatus.unknown:
"Renderer process did not start",
}
msg = messages[status]
def show_error_page(html):
tab.set_html(html)
log.webview.error(msg)
if qtutils.version_check('5.9', compiled=False):
url_string = tab.url(requested=True).toDisplayString()
error_page = jinja.render(
'error.html', title="Error loading {}".format(url_string),
url=url_string, error=msg)
QTimer.singleShot(100, lambda: show_error_page(error_page))
else:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58698
message.error(msg)
self._remove_tab(tab, crashed=True)
if self.count() == 0:
self.tabopen(QUrl('about:blank'))
def resizeEvent(self, e):
"""Extend resizeEvent of QWidget to emit a resized signal afterwards.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self.resized.emit(self.geometry())
def wheelEvent(self, e):
"""Override wheelEvent of QWidget to forward it to the focused tab.
Args:
e: The QWheelEvent
"""
if self._now_focused is not None:
self._now_focused.wheelEvent(e)
else:
e.ignore()
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
# strip the fragment as it may interfere with scrolling
try:
url = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
# show an error only if the mark is not automatically set
if key != "'":
message.error("Failed to set mark: url invalid")
return
point = self.currentWidget().scroller.pos_px()
if key.isupper():
self._global_marks[key] = point, url
else:
if url not in self._local_marks:
self._local_marks[url] = {}
self._local_marks[url][key] = point
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
try:
# consider urls that differ only in fragment to be identical
urlkey = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
urlkey = None
tab = self.currentWidget()
if key.isupper():
if key in self._global_marks:
point, url = self._global_marks[key]
def callback(ok):
"""Scroll once loading finished."""
if ok:
self.cur_load_finished.disconnect(callback)
tab.scroller.to_point(point)
self.openurl(url, newtab=False)
self.cur_load_finished.connect(callback)
else:
message.error("Mark {} is not set".format(key))
elif urlkey is None:
message.error("Current URL is invalid!")
elif urlkey in self._local_marks and key in self._local_marks[urlkey]:
point = self._local_marks[urlkey][key]
# save the pre-jump position in the special ' mark
# this has to happen after we read the mark, otherwise jump_mark
# "'" would just jump to the current position every time
self.set_mark("'")
tab.scroller.to_point(point)
else:
message.error("Mark {} is not set".format(key))
| 1 | 20,732 | This fits on one line without the parentheses :wink: | qutebrowser-qutebrowser | py |
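For reference, the `if` added in the hunk above, written as a single unwrapped line the way the reviewer suggests. The `config` object and the mode values below are stand-ins only, so the snippet runs on its own:

# Stand-ins so this runs standalone; only the 'if' line illustrates the point.
from types import SimpleNamespace

config = SimpleNamespace(val=SimpleNamespace(tabs=SimpleNamespace(mode_on_change='restore')))
input_modes = ['insert', 'passthrough']
mode = 'insert'

if mode in input_modes and config.val.tabs.mode_on_change == 'restore':
    print("would save the tab's input mode")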
@@ -64,7 +64,7 @@ public class TestCloudRecovery extends SolrCloudTestCase {
@Before
public void beforeTest() throws Exception {
- configureCluster(2)
+ configureCluster(4)
.addConfig("config", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.apache.commons.io.IOUtils;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.cloud.ClusterStateUtil;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.solr.update.UpdateLog;
import org.apache.solr.update.UpdateShardHandler;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Metric;
import com.codahale.metrics.Timer;
public class TestCloudRecovery extends SolrCloudTestCase {
private static final String COLLECTION = "collection1";
private static boolean onlyLeaderIndexes;
private int nrtReplicas;
private int tlogReplicas;
@BeforeClass
public static void setupCluster() throws Exception {
System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
System.setProperty("solr.ulog.numRecordsToKeep", "1000");
}
@Before
public void beforeTest() throws Exception {
configureCluster(2)
.addConfig("config", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
onlyLeaderIndexes = random().nextBoolean();
nrtReplicas = 2; // onlyLeaderIndexes?0:2;
tlogReplicas = 0; // onlyLeaderIndexes?2:0; TODO: SOLR-12313 tlog replicas break tests because
// TestInjection#waitForInSyncWithLeader is broken
CollectionAdminRequest
.createCollection(COLLECTION, "config", 2, nrtReplicas, tlogReplicas, 0)
.setMaxShardsPerNode(2)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(COLLECTION, 2, 2 * (nrtReplicas + tlogReplicas));
// SOLR-12314 : assert that these values are from the solr.xml file and not UpdateShardHandlerConfig#DEFAULT
for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()) {
UpdateShardHandler shardHandler = jettySolrRunner.getCoreContainer().getUpdateShardHandler();
int socketTimeout = shardHandler.getSocketTimeout();
int connectionTimeout = shardHandler.getConnectionTimeout();
assertEquals(340000, socketTimeout);
assertEquals(45000, connectionTimeout);
}
}
@After
public void afterTest() throws Exception {
shutdownCluster();
}
@Test
// commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
public void leaderRecoverFromLogOnStartupTest() throws Exception {
AtomicInteger countReplayLog = new AtomicInteger(0);
DirectUpdateHandler2.commitOnClose = false;
UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;
CloudSolrClient cloudClient = cluster.getSolrClient();
cloudClient.add(COLLECTION, sdoc("id", "1"));
cloudClient.add(COLLECTION, sdoc("id", "2"));
cloudClient.add(COLLECTION, sdoc("id", "3"));
cloudClient.add(COLLECTION, sdoc("id", "4"));
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("q", "*:*");
QueryResponse resp = cloudClient.query(COLLECTION, params);
assertEquals(0, resp.getResults().getNumFound());
ChaosMonkey.stop(cluster.getJettySolrRunners());
for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()) {
cluster.waitForJettyToStop(jettySolrRunner);
}
assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
ChaosMonkey.start(cluster.getJettySolrRunners());
cluster.waitForAllNodes(30);
assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));
resp = cloudClient.query(COLLECTION, params);
assertEquals(4, resp.getResults().getNumFound());
    // Make sure all nodes recover from the tlog
if (onlyLeaderIndexes) {
      // Leader election can be kicked off, so 2 tlog replicas will replay their tlogs before becoming the new leader
assertTrue( countReplayLog.get() >=2);
} else {
assertEquals(4, countReplayLog.get());
}
// check metrics
int replicationCount = 0;
int errorsCount = 0;
int skippedCount = 0;
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
List<String> registryNames = manager.registryNames().stream()
.filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
for (String registry : registryNames) {
Map<String, Metric> metrics = manager.registry(registry).getMetrics();
Timer timer = (Timer)metrics.get("REPLICATION.peerSync.time");
Counter counter = (Counter)metrics.get("REPLICATION.peerSync.errors");
Counter skipped = (Counter)metrics.get("REPLICATION.peerSync.skipped");
replicationCount += timer.getCount();
errorsCount += counter.getCount();
skippedCount += skipped.getCount();
}
}
if (onlyLeaderIndexes) {
assertTrue(replicationCount >= 2);
} else {
assertEquals(2, replicationCount);
}
}
@Test
// commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
public void corruptedLogTest() throws Exception {
AtomicInteger countReplayLog = new AtomicInteger(0);
DirectUpdateHandler2.commitOnClose = false;
UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;
CloudSolrClient cloudClient = cluster.getSolrClient();
cloudClient.add(COLLECTION, sdoc("id", "1000"));
cloudClient.add(COLLECTION, sdoc("id", "1001"));
for (int i = 0; i < 10; i++) {
cloudClient.add(COLLECTION, sdoc("id", String.valueOf(i)));
}
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("q", "*:*");
QueryResponse resp = cloudClient.query(COLLECTION, params);
assertEquals(0, resp.getResults().getNumFound());
int logHeaderSize = Integer.MAX_VALUE;
Map<String, byte[]> contentFiles = new HashMap<>();
for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
File tlogFolder = new File(solrCore.getUlogDir(), UpdateLog.TLOG_NAME);
String[] tLogFiles = tlogFolder.list();
Arrays.sort(tLogFiles);
String lastTLogFile = tlogFolder.getAbsolutePath() + "/" + tLogFiles[tLogFiles.length - 1];
try (FileInputStream inputStream = new FileInputStream(lastTLogFile)){
byte[] tlogBytes = IOUtils.toByteArray(inputStream);
contentFiles.put(lastTLogFile, tlogBytes);
logHeaderSize = Math.min(tlogBytes.length, logHeaderSize);
}
}
}
ChaosMonkey.stop(cluster.getJettySolrRunners());
for (JettySolrRunner j : cluster.getJettySolrRunners()) {
cluster.waitForJettyToStop(j);
}
assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
for (Map.Entry<String, byte[]> entry : contentFiles.entrySet()) {
byte[] tlogBytes = entry.getValue();
if (tlogBytes.length <= logHeaderSize) continue;
try (FileOutputStream stream = new FileOutputStream(entry.getKey())) {
int skipLastBytes = Math.max(random().nextInt(tlogBytes.length - logHeaderSize)-2, 2);
for (int i = 0; i < entry.getValue().length - skipLastBytes; i++) {
stream.write(tlogBytes[i]);
}
}
}
ChaosMonkey.start(cluster.getJettySolrRunners());
cluster.waitForAllNodes(30);
Thread.sleep(1000);
assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));
cluster.waitForActiveCollection(COLLECTION, 2, 2 * (nrtReplicas + tlogReplicas));
cloudClient.getZkStateReader().forceUpdateCollection(COLLECTION);
resp = cloudClient.query(COLLECTION, params);
// Make sure cluster still healthy
// TODO: AwaitsFix - this will fail under test beasting
// assertTrue(resp.toString(), resp.getResults().getNumFound() >= 2);
}
}
| 1 | 29,215 | Looks like `TestCloudRecovery` changes are unrelated? | apache-lucene-solr | java |
@@ -10,16 +10,14 @@ declare(strict_types = 1);
namespace Ergonode\Attribute\Persistence\Dbal\Projector\Attribute;
use Doctrine\DBAL\Connection;
+use Doctrine\DBAL\DBALException;
use Ergonode\Attribute\Domain\Event\Attribute\AttributeHintChangedEvent;
use Ergonode\Core\Domain\Entity\AbstractId;
-use Ergonode\EventSourcing\Infrastructure\DomainEventInterface;
-use Ergonode\EventSourcing\Infrastructure\Exception\UnsupportedEventException;
-use Ergonode\EventSourcing\Infrastructure\Projector\DomainEventProjectorInterface;
use Ramsey\Uuid\Uuid;
/**
*/
-class AttributeHintChangedEventProjector implements DomainEventProjectorInterface
+class AttributeHintChangedEventProjector
{
private const TABLE = 'value_translation';
| 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types = 1);
namespace Ergonode\Attribute\Persistence\Dbal\Projector\Attribute;
use Doctrine\DBAL\Connection;
use Ergonode\Attribute\Domain\Event\Attribute\AttributeHintChangedEvent;
use Ergonode\Core\Domain\Entity\AbstractId;
use Ergonode\EventSourcing\Infrastructure\DomainEventInterface;
use Ergonode\EventSourcing\Infrastructure\Exception\UnsupportedEventException;
use Ergonode\EventSourcing\Infrastructure\Projector\DomainEventProjectorInterface;
use Ramsey\Uuid\Uuid;
/**
*/
class AttributeHintChangedEventProjector implements DomainEventProjectorInterface
{
private const TABLE = 'value_translation';
/**
* @var Connection
*/
private $connection;
/**
* @param Connection $connection
*/
public function __construct(Connection $connection)
{
$this->connection = $connection;
}
/**
* {@inheritDoc}
*/
public function supports(DomainEventInterface $event): bool
{
return $event instanceof AttributeHintChangedEvent;
}
/**
* {@inheritDoc}
*
* @throws \Throwable
*/
public function projection(AbstractId $aggregateId, DomainEventInterface $event): void
{
if (!$this->supports($event)) {
throw new UnsupportedEventException($event, AttributeHintChangedEvent::class);
}
$this->connection->transactional(function () use ($aggregateId, $event) {
$from = $event->getFrom()->getTranslations();
$to = $event->getTo()->getTranslations();
foreach ($to as $language => $value) {
$result = $this->connection->update(
self::TABLE,
[
'language' => $language,
'value' => $value,
],
[
'value_id' => $this->getTranslationId('hint', $aggregateId),
'language' => $language,
]
);
if (!$result) {
$this->connection->insert(
self::TABLE,
[
'id' => Uuid::uuid4()->toString(),
'value_id' => $this->getTranslationId('hint', $aggregateId),
'language' => $language,
'value' => $value,
]
);
}
}
foreach ($from as $language => $value) {
if (!isset($to[$language])) {
$this->connection->delete(
self::TABLE,
[
'value_id' => $this->getTranslationId('hint', $aggregateId),
'language' => $language,
]
);
}
}
});
}
/**
* @param string $field
* @param AbstractId $attributeId
*
* @return string
*/
private function getTranslationId(string $field, AbstractId $attributeId): string
{
$qb = $this->connection->createQueryBuilder();
return $qb->select($field)
->from('attribute')
->where($qb->expr()->eq('id', ':id'))
->setParameter(':id', $attributeId->getValue())
->execute()
->fetch(\PDO::FETCH_COLUMN);
}
}
| 1 | 8,461 | Try to separate it into different methods. Invoke is huge :) | ergonode-backend | php |
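A rough sketch of the extract-method shape the reviewer is asking for, written in Python for brevity; the class, method names, and in-memory storage below are invented stand-ins, not the DBAL-backed projector above:

# Invented illustration of splitting one large projection method into small,
# single-purpose helpers; the dict stands in for the value_translation table.
class HintProjector:
    def __init__(self):
        self.storage = {}  # (value_id, language) -> value

    def project(self, value_id, old, new):
        for language, value in new.items():
            self._upsert_translation(value_id, language, value)
        self._delete_removed(value_id, old, new)

    def _upsert_translation(self, value_id, language, value):
        self.storage[(value_id, language)] = value

    def _delete_removed(self, value_id, old, new):
        for language in old:
            if language not in new:
                self.storage.pop((value_id, language), None)

projector = HintProjector()
projector.project("attr-1", {"en": "old", "pl": "stary"}, {"en": "new"})
print(projector.storage)  # {('attr-1', 'en'): 'new'}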
@@ -16,7 +16,7 @@
</thead>
<tbody>
<% scope.each do |plan| %>
- <tr>
+ <tr id="<%= dom_id(plan) %>">
<td>
<%= link_to "#{plan.title.length > 60 ? "#{plan.title[0..59]} ..." : plan.title}",
plan_path(plan) %> | 1 | <div class="table-responsive">
<table class="table table-hover" id="my-plans">
<thead>
<tr>
<th scope="col"><%= _('Project Title') %> <%= paginable_sort_link('plans.title') %></th>
<th scope="col"><%= _('Template') %> <%= paginable_sort_link('templates.title') %></th>
<th scope="col"><%= _('Edited') %> <%= paginable_sort_link('plans.updated_at') %></th>
<th scope="col"><%= _('Role') %></th>
<th scope="col" class="text-center sorter-false"><%= _('Test') %></th>
<th scope="col" ><%= _('Visibility') %></th>
<th scope="col" ><%= _('Shared') %></th>
<th scope="col" class="sorter-false">
<span aria-hidden="false" class="sr-only"><%= _('Actions') %></span>
</th>
</tr>
</thead>
<tbody>
<% scope.each do |plan| %>
<tr>
<td>
<%= link_to "#{plan.title.length > 60 ? "#{plan.title[0..59]} ..." : plan.title}",
plan_path(plan) %>
</td>
<td><%= plan.template.title %></td>
<td><%= l(plan.updated_at.to_date, formats: :short) %></td>
<td><%= display_role(plan.roles.find_by(user: current_user)) %></td>
<td class="text-center">
<% if plan.administerable_by?(current_user.id) then %>
<%= form_for plan, url: set_test_plan_path(plan), html: { method: :post, class: 'set_test_plan', remote: true } do |f| %>
<%= check_box_tag(:is_test, "1", (plan.visibility === 'is_test')) %>
<%= f.submit(_('Update'), style: 'display:none') %>
<% end %>
<% else %>
<%= plan.visibility === 'is_test' ? _('Yes') : _('No') %>
<% end %>
</td>
<td class="plan-visibility">
<%= plan.visibility === 'is_test' ? _('N/A') : sanitize(display_visibility(plan.visibility)) %>
</td>
<td>
<% if plan.shared? %>
<%= _("Yes") %>
<% else %>
<%= _('No') %>
<% end %>
</td>
<td>
<div class="dropdown">
<button class="btn btn-link dropdown-toggle" type="button"
id="plan-<%= plan.id %>-actions" data-toggle="dropdown"
aria-haspopup="true" aria-expanded="true">
<%= _('Actions') %><span class="caret"></span>
</button>
<ul class="dropdown-menu" aria-labelledby="plan-<%= plan.id %>-actions">
<% if plan.editable_by?(current_user.id) then %>
<li><%= link_to _('Edit'), plan_path(plan) %></li>
<% if plan.administerable_by?(current_user.id) %>
<li><%= link_to _('Share'), share_plan_path(plan) %></li>
<% end %>
<li><%= link_to _('Download'), download_plan_path(plan) %></li>
<li><%= link_to(_('Make a copy'), duplicate_plan_path(plan), data: { method: :post }) %></li>
<% else %>
<li><%= link_to _('View'), plan_path(plan) %></li>
<% if !plan.reviewable_by?(current_user.id) %>
<li><%= link_to _('Download'), download_plan_path(plan) %></li>
<% end %>
<% end %>
<% role = plan.roles.where(user_id: current_user.id).first %>
<% conf = (role.creator? && plan.publicly_visible?) ? _("Are you sure you wish to remove this public plan? This will remove it from the Public DMPs page but any collaborators will still be able to access it.") : _("Are you sure you wish to remove this plan? Any collaborators will still be able to access it.") %>
<li><%= link_to _('Remove'), deactivate_role_path(role), 'data-method': 'put', rel: 'nofollow', 'data-confirm': conf %></li>
</ul>
</div>
</td>
</tr>
<% end %>
</tbody>
</table>
</div> | 1 | 18,073 | Looks like another spot that would benefit from `truncate` | DMPRoadmap-roadmap | rb |
@@ -272,7 +272,7 @@ static mrb_value build_constants(mrb_state *mrb, const char *server_name, size_t
mrb_ary_set(mrb, ary, i, lit);
}
for (; i != H2O_MAX_TOKENS * 2; ++i) {
- const h2o_token_t *token = h2o__tokens + i - H2O_MAX_TOKENS;
+ const h2o_token_t *token = h2o__tokens + (i - H2O_MAX_TOKENS);
mrb_value lit = mrb_nil_value();
if (token == H2O_TOKEN_CONTENT_TYPE) {
lit = mrb_str_new_lit(mrb, "CONTENT_TYPE"); | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Ryosuke Matsumoto,
* Masayoshi Takahashi
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <mruby.h>
#include <mruby/proc.h>
#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/compile.h>
#include <mruby/error.h>
#include <mruby/hash.h>
#include <mruby/opcode.h>
#include <mruby/string.h>
#include <mruby/throw.h>
#include <mruby/variable.h>
#include <mruby_input_stream.h>
#include "h2o.h"
#include "h2o/mruby_.h"
#include "mruby/embedded.c.h"
#define STATUS_FALLTHRU 399
#define FALLTHRU_SET_PREFIX "x-fallthru-set-"
#define FREEZE_STRING(v) MRB_SET_FROZEN_FLAG(mrb_obj_ptr(v))
void h2o_mruby__abort_exc(mrb_state *mrb, const char *mess, const char *file, int line)
{
h2o__fatal(file, line, "%s:%s\n", mess, RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc))));
}
mrb_value h2o_mruby__new_str(mrb_state *mrb, const char *s, size_t len, int is_static, const char *file, int line)
{
if (mrb->exc != NULL)
h2o_mruby__abort_exc(mrb, "h2o_mruby_new_str:precondition failure", file, line);
mrb_value ret = is_static ? mrb_str_new_static(mrb, s, len) : mrb_str_new(mrb, s, len);
if (mrb->exc != NULL)
h2o_mruby__abort_exc(mrb, "h2o_mruby_new_str:failed to create string", file, line);
return ret;
}
static void on_gc_dispose_generator(mrb_state *mrb, void *_generator)
{
h2o_mruby_generator_t *generator = _generator;
if (generator == NULL)
return;
generator->refs.generator = mrb_nil_value();
}
static void on_gc_dispose_error_stream(mrb_state *mrb, void *_error_stream)
{
h2o_mruby_error_stream_t *error_stream = _error_stream;
if (error_stream == NULL)
return;
if (error_stream->generator != NULL) {
error_stream->generator->error_stream = NULL;
error_stream->generator->refs.error_stream = mrb_nil_value();
}
free(error_stream);
}
const static struct mrb_data_type generator_type = {"generator", on_gc_dispose_generator};
const static struct mrb_data_type error_stream_type = {"error_stream", on_gc_dispose_error_stream};
h2o_mruby_generator_t *h2o_mruby_get_generator(mrb_state *mrb, mrb_value obj)
{
h2o_mruby_generator_t *generator = mrb_data_check_get_ptr(mrb, obj, &generator_type);
return generator;
}
h2o_mruby_error_stream_t *h2o_mruby_get_error_stream(mrb_state *mrb, mrb_value obj)
{
h2o_mruby_error_stream_t *error_stream = mrb_data_check_get_ptr(mrb, obj, &error_stream_type);
return error_stream;
}
void h2o_mruby_setup_globals(mrb_state *mrb)
{
const char *root = getenv("H2O_ROOT");
if (root == NULL)
root = H2O_TO_STR(H2O_ROOT);
mrb_gv_set(mrb, mrb_intern_lit(mrb, "$H2O_ROOT"), h2o_mruby_new_str(mrb, root, strlen(root)));
h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\"");
h2o_mruby_assert(mrb);
/* require core modules and include built-in libraries */
h2o_mruby_eval_expr(mrb, "require \"#{$H2O_ROOT}/share/h2o/mruby/preloads.rb\"");
if (mrb->exc != NULL) {
const char *msg = "";
if (mrb_obj_is_instance_of(mrb, mrb_obj_value(mrb->exc), mrb_class_get(mrb, "LoadError"))) {
msg = "Did you forget to run `make install`?\n";
}
h2o_fatal("an error occurred while loading %s/%s: %s\n%s", root, "share/h2o/mruby/preloads.rb",
RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc))), msg);
}
}
mrb_value h2o_mruby_to_str(mrb_state *mrb, mrb_value v)
{
if (!mrb_string_p(v))
H2O_MRUBY_EXEC_GUARD({ v = mrb_str_to_str(mrb, v); });
return v;
}
mrb_value h2o_mruby_to_int(mrb_state *mrb, mrb_value v)
{
H2O_MRUBY_EXEC_GUARD({ v = mrb_Integer(mrb, v); });
return v;
}
mrb_value h2o_mruby_eval_expr(mrb_state *mrb, const char *expr)
{
return mrb_funcall(mrb, mrb_top_self(mrb), "eval", 1, mrb_str_new_cstr(mrb, expr));
}
mrb_value h2o_mruby_eval_expr_location(mrb_state *mrb, const char *expr, const char *path, const int lineno)
{
return mrb_funcall(mrb, mrb_top_self(mrb), "eval", 4, mrb_str_new_cstr(mrb, expr), mrb_nil_value(), mrb_str_new_cstr(mrb, path),
mrb_fixnum_value(lineno));
}
void h2o_mruby_define_callback(mrb_state *mrb, const char *name, h2o_mruby_callback_t callback)
{
h2o_mruby_shared_context_t *shared_ctx = mrb->ud;
h2o_vector_reserve(NULL, &shared_ctx->callbacks, shared_ctx->callbacks.size + 1);
shared_ctx->callbacks.entries[shared_ctx->callbacks.size++] = callback;
mrb_value args[2];
args[0] = mrb_str_new_cstr(mrb, name);
args[1] = mrb_fixnum_value(-(int)shared_ctx->callbacks.size);
mrb_funcall_argv(mrb, mrb_top_self(mrb), mrb_intern_lit(mrb, "_h2o_define_callback"), 2, args);
if (mrb->exc != NULL) {
h2o_error_printf("failed to define mruby function: %s\n", name);
h2o_mruby_assert(mrb);
}
}
mrb_value h2o_mruby_create_data_instance(mrb_state *mrb, mrb_value class_obj, void *ptr, const mrb_data_type *type)
{
struct RClass *klass = mrb_class_ptr(class_obj);
struct RData *data = mrb_data_object_alloc(mrb, klass, ptr, type);
return mrb_obj_value(data);
}
struct RProc *h2o_mruby_compile_code(mrb_state *mrb, h2o_mruby_config_vars_t *config, char *errbuf)
{
mrbc_context *cxt;
struct mrb_parser_state *parser;
struct RProc *proc = NULL;
/* parse */
if ((cxt = mrbc_context_new(mrb)) == NULL) {
h2o_fatal("%s: no memory\n", H2O_MRUBY_MODULE_NAME);
}
if (config->path != NULL)
mrbc_filename(mrb, cxt, config->path);
cxt->capture_errors = 1;
cxt->lineno = config->lineno;
if ((parser = mrb_parse_nstring(mrb, config->source.base, (int)config->source.len, cxt)) == NULL) {
h2o_fatal("%s: no memory\n", H2O_MRUBY_MODULE_NAME);
}
/* report the error through errbuf if supplied, otherwise abort */
if (parser->nerr != 0) {
if (errbuf == NULL) {
h2o_fatal("%s: internal error (unexpected state)\n", H2O_MRUBY_MODULE_NAME);
}
snprintf(errbuf, 256, "line %d:%s", parser->error_buffer[0].lineno, parser->error_buffer[0].message);
strcat(errbuf, "\n\n");
if (h2o_str_at_position(errbuf + strlen(errbuf), config->source.base, config->source.len,
parser->error_buffer[0].lineno - config->lineno + 1, parser->error_buffer[0].column) != 0) {
/* remove trailing "\n\n" in case we failed to append the source code at the error location */
errbuf[strlen(errbuf) - 2] = '\0';
}
goto Exit;
}
/* generate code */
if ((proc = mrb_generate_code(mrb, parser)) == NULL) {
h2o_fatal("%s: internal error (mrb_generate_code failed)\n", H2O_MRUBY_MODULE_NAME);
}
Exit:
mrb_parser_free(parser);
mrbc_context_free(mrb, cxt);
return proc;
}
static h2o_iovec_t convert_header_name_to_env(h2o_mem_pool_t *pool, const char *name, size_t len)
{
#define KEY_PREFIX "HTTP_"
#define KEY_PREFIX_LEN (sizeof(KEY_PREFIX) - 1)
h2o_iovec_t ret;
ret.len = len + KEY_PREFIX_LEN;
ret.base = h2o_mem_alloc_pool(pool, char, ret.len);
memcpy(ret.base, KEY_PREFIX, KEY_PREFIX_LEN);
char *d = ret.base + KEY_PREFIX_LEN;
for (; len != 0; ++name, --len)
*d++ = *name == '-' ? '_' : h2o_toupper(*name);
return ret;
#undef KEY_PREFIX
#undef KEY_PREFIX_LEN
}
static int handle_early_hints_header(h2o_mruby_shared_context_t *shared_ctx, h2o_iovec_t *name, h2o_iovec_t value, void *_req)
{
h2o_req_t *req = _req;
h2o_add_header_by_str(&req->pool, &req->res.headers, name->base, name->len, 1, NULL, value.base, value.len);
return 0;
}
mrb_value send_early_hints_proc(mrb_state *mrb, mrb_value self)
{
mrb_value headers;
mrb_get_args(mrb, "H", &headers);
h2o_mruby_generator_t *generator = h2o_mruby_get_generator(mrb, mrb_proc_cfunc_env_get(mrb, 0));
if (generator == NULL)
return mrb_nil_value();
if (h2o_mruby_iterate_rack_headers(mrb->ud, headers, handle_early_hints_header, generator->req) == -1)
mrb_exc_raise(mrb, mrb_obj_value(mrb->exc));
generator->req->res.status = 103;
h2o_send_informational(generator->req);
return mrb_nil_value();
}
static mrb_value build_constants(mrb_state *mrb, const char *server_name, size_t server_name_len)
{
mrb_value ary = mrb_ary_new_capa(mrb, H2O_MRUBY_NUM_CONSTANTS);
mrb_int i;
int gc_arena = mrb_gc_arena_save(mrb);
{
h2o_mem_pool_t pool;
h2o_mem_init_pool(&pool);
for (i = 0; i != H2O_MAX_TOKENS; ++i) {
const h2o_token_t *token = h2o__tokens + i;
if (token->buf.len == 0)
continue;
mrb_value lit = h2o_mruby_new_str(mrb, token->buf.base, token->buf.len);
FREEZE_STRING(lit);
mrb_ary_set(mrb, ary, i, lit);
}
for (; i != H2O_MAX_TOKENS * 2; ++i) {
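/* note: `h2o__tokens + i - H2O_MAX_TOKENS` groups as `(h2o__tokens + i) - H2O_MAX_TOKENS`; once i exceeds H2O_MAX_TOKENS that
 * intermediate pointer is out of bounds (the undefined behavior the sanitizer reports), hence the parenthesized form
 * `h2o__tokens + (i - H2O_MAX_TOKENS)` in the patch */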
const h2o_token_t *token = h2o__tokens + i - H2O_MAX_TOKENS;
mrb_value lit = mrb_nil_value();
if (token == H2O_TOKEN_CONTENT_TYPE) {
lit = mrb_str_new_lit(mrb, "CONTENT_TYPE");
} else if (token->buf.len != 0) {
h2o_iovec_t n = convert_header_name_to_env(&pool, token->buf.base, token->buf.len);
lit = h2o_mruby_new_str(mrb, n.base, n.len);
}
if (mrb_string_p(lit)) {
FREEZE_STRING(lit);
mrb_ary_set(mrb, ary, i, lit);
}
}
h2o_mem_clear_pool(&pool);
}
#define SET_STRING(idx, value) \
do { \
mrb_value lit = (value); \
FREEZE_STRING(lit); \
mrb_ary_set(mrb, ary, idx, lit); \
} while (0)
#define SET_LITERAL(idx, str) SET_STRING(idx, mrb_str_new_lit(mrb, str))
SET_LITERAL(H2O_MRUBY_LIT_REQUEST_METHOD, "REQUEST_METHOD");
SET_LITERAL(H2O_MRUBY_LIT_SCRIPT_NAME, "SCRIPT_NAME");
SET_LITERAL(H2O_MRUBY_LIT_PATH_INFO, "PATH_INFO");
SET_LITERAL(H2O_MRUBY_LIT_QUERY_STRING, "QUERY_STRING");
SET_LITERAL(H2O_MRUBY_LIT_SERVER_NAME, "SERVER_NAME");
SET_LITERAL(H2O_MRUBY_LIT_SERVER_ADDR, "SERVER_ADDR");
SET_LITERAL(H2O_MRUBY_LIT_SERVER_PORT, "SERVER_PORT");
SET_LITERAL(H2O_MRUBY_LIT_SERVER_PROTOCOL, "SERVER_PROTOCOL");
SET_LITERAL(H2O_MRUBY_LIT_CONTENT_LENGTH, "CONTENT_LENGTH");
SET_LITERAL(H2O_MRUBY_LIT_REMOTE_ADDR, "REMOTE_ADDR");
SET_LITERAL(H2O_MRUBY_LIT_REMOTE_PORT, "REMOTE_PORT");
SET_LITERAL(H2O_MRUBY_LIT_REMOTE_USER, "REMOTE_USER");
SET_LITERAL(H2O_MRUBY_LIT_RACK_URL_SCHEME, "rack.url_scheme");
SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTITHREAD, "rack.multithread");
SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTIPROCESS, "rack.multiprocess");
SET_LITERAL(H2O_MRUBY_LIT_RACK_RUN_ONCE, "rack.run_once");
SET_LITERAL(H2O_MRUBY_LIT_RACK_HIJACK_, "rack.hijack?");
SET_LITERAL(H2O_MRUBY_LIT_RACK_INPUT, "rack.input");
SET_LITERAL(H2O_MRUBY_LIT_RACK_ERRORS, "rack.errors");
SET_LITERAL(H2O_MRUBY_LIT_RACK_EARLY_HINTS, "rack.early_hints");
SET_LITERAL(H2O_MRUBY_LIT_SERVER_SOFTWARE, "SERVER_SOFTWARE");
SET_LITERAL(H2O_MRUBY_LIT_H2O_REMAINING_DELEGATIONS, "h2o.remaining_delegations");
SET_LITERAL(H2O_MRUBY_LIT_H2O_REMAINING_REPROCESSES, "h2o.remaining_reprocesses");
SET_STRING(H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE, h2o_mruby_new_str(mrb, server_name, server_name_len));
#undef SET_LITERAL
#undef SET_STRING
h2o_mruby_eval_expr_location(mrb, H2O_MRUBY_CODE_CORE, "(h2o)lib/handler/mruby/embedded/core.rb", 1);
h2o_mruby_assert(mrb);
mrb_ary_set(mrb, ary, H2O_MRUBY_PROC_EACH_TO_ARRAY,
mrb_funcall(mrb, mrb_obj_value(mrb->kernel_module), "_h2o_proc_each_to_array", 0));
h2o_mruby_assert(mrb);
mrb_gc_arena_restore(mrb, gc_arena);
return ary;
}
static void handle_exception(h2o_mruby_context_t *ctx, h2o_mruby_generator_t *generator)
{
mrb_state *mrb = ctx->shared->mrb;
assert(mrb->exc != NULL);
if (generator == NULL || generator->req->_generator != NULL) {
h2o_error_printf("mruby raised: %s\n", RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc))));
} else {
h2o_req_log_error(generator->req, H2O_MRUBY_MODULE_NAME, "mruby raised: %s\n",
RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc))));
h2o_send_error_500(generator->req, "Internal Server Error", "Internal Server Error", 0);
}
mrb->exc = NULL;
}
mrb_value send_error_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again)
{
mrb_state *mrb = ctx->shared->mrb;
mrb->exc = mrb_obj_ptr(mrb_ary_entry(args, 0));
h2o_mruby_generator_t *generator = h2o_mruby_get_generator(mrb, mrb_ary_entry(args, 1));
handle_exception(ctx, generator);
return mrb_nil_value();
}
mrb_value block_request_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again)
{
mrb_state *mrb = ctx->shared->mrb;
mrb_value blocking_req = mrb_ary_new_capa(mrb, 2);
mrb_ary_set(mrb, blocking_req, 0, ctx->proc);
mrb_ary_set(mrb, blocking_req, 1, input);
mrb_ary_push(mrb, ctx->blocking_reqs, blocking_req);
return mrb_nil_value();
}
mrb_value run_blocking_requests_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args,
int *run_again)
{
mrb_state *mrb = ctx->shared->mrb;
mrb_value exc = mrb_ary_entry(args, 0);
if (!mrb_nil_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
handle_exception(ctx, NULL);
}
mrb_int i;
mrb_int len = RARRAY_LEN(ctx->blocking_reqs);
for (i = 0; i != len; ++i) {
mrb_value blocking_req = mrb_ary_entry(ctx->blocking_reqs, i);
mrb_value blocking_req_resumer = mrb_ary_entry(blocking_req, 0);
mrb_value blocking_req_input = mrb_ary_entry(blocking_req, 1);
h2o_mruby_run_fiber(ctx, blocking_req_resumer, blocking_req_input, NULL);
}
mrb_ary_clear(mrb, ctx->blocking_reqs);
return mrb_nil_value();
}
mrb_value run_child_fiber_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again)
{
mrb_state *mrb = ctx->shared->mrb;
mrb_value resumer = mrb_ary_entry(args, 0);
/*
* swap receiver to run child fiber immediately, while storing main fiber resumer
* which will be called after the child fiber is yielded
*/
mrb_ary_push(mrb, ctx->resumers, *receiver);
*receiver = resumer;
*run_again = 1;
return mrb_nil_value();
}
mrb_value finish_child_fiber_callback(h2o_mruby_context_t *ctx, mrb_value input, mrb_value *receiver, mrb_value args,
int *run_again)
{
/* do nothing */
return mrb_nil_value();
}
static mrb_value error_stream_write(mrb_state *mrb, mrb_value self)
{
h2o_mruby_error_stream_t *error_stream;
if ((error_stream = h2o_mruby_get_error_stream(mrb, self)) == NULL) {
mrb_raise(mrb, E_ARGUMENT_ERROR, "ErrorStream#write wrong self");
}
mrb_value msgstr;
mrb_get_args(mrb, "o", &msgstr);
msgstr = h2o_mruby_to_str(mrb, msgstr);
h2o_iovec_t msg = h2o_iovec_init(RSTRING_PTR(msgstr), RSTRING_LEN(msgstr));
if (error_stream->generator != NULL) {
h2o_req_t *req = error_stream->generator->req;
req->error_log_delegate.cb(req->error_log_delegate.data, h2o_iovec_init(NULL, 0), msg);
} else if (error_stream->ctx->handler->pathconf->error_log.emit_request_errors) {
h2o_write_error_log(h2o_iovec_init(NULL, 0), msg);
}
return mrb_fixnum_value(msg.len);
}
static h2o_mruby_shared_context_t *create_shared_context(h2o_context_t *ctx)
{
/* init mruby in every thread */
h2o_mruby_shared_context_t *shared_ctx = h2o_mem_alloc(sizeof(*shared_ctx));
if ((shared_ctx->mrb = mrb_open()) == NULL) {
h2o_fatal("%s: no memory\n", H2O_MRUBY_MODULE_NAME);
}
shared_ctx->mrb->ud = shared_ctx;
shared_ctx->ctx = ctx;
shared_ctx->current_context = NULL;
shared_ctx->callbacks = (h2o_mruby_callbacks_t){NULL};
h2o_mruby_setup_globals(shared_ctx->mrb);
shared_ctx->constants = build_constants(shared_ctx->mrb, ctx->globalconf->server_name.base, ctx->globalconf->server_name.len);
shared_ctx->symbols.sym_call = mrb_intern_lit(shared_ctx->mrb, "call");
shared_ctx->symbols.sym_close = mrb_intern_lit(shared_ctx->mrb, "close");
shared_ctx->symbols.sym_method = mrb_intern_lit(shared_ctx->mrb, "method");
shared_ctx->symbols.sym_headers = mrb_intern_lit(shared_ctx->mrb, "headers");
shared_ctx->symbols.sym_body = mrb_intern_lit(shared_ctx->mrb, "body");
shared_ctx->symbols.sym_async = mrb_intern_lit(shared_ctx->mrb, "async");
h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__send_error", send_error_callback);
h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__block_request", block_request_callback);
h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__run_blocking_requests", run_blocking_requests_callback);
h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__run_child_fiber", run_child_fiber_callback);
h2o_mruby_define_callback(shared_ctx->mrb, "_h2o__finish_child_fiber", finish_child_fiber_callback);
h2o_mruby_sender_init_context(shared_ctx);
h2o_mruby_http_request_init_context(shared_ctx);
h2o_mruby_redis_init_context(shared_ctx);
h2o_mruby_sleep_init_context(shared_ctx);
h2o_mruby_middleware_init_context(shared_ctx);
h2o_mruby_channel_init_context(shared_ctx);
struct RClass *module = mrb_define_module(shared_ctx->mrb, "H2O");
mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_H2O_MODULE, mrb_obj_value(module));
struct RClass *generator_klass = mrb_define_class_under(shared_ctx->mrb, module, "Generator", shared_ctx->mrb->object_class);
mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_GENERATOR_CLASS, mrb_obj_value(generator_klass));
struct RClass *error_stream_class = mrb_class_get_under(shared_ctx->mrb, module, "ErrorStream");
mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_ERROR_STREAM_CLASS, mrb_obj_value(error_stream_class));
mrb_define_method(shared_ctx->mrb, error_stream_class, "write", error_stream_write, MRB_ARGS_REQ(1));
return shared_ctx;
}
static void dispose_shared_context(void *data)
{
if (data == NULL)
return;
h2o_mruby_shared_context_t *shared_ctx = (h2o_mruby_shared_context_t *)data;
mrb_close(shared_ctx->mrb);
free(shared_ctx);
}
static h2o_mruby_shared_context_t *get_shared_context(h2o_context_t *ctx)
{
static size_t key = SIZE_MAX;
void **data = h2o_context_get_storage(ctx, &key, dispose_shared_context);
if (*data == NULL) {
*data = create_shared_context(ctx);
}
return *data;
}
mrb_value prepare_fibers(h2o_mruby_context_t *ctx)
{
mrb_state *mrb = ctx->shared->mrb;
h2o_mruby_config_vars_t config = ctx->handler->config;
mrb_value conf = mrb_hash_new_capa(mrb, 3);
mrb_hash_set(mrb, conf, mrb_symbol_value(mrb_intern_lit(mrb, "code")),
h2o_mruby_new_str(mrb, config.source.base, config.source.len));
mrb_hash_set(mrb, conf, mrb_symbol_value(mrb_intern_lit(mrb, "file")),
h2o_mruby_new_str(mrb, config.path, strlen(config.path)));
mrb_hash_set(mrb, conf, mrb_symbol_value(mrb_intern_lit(mrb, "line")), mrb_fixnum_value(config.lineno));
/* run code and generate handler */
mrb_value result = mrb_funcall(mrb, mrb_obj_value(mrb->kernel_module), "_h2o_prepare_app", 1, conf);
h2o_mruby_assert(mrb);
assert(mrb_array_p(result));
return result;
}
static void on_context_init(h2o_handler_t *_handler, h2o_context_t *ctx)
{
h2o_mruby_handler_t *handler = (void *)_handler;
h2o_mruby_context_t *handler_ctx = h2o_mem_alloc(sizeof(*handler_ctx));
handler_ctx->handler = handler;
handler_ctx->shared = get_shared_context(ctx);
mrb_state *mrb = handler_ctx->shared->mrb;
handler_ctx->blocking_reqs = mrb_ary_new(mrb);
handler_ctx->resumers = mrb_ary_new(mrb);
/* compile code (must be done for each thread) */
int arena = mrb_gc_arena_save(mrb);
mrb_value fibers = prepare_fibers(handler_ctx);
assert(mrb_array_p(fibers));
handler_ctx->proc = mrb_ary_entry(fibers, 0);
/* run configurator */
mrb_value configurator = mrb_ary_entry(fibers, 1);
h2o_mruby_run_fiber(handler_ctx, configurator, mrb_nil_value(), NULL);
h2o_mruby_assert(handler_ctx->shared->mrb);
mrb_gc_arena_restore(mrb, arena);
mrb_gc_protect(mrb, handler_ctx->proc);
mrb_gc_protect(mrb, configurator);
h2o_context_set_handler_context(ctx, &handler->super, handler_ctx);
}
static void on_context_dispose(h2o_handler_t *_handler, h2o_context_t *ctx)
{
h2o_mruby_handler_t *handler = (void *)_handler;
h2o_mruby_context_t *handler_ctx = h2o_context_get_handler_context(ctx, &handler->super);
if (handler_ctx == NULL)
return;
free(handler_ctx);
}
static void on_handler_dispose(h2o_handler_t *_handler)
{
h2o_mruby_handler_t *handler = (void *)_handler;
free(handler->config.source.base);
free(handler->config.path);
free(handler);
}
static void stringify_address(h2o_conn_t *conn, socklen_t (*cb)(h2o_conn_t *conn, struct sockaddr *), mrb_state *mrb,
mrb_value *host, mrb_value *port)
{
struct sockaddr_storage ss;
socklen_t sslen;
char buf[NI_MAXHOST];
*host = mrb_nil_value();
*port = mrb_nil_value();
if ((sslen = cb(conn, (void *)&ss)) == 0)
return;
size_t l = h2o_socket_getnumerichost((void *)&ss, sslen, buf);
if (l != SIZE_MAX)
*host = h2o_mruby_new_str(mrb, buf, l);
int32_t p = h2o_socket_getport((void *)&ss);
if (p != -1) {
l = (int)sprintf(buf, "%" PRIu16, (uint16_t)p);
*port = h2o_mruby_new_str(mrb, buf, l);
}
}
static void on_rack_input_free(mrb_state *mrb, const char *base, mrb_int len, void *_input_stream)
{
/* reset ref to input_stream */
mrb_value *input_stream = _input_stream;
*input_stream = mrb_nil_value();
}
static int build_env_sort_header_cb(const void *_x, const void *_y)
{
const h2o_header_t *x = *(const h2o_header_t **)_x, *y = *(const h2o_header_t **)_y;
if (x->name->len < y->name->len)
return -1;
if (x->name->len > y->name->len)
return 1;
if (x->name->base != y->name->base) {
int r = memcmp(x->name->base, y->name->base, x->name->len);
if (r != 0)
return r;
}
assert(x != y);
/* the order of the headers having the same name needs to be retained */
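/* (qsort is not stable, but both pointers refer to entries of the same array, so comparing their addresses reproduces the original order) */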
return x < y ? -1 : 1;
}
static mrb_value build_path_info(mrb_state *mrb, h2o_req_t *req, size_t confpath_len_wo_slash)
{
if (req->path_normalized.len == confpath_len_wo_slash)
return mrb_str_new_lit(mrb, "");
assert(req->path_normalized.len > confpath_len_wo_slash);
size_t path_info_start, path_info_end = req->query_at != SIZE_MAX ? req->query_at : req->path.len;
if (req->norm_indexes == NULL) {
path_info_start = confpath_len_wo_slash;
} else if (req->norm_indexes[0] == 0 && confpath_len_wo_slash == 0) {
/* path without leading slash */
path_info_start = 0;
} else {
path_info_start = req->norm_indexes[confpath_len_wo_slash] - 1;
}
return h2o_mruby_new_str(mrb, req->path.base + path_info_start, path_info_end - path_info_start);
}
int h2o_mruby_iterate_native_headers(h2o_mruby_shared_context_t *shared_ctx, h2o_mem_pool_t *pool, h2o_headers_t *headers,
int (*cb)(h2o_mruby_shared_context_t *, h2o_mem_pool_t *, h2o_header_t *, void *),
void *cb_data)
{
h2o_header_t **sorted = alloca(sizeof(*sorted) * headers->size);
size_t i, num_sorted = 0;
for (i = 0; i != headers->size; ++i) {
if (headers->entries[i].name == &H2O_TOKEN_TRANSFER_ENCODING->buf)
continue;
sorted[num_sorted++] = headers->entries + i;
}
qsort(sorted, num_sorted, sizeof(*sorted), build_env_sort_header_cb);
h2o_iovec_t *values = alloca(sizeof(*values) * (num_sorted * 2 - 1));
for (i = 0; i != num_sorted; ++i) {
/* build flattened value of the header field values that have the same name as sorted[i] */
size_t num_values = 0;
values[num_values++] = sorted[i]->value;
while (i < num_sorted - 1 && h2o_header_name_is_equal(sorted[i], sorted[i + 1])) {
++i;
values[num_values++] = h2o_iovec_init(sorted[i]->name == &H2O_TOKEN_COOKIE->buf ? "; " : ", ", 2);
values[num_values++] = sorted[i]->value;
}
h2o_header_t h = *sorted[i];
h.value = num_values == 1 ? values[0] : h2o_concat_list(pool, values, num_values);
if (cb(shared_ctx, pool, &h, cb_data) != 0) {
assert(shared_ctx->mrb->exc != NULL);
return -1;
}
}
return 0;
}
static int iterate_headers_callback(h2o_mruby_shared_context_t *shared_ctx, h2o_mem_pool_t *pool, h2o_header_t *header,
void *cb_data)
{
mrb_value env = mrb_obj_value(cb_data);
mrb_value n;
if (h2o_iovec_is_token(header->name)) {
const h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, header->name);
n = h2o_mruby_token_env_key(shared_ctx, token);
} else {
h2o_iovec_t vec = convert_header_name_to_env(pool, header->name->base, header->name->len);
n = h2o_mruby_new_str(shared_ctx->mrb, vec.base, vec.len);
}
mrb_value v = h2o_mruby_new_str(shared_ctx->mrb, header->value.base, header->value.len);
mrb_hash_set(shared_ctx->mrb, env, n, v);
return 0;
}
mrb_value h2o_mruby_token_string(h2o_mruby_shared_context_t *shared, const h2o_token_t *token)
{
return mrb_ary_entry(shared->constants, token - h2o__tokens);
}
mrb_value h2o_mruby_token_env_key(h2o_mruby_shared_context_t *shared, const h2o_token_t *token)
{
return mrb_ary_entry(shared->constants, token - h2o__tokens + H2O_MAX_TOKENS);
}
static mrb_value build_env(h2o_mruby_generator_t *generator)
{
h2o_mruby_shared_context_t *shared = generator->ctx->shared;
mrb_state *mrb = shared->mrb;
mrb_value env = mrb_hash_new_capa(mrb, 16);
char http_version[sizeof("HTTP/1.0")];
size_t http_version_sz;
/* environment */
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_REQUEST_METHOD),
h2o_mruby_new_str(mrb, generator->req->method.base, generator->req->method.len));
size_t confpath_len_wo_slash = generator->req->pathconf->path.len;
if (generator->req->pathconf->path.base[generator->req->pathconf->path.len - 1] == '/')
--confpath_len_wo_slash;
assert(confpath_len_wo_slash <= generator->req->path_normalized.len);
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SCRIPT_NAME),
h2o_mruby_new_str(mrb, generator->req->pathconf->path.base, confpath_len_wo_slash));
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_PATH_INFO),
build_path_info(mrb, generator->req, confpath_len_wo_slash));
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_QUERY_STRING),
generator->req->query_at != SIZE_MAX
? h2o_mruby_new_str(mrb, generator->req->path.base + generator->req->query_at + 1,
generator->req->path.len - (generator->req->query_at + 1))
: mrb_str_new_lit(mrb, ""));
mrb_hash_set(
mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_NAME),
h2o_mruby_new_str(mrb, generator->req->hostconf->authority.host.base, generator->req->hostconf->authority.host.len));
http_version_sz = h2o_stringify_protocol_version(http_version, generator->req->version);
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_PROTOCOL),
h2o_mruby_new_str(mrb, http_version, http_version_sz));
{
mrb_value h, p;
stringify_address(generator->req->conn, generator->req->conn->callbacks->get_sockname, mrb, &h, &p);
if (!mrb_nil_p(h))
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_ADDR), h);
if (!mrb_nil_p(p))
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_PORT), p);
}
mrb_hash_set(mrb, env, h2o_mruby_token_env_key(shared, H2O_TOKEN_HOST),
h2o_mruby_new_str(mrb, generator->req->authority.base, generator->req->authority.len));
if (generator->req->entity.base != NULL) {
char buf[32];
int l = sprintf(buf, "%zu", generator->req->entity.len);
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_CONTENT_LENGTH), h2o_mruby_new_str(mrb, buf, l));
generator->rack_input = mrb_input_stream_value(mrb, NULL, 0);
mrb_input_stream_set_data(mrb, generator->rack_input, generator->req->entity.base, (mrb_int)generator->req->entity.len, 0,
on_rack_input_free, &generator->rack_input);
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_INPUT), generator->rack_input);
}
{
mrb_value h, p;
stringify_address(generator->req->conn, generator->req->conn->callbacks->get_peername, mrb, &h, &p);
if (!mrb_nil_p(h))
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_REMOTE_ADDR), h);
if (!mrb_nil_p(p))
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_REMOTE_PORT), p);
}
{
size_t i;
for (i = 0; i != generator->req->env.size; i += 2) {
h2o_iovec_t *name = generator->req->env.entries + i, *value = name + 1;
mrb_hash_set(mrb, env, h2o_mruby_new_str(mrb, name->base, name->len), h2o_mruby_new_str(mrb, value->base, value->len));
}
}
/* headers */
h2o_mruby_iterate_native_headers(shared, &generator->req->pool, &generator->req->headers, iterate_headers_callback,
mrb_obj_ptr(env));
mrb_value early_data_key = h2o_mruby_token_env_key(shared, H2O_TOKEN_EARLY_DATA);
int found_early_data = !mrb_nil_p(mrb_hash_fetch(mrb, env, early_data_key, mrb_nil_value()));
if (!found_early_data && h2o_conn_is_early_data(generator->req->conn)) {
mrb_hash_set(mrb, env, early_data_key, h2o_mruby_new_str(mrb, "1", 1));
generator->req->reprocess_if_too_early = 1;
}
/* rack.* */
/* TBD rack.version? */
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_URL_SCHEME),
h2o_mruby_new_str(mrb, generator->req->scheme->name.base, generator->req->scheme->name.len));
/* we are using shared-none architecture, and therefore declare ourselves as multiprocess */
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_MULTITHREAD), mrb_false_value());
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_MULTIPROCESS), mrb_true_value());
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_RUN_ONCE), mrb_false_value());
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_HIJACK_), mrb_false_value());
mrb_value error_stream = h2o_mruby_create_data_instance(
shared->mrb, mrb_ary_entry(shared->constants, H2O_MRUBY_ERROR_STREAM_CLASS), generator->error_stream, &error_stream_type);
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_ERRORS), error_stream);
generator->refs.error_stream = error_stream;
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_RACK_EARLY_HINTS),
mrb_obj_value(mrb_proc_new_cfunc_with_env(mrb, send_early_hints_proc, 1, &generator->refs.generator)));
/* server name */
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE),
mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE));
/* h2o specific */
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_H2O_REMAINING_DELEGATIONS),
mrb_fixnum_value(generator->req->remaining_delegations));
mrb_hash_set(mrb, env, mrb_ary_entry(shared->constants, H2O_MRUBY_LIT_H2O_REMAINING_REPROCESSES),
mrb_fixnum_value(generator->req->remaining_reprocesses));
return env;
}
int h2o_mruby_set_response_header(h2o_mruby_shared_context_t *shared_ctx, h2o_iovec_t *name, h2o_iovec_t value, void *_req)
{
h2o_req_t *req = _req;
const h2o_token_t *token;
static const h2o_iovec_t fallthru_set_prefix = {H2O_STRLIT(FALLTHRU_SET_PREFIX)};
h2o_iovec_t lc_name;
if (h2o_iovec_is_token(name)) {
token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, name);
} else {
/* convert name to lowercase */
lc_name = h2o_strdup(&req->pool, name->base, name->len);
h2o_strtolower(lc_name.base, lc_name.len);
token = h2o_lookup_token(lc_name.base, lc_name.len);
}
if (token != NULL) {
if (token->flags.proxy_should_drop_for_res) {
/* skip */
} else if (token == H2O_TOKEN_CONTENT_LENGTH) {
req->res.content_length = h2o_strtosize(value.base, value.len);
} else {
value = h2o_strdup(&req->pool, value.base, value.len);
if (token == H2O_TOKEN_LINK) {
h2o_iovec_t new_value = h2o_push_path_in_link_header(req, value.base, value.len);
if (new_value.len)
h2o_add_header(&req->pool, &req->res.headers, token, NULL, new_value.base, new_value.len);
} else {
h2o_add_header(&req->pool, &req->res.headers, token, NULL, value.base, value.len);
}
}
} else if (lc_name.len > fallthru_set_prefix.len &&
h2o_memis(lc_name.base, fallthru_set_prefix.len, fallthru_set_prefix.base, fallthru_set_prefix.len)) {
/* register environment variables (with the name converted to uppercase, and using `_`) */
size_t i;
lc_name.base += fallthru_set_prefix.len;
lc_name.len -= fallthru_set_prefix.len;
for (i = 0; i != lc_name.len; ++i)
lc_name.base[i] = lc_name.base[i] == '-' ? '_' : h2o_toupper(lc_name.base[i]);
h2o_iovec_t *slot = h2o_req_getenv(req, lc_name.base, lc_name.len, 1);
*slot = h2o_strdup(&req->pool, value.base, value.len);
} else {
value = h2o_strdup(&req->pool, value.base, value.len);
h2o_add_header_by_str(&req->pool, &req->res.headers, lc_name.base, lc_name.len, 0, NULL, value.base, value.len);
}
return 0;
}
static void clear_rack_input(h2o_mruby_generator_t *generator)
{
if (!mrb_nil_p(generator->rack_input))
mrb_input_stream_set_data(generator->ctx->shared->mrb, generator->rack_input, NULL, -1, 0, NULL, NULL);
}
static void on_generator_dispose(void *_generator)
{
h2o_mruby_generator_t *generator = _generator;
clear_rack_input(generator);
generator->req = NULL;
if (!mrb_nil_p(generator->refs.generator))
DATA_PTR(generator->refs.generator) = NULL;
if (generator->error_stream != NULL)
generator->error_stream->generator = NULL;
if (generator->sender != NULL)
generator->sender->dispose(generator);
}
static int on_req(h2o_handler_t *_handler, h2o_req_t *req)
{
h2o_mruby_handler_t *handler = (void *)_handler;
h2o_mruby_shared_context_t *shared = get_shared_context(req->conn->ctx);
int gc_arena = mrb_gc_arena_save(shared->mrb);
h2o_mruby_context_t *ctx = h2o_context_get_handler_context(req->conn->ctx, &handler->super);
h2o_mruby_generator_t *generator = h2o_mem_alloc_shared(&req->pool, sizeof(*generator), on_generator_dispose);
generator->super.proceed = NULL;
generator->super.stop = NULL;
generator->req = req;
generator->ctx = ctx;
generator->rack_input = mrb_nil_value();
generator->sender = NULL;
generator->error_stream = h2o_mem_alloc(sizeof(*generator->error_stream));
generator->error_stream->ctx = ctx;
generator->error_stream->generator = generator;
mrb_value gen = h2o_mruby_create_data_instance(shared->mrb, mrb_ary_entry(shared->constants, H2O_MRUBY_GENERATOR_CLASS),
generator, &generator_type);
generator->refs.generator = gen;
mrb_value env = build_env(generator);
mrb_value args = mrb_ary_new(shared->mrb);
mrb_ary_set(shared->mrb, args, 0, env);
mrb_ary_set(shared->mrb, args, 1, gen);
int is_delegate = 0;
h2o_mruby_run_fiber(ctx, ctx->proc, args, &is_delegate);
mrb_gc_arena_restore(shared->mrb, gc_arena);
if (is_delegate)
return -1;
return 0;
}
static int send_response(h2o_mruby_generator_t *generator, mrb_int status, mrb_value resp, int *is_delegate)
{
mrb_state *mrb = generator->ctx->shared->mrb;
mrb_value body;
h2o_iovec_t content = {NULL};
/* set status */
generator->req->res.status = (int)status;
/* set headers */
if (h2o_mruby_iterate_rack_headers(generator->ctx->shared, mrb_ary_entry(resp, 1), h2o_mruby_set_response_header,
generator->req) != 0) {
return -1;
}
/* return without processing body, if status is fallthru */
if (generator->req->res.status == STATUS_FALLTHRU) {
if (is_delegate != NULL) {
*is_delegate = 1;
} else {
assert(generator->req->handler == &generator->ctx->handler->super);
h2o_delegate_request_deferred(generator->req);
}
return 0;
}
/* add date: if it's missing from the response */
if (h2o_find_header(&generator->req->res.headers, H2O_TOKEN_DATE, -1) == -1)
h2o_resp_add_date_header(generator->req);
/* obtain body */
body = mrb_ary_entry(resp, 2);
/* flatten body if possible */
if (mrb_array_p(body)) {
mrb_int i, len = RARRAY_LEN(body);
/* calculate the length of the output, while at the same time converting the elements of the output array to string */
content.len = 0;
for (i = 0; i != len; ++i) {
mrb_value e = mrb_ary_entry(body, i);
if (!mrb_string_p(e)) {
e = h2o_mruby_to_str(mrb, e);
if (mrb->exc != NULL)
return -1;
mrb_ary_set(mrb, body, i, e);
}
content.len += RSTRING_LEN(e);
}
/* allocate memory, and copy the response */
char *dst = content.base = h2o_mem_alloc_pool(&generator->req->pool, char, content.len);
for (i = 0; i != len; ++i) {
mrb_value e = mrb_ary_entry(body, i);
assert(mrb_string_p(e));
memcpy(dst, RSTRING_PTR(e), RSTRING_LEN(e));
dst += RSTRING_LEN(e);
}
/* reset body to nil, now that we have read all data */
body = mrb_nil_value();
}
/* use fiber in case we need to call #each */
if (!mrb_nil_p(body)) {
if (h2o_mruby_init_sender(generator, body) != 0)
return -1;
h2o_start_response(generator->req, &generator->super);
generator->sender->start(generator);
return 0;
}
/* send the entire response immediately */
if (status == 101 || status == 204 || status == 304 ||
h2o_memis(generator->req->input.method.base, generator->req->input.method.len, H2O_STRLIT("HEAD"))) {
h2o_start_response(generator->req, &generator->super);
h2o_send(generator->req, NULL, 0, H2O_SEND_STATE_FINAL);
} else {
if (content.len < generator->req->res.content_length) {
generator->req->res.content_length = content.len;
} else {
content.len = generator->req->res.content_length;
}
h2o_start_response(generator->req, &generator->super);
h2o_send(generator->req, &content, 1, H2O_SEND_STATE_FINAL);
}
return 0;
}
void h2o_mruby_run_fiber(h2o_mruby_context_t *ctx, mrb_value receiver, mrb_value input, int *is_delegate)
{
h2o_mruby_context_t *old_ctx = ctx->shared->current_context;
ctx->shared->current_context = ctx;
mrb_state *mrb = ctx->shared->mrb;
mrb_value output, resp;
mrb_int status = 0;
h2o_mruby_generator_t *generator = NULL;
h2o_mruby_send_response_callback_t send_response_callback = NULL;
while (1) {
/* send input to fiber */
output = mrb_funcall_argv(mrb, receiver, ctx->shared->symbols.sym_call, 1, &input);
if (mrb->exc != NULL)
goto GotException;
if (!mrb_array_p(output)) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "Fiber.yield must return an array"));
goto GotException;
}
resp = mrb_ary_entry(output, 0);
if (!mrb_array_p(resp)) {
if ((send_response_callback = h2o_mruby_middleware_get_send_response_callback(ctx, resp)) != NULL) {
break;
} else {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "rack app did not return an array"));
goto GotException;
}
}
/* fetch status */
H2O_MRUBY_EXEC_GUARD({ status = mrb_int(mrb, mrb_ary_entry(resp, 0)); });
if (mrb->exc != NULL)
goto GotException;
if (status >= 0) {
if (!(100 <= status && status <= 999)) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "status returned from rack app is out of range"));
goto GotException;
}
break;
}
receiver = mrb_ary_entry(resp, 1);
mrb_value args = mrb_ary_entry(resp, 2);
int run_again = 0;
size_t callback_index = -status - 1;
if (callback_index >= ctx->shared->callbacks.size) {
input = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpected callback id sent from rack app");
run_again = 1;
} else {
h2o_mruby_callback_t callback = ctx->shared->callbacks.entries[callback_index];
input = callback(ctx, input, &receiver, args, &run_again);
}
if (mrb->exc != NULL)
goto GotException;
if (run_again == 0) {
if (RARRAY_LEN(ctx->resumers) == 0)
goto Exit;
receiver = mrb_ary_pop(mrb, ctx->resumers);
}
mrb_gc_protect(mrb, receiver);
mrb_gc_protect(mrb, input);
}
/* retrieve and validate generator */
generator = h2o_mruby_get_generator(mrb, mrb_ary_entry(output, 1));
if (generator == NULL)
goto Exit; /* do nothing if req is already closed */
if (send_response_callback == NULL)
send_response_callback = send_response;
if (send_response_callback(generator, status, resp, is_delegate) != 0)
goto GotException;
goto Exit;
GotException:
if (generator == NULL && mrb_array_p(output))
generator = h2o_mruby_get_generator(mrb, mrb_ary_entry(output, 1));
handle_exception(ctx, generator);
Exit:
ctx->shared->current_context = old_ctx;
}
h2o_mruby_handler_t *h2o_mruby_register(h2o_pathconf_t *pathconf, h2o_mruby_config_vars_t *vars)
{
h2o_mruby_handler_t *handler = (void *)h2o_create_handler(pathconf, sizeof(*handler));
handler->super.on_context_init = on_context_init;
handler->super.on_context_dispose = on_context_dispose;
handler->super.dispose = on_handler_dispose;
handler->super.on_req = on_req;
handler->config.source = h2o_strdup(NULL, vars->source.base, vars->source.len);
if (vars->path != NULL)
handler->config.path = h2o_strdup(NULL, vars->path, SIZE_MAX).base;
handler->config.lineno = vars->lineno;
handler->pathconf = pathconf;
return handler;
}
mrb_value h2o_mruby_each_to_array(h2o_mruby_shared_context_t *shared_ctx, mrb_value src)
{
return mrb_funcall_argv(shared_ctx->mrb, mrb_ary_entry(shared_ctx->constants, H2O_MRUBY_PROC_EACH_TO_ARRAY),
shared_ctx->symbols.sym_call, 1, &src);
}
int h2o_mruby_iterate_header_values(h2o_mruby_shared_context_t *shared_ctx, mrb_value name, mrb_value value,
int (*cb)(h2o_mruby_shared_context_t *, h2o_iovec_t *, h2o_iovec_t, void *), void *cb_data)
{
mrb_state *mrb = shared_ctx->mrb;
h2o_iovec_t namevec;
/* convert name and value to string */
name = h2o_mruby_to_str(mrb, name);
if (mrb->exc != NULL)
return -1;
namevec = (h2o_iovec_init(RSTRING_PTR(name), RSTRING_LEN(name)));
value = h2o_mruby_to_str(mrb, value);
if (mrb->exc != NULL)
return -1;
/* call the callback, splitting the values with '\n' */
const char *vstart = RSTRING_PTR(value), *vend = vstart + RSTRING_LEN(value), *eol;
while (1) {
for (eol = vstart; eol != vend; ++eol)
if (*eol == '\n')
break;
if (cb(shared_ctx, &namevec, h2o_iovec_init(vstart, eol - vstart), cb_data) != 0)
return -1;
if (eol == vend)
break;
vstart = eol + 1;
}
return 0;
}
int h2o_mruby_iterate_rack_headers(h2o_mruby_shared_context_t *shared_ctx, mrb_value headers,
int (*cb)(h2o_mruby_shared_context_t *, h2o_iovec_t *, h2o_iovec_t, void *), void *cb_data)
{
mrb_state *mrb = shared_ctx->mrb;
if (!(mrb_hash_p(headers) || mrb_array_p(headers))) {
headers = h2o_mruby_each_to_array(shared_ctx, headers);
if (mrb->exc != NULL)
return -1;
assert(mrb_array_p(headers));
}
if (mrb_hash_p(headers)) {
mrb_value keys = mrb_hash_keys(mrb, headers);
mrb_int i, len = RARRAY_LEN(keys);
for (i = 0; i != len; ++i) {
mrb_value k = mrb_ary_entry(keys, i);
mrb_value v = mrb_hash_get(mrb, headers, k);
if (h2o_mruby_iterate_header_values(shared_ctx, k, v, cb, cb_data) != 0)
return -1;
}
} else {
assert(mrb_array_p(headers));
mrb_int i, len = RARRAY_LEN(headers);
for (i = 0; i != len; ++i) {
mrb_value pair = mrb_ary_entry(headers, i);
if (!mrb_array_p(pair)) {
mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "array element of headers MUST be an array"));
return -1;
}
if (h2o_mruby_iterate_header_values(shared_ctx, mrb_ary_entry(pair, 0), mrb_ary_entry(pair, 1), cb, cb_data) != 0)
return -1;
}
}
return 0;
}
| 1 | 14,045 | This change is not related to what this PR wanted to fix, but the undefined behavior sanitizer warns without this parenthesis because `h2o__tokens + i` overflows. | h2o-h2o | c |
@@ -223,7 +223,7 @@ class DocstringParameterChecker(BaseChecker):
# skip functions that match the 'no-docstring-rgx' config option
no_docstring_rgx = get_global_option(self, "no-docstring-rgx")
- if no_docstring_rgx and re.match(no_docstring_rgx, node.name):
+ if no_docstring_rgx.pattern and re.match(no_docstring_rgx, node.name):
return
# skip functions smaller than 'docstring-min-length' | 1 | # Copyright (c) 2014-2015 Bruno Daniel <[email protected]>
# Copyright (c) 2015-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2016-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2017 John Paraskevopoulos <[email protected]>
# Copyright (c) 2018, 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2018 Jim Robertson <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Adam Dangoor <[email protected]>
# Copyright (c) 2019, 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Luigi <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2021 SupImDos <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Logan Miller <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Pylint plugin for checking in Sphinx, Google, or Numpy style docstrings
"""
import re
from typing import Optional
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.checkers import utils as checker_utils
from pylint.extensions import _check_docs_utils as utils
from pylint.extensions._check_docs_utils import Docstring
from pylint.interfaces import IAstroidChecker
from pylint.utils import get_global_option
class DocstringParameterChecker(BaseChecker):
"""Checker for Sphinx, Google, or Numpy style docstrings
* Check that all function, method and constructor parameters are mentioned
in the params and types part of the docstring. Constructor parameters
can be documented in either the class docstring or ``__init__`` docstring,
but not both.
* Check that there are no naming inconsistencies between the signature and
the documentation, i.e. also report documented parameters that are missing
in the signature. This is important to find cases where parameters are
renamed only in the code, not in the documentation.
* Check that all explicitly raised exceptions in a function are documented
in the function docstring. Caught exceptions are ignored.
Activate this checker by adding the line::
load-plugins=pylint.extensions.docparams
to the ``MASTER`` section of your ``.pylintrc``.
:param linter: linter object
:type linter: :class:`pylint.lint.PyLinter`
"""
__implements__ = IAstroidChecker
name = "parameter_documentation"
msgs = {
"W9005": (
'"%s" has constructor parameters documented in class and __init__',
"multiple-constructor-doc",
"Please remove parameter declarations in the class or constructor.",
),
"W9006": (
'"%s" not documented as being raised',
"missing-raises-doc",
"Please document exceptions for all raised exception types.",
),
"W9008": (
"Redundant returns documentation",
"redundant-returns-doc",
"Please remove the return/rtype documentation from this method.",
),
"W9010": (
"Redundant yields documentation",
"redundant-yields-doc",
"Please remove the yields documentation from this method.",
),
"W9011": (
"Missing return documentation",
"missing-return-doc",
"Please add documentation about what this method returns.",
{"old_names": [("W9007", "old-missing-returns-doc")]},
),
"W9012": (
"Missing return type documentation",
"missing-return-type-doc",
"Please document the type returned by this method.",
# we can't use the same old_name for two different warnings
# {'old_names': [('W9007', 'missing-returns-doc')]},
),
"W9013": (
"Missing yield documentation",
"missing-yield-doc",
"Please add documentation about what this generator yields.",
{"old_names": [("W9009", "old-missing-yields-doc")]},
),
"W9014": (
"Missing yield type documentation",
"missing-yield-type-doc",
"Please document the type yielded by this method.",
# we can't use the same old_name for two different warnings
# {'old_names': [('W9009', 'missing-yields-doc')]},
),
"W9015": (
'"%s" missing in parameter documentation',
"missing-param-doc",
"Please add parameter declarations for all parameters.",
{"old_names": [("W9003", "old-missing-param-doc")]},
),
"W9016": (
'"%s" missing in parameter type documentation',
"missing-type-doc",
"Please add parameter type declarations for all parameters.",
{"old_names": [("W9004", "old-missing-type-doc")]},
),
"W9017": (
'"%s" differing in parameter documentation',
"differing-param-doc",
"Please check parameter names in declarations.",
),
"W9018": (
'"%s" differing in parameter type documentation',
"differing-type-doc",
"Please check parameter names in type declarations.",
),
"W9019": (
'"%s" useless ignored parameter documentation',
"useless-param-doc",
"Please remove the ignored parameter documentation.",
),
"W9020": (
'"%s" useless ignored parameter type documentation',
"useless-type-doc",
"Please remove the ignored parameter type documentation.",
),
"W9021": (
'Missing any documentation in "%s"',
"missing-any-param-doc",
"Please add parameter and/or type documentation.",
),
}
options = (
(
"accept-no-param-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing parameter "
"documentation in the docstring of a function that has "
"parameters.",
},
),
(
"accept-no-raise-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing raises "
"documentation in the docstring of a function that "
"raises an exception.",
},
),
(
"accept-no-return-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing return "
"documentation in the docstring of a function that "
"returns a statement.",
},
),
(
"accept-no-yields-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing yields "
"documentation in the docstring of a generator.",
},
),
(
"default-docstring-type",
{
"type": "choice",
"default": "default",
"choices": list(utils.DOCSTRING_TYPES),
"help": "If the docstring type cannot be guessed "
"the specified docstring type will be used.",
},
),
)
priority = -2
constructor_names = {"__init__", "__new__"}
not_needed_param_in_docstring = {"self", "cls"}
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""Called for function and method definitions (def).
:param node: Node for a function or method definition in the AST
:type node: :class:`astroid.scoped_nodes.Function`
"""
node_doc = utils.docstringify(node.doc, self.config.default_docstring_type)
# skip functions that match the 'no-docstring-rgx' config option
no_docstring_rgx = get_global_option(self, "no-docstring-rgx")
if no_docstring_rgx and re.match(no_docstring_rgx, node.name):
return
# skip functions smaller than 'docstring-min-length'
lines = checker_utils.get_node_last_lineno(node) - node.lineno
max_lines = get_global_option(self, "docstring-min-length")
if max_lines > -1 and lines < max_lines:
return
self.check_functiondef_params(node, node_doc)
self.check_functiondef_returns(node, node_doc)
self.check_functiondef_yields(node, node_doc)
visit_asyncfunctiondef = visit_functiondef
def check_functiondef_params(self, node, node_doc):
node_allow_no_param = None
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
class_doc = utils.docstringify(
class_node.doc, self.config.default_docstring_type
)
self.check_single_constructor_params(class_doc, node_doc, class_node)
# __init__ or class docstrings can have no parameters documented
# as long as the other documents them.
node_allow_no_param = (
class_doc.has_params()
or class_doc.params_documented_elsewhere()
or None
)
class_allow_no_param = (
node_doc.has_params()
or node_doc.params_documented_elsewhere()
or None
)
self.check_arguments_in_docstring(
class_doc, node.args, class_node, class_allow_no_param
)
self.check_arguments_in_docstring(
node_doc, node.args, node, node_allow_no_param
)
def check_functiondef_returns(self, node, node_doc):
if (not node_doc.supports_yields and node.is_generator()) or node.is_abstract():
return
return_nodes = node.nodes_of_class(astroid.Return)
if (node_doc.has_returns() or node_doc.has_rtype()) and not any(
utils.returns_something(ret_node) for ret_node in return_nodes
):
self.add_message("redundant-returns-doc", node=node)
def check_functiondef_yields(self, node, node_doc):
if not node_doc.supports_yields or node.is_abstract():
return
if (
node_doc.has_yields() or node_doc.has_yields_type()
) and not node.is_generator():
self.add_message("redundant-yields-doc", node=node)
def visit_raise(self, node: nodes.Raise) -> None:
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
expected_excs = utils.possible_exc_types(node)
if not expected_excs:
return
if not func_node.doc:
# If this is a property setter,
# the property should have the docstring instead.
property_ = utils.get_setters_property(func_node)
if property_:
func_node = property_
doc = utils.docstringify(func_node.doc, self.config.default_docstring_type)
if not doc.is_valid():
if doc.doc:
self._handle_no_raise_doc(expected_excs, func_node)
return
found_excs_full_names = doc.exceptions()
# Extract just the class name, e.g. "error" from "re.error"
found_excs_class_names = {exc.split(".")[-1] for exc in found_excs_full_names}
missing_excs = expected_excs - found_excs_class_names
self._add_raise_message(missing_excs, func_node)
def visit_return(self, node: nodes.Return) -> None:
if not utils.returns_something(node):
return
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
doc = utils.docstringify(func_node.doc, self.config.default_docstring_type)
if not doc.is_valid() and self.config.accept_no_return_doc:
return
is_property = checker_utils.decorated_with_property(func_node)
if not (doc.has_returns() or (doc.has_property_returns() and is_property)):
self.add_message("missing-return-doc", node=func_node)
if func_node.returns:
return
if not (doc.has_rtype() or (doc.has_property_type() and is_property)):
self.add_message("missing-return-type-doc", node=func_node)
def visit_yield(self, node: nodes.Yield) -> None:
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
doc = utils.docstringify(func_node.doc, self.config.default_docstring_type)
if not doc.is_valid() and self.config.accept_no_yields_doc:
return
if doc.supports_yields:
doc_has_yields = doc.has_yields()
doc_has_yields_type = doc.has_yields_type()
else:
doc_has_yields = doc.has_returns()
doc_has_yields_type = doc.has_rtype()
if not doc_has_yields:
self.add_message("missing-yield-doc", node=func_node)
if not (doc_has_yields_type or func_node.returns):
self.add_message("missing-yield-type-doc", node=func_node)
def visit_yieldfrom(self, node: nodes.YieldFrom) -> None:
self.visit_yield(node)
def _compare_missing_args(
self,
found_argument_names,
message_id,
not_needed_names,
expected_argument_names,
warning_node,
):
"""Compare the found argument names with the expected ones and
generate a message if there are arguments missing.
:param found_argument_names: argument names found in the docstring
:type found_argument_names: set
:param message_id: pylint message id
:type message_id: str
:param not_needed_names: names that may be omitted
:type not_needed_names: set
:param expected_argument_names: Expected argument names
:type expected_argument_names: set
:param warning_node: The node to be analyzed
:type warning_node: :class:`astroid.scoped_nodes.Node`
"""
missing_argument_names = (
expected_argument_names - found_argument_names
) - not_needed_names
if missing_argument_names:
self.add_message(
message_id,
args=(", ".join(sorted(missing_argument_names)),),
node=warning_node,
)
def _compare_different_args(
self,
found_argument_names,
message_id,
not_needed_names,
expected_argument_names,
warning_node,
):
"""Compare the found argument names with the expected ones and
generate a message if there are extra arguments found.
:param found_argument_names: argument names found in the docstring
:type found_argument_names: set
:param message_id: pylint message id
:type message_id: str
:param not_needed_names: names that may be omitted
:type not_needed_names: set
:param expected_argument_names: Expected argument names
:type expected_argument_names: set
:param warning_node: The node to be analyzed
:type warning_node: :class:`astroid.scoped_nodes.Node`
"""
differing_argument_names = (
(expected_argument_names ^ found_argument_names)
- not_needed_names
- expected_argument_names
)
if differing_argument_names:
self.add_message(
message_id,
args=(", ".join(sorted(differing_argument_names)),),
node=warning_node,
)
def _compare_ignored_args(
self,
found_argument_names,
message_id,
ignored_argument_names,
warning_node,
):
"""Compare the found argument names with the ignored ones and
generate a message if there are ignored arguments found.
:param found_argument_names: argument names found in the docstring
:type found_argument_names: set
:param message_id: pylint message id
:type message_id: str
:param ignored_argument_names: Expected argument names
:type ignored_argument_names: set
:param warning_node: The node to be analyzed
:type warning_node: :class:`astroid.scoped_nodes.Node`
"""
existing_ignored_argument_names = ignored_argument_names & found_argument_names
if existing_ignored_argument_names:
self.add_message(
message_id,
args=(", ".join(sorted(existing_ignored_argument_names)),),
node=warning_node,
)
def check_arguments_in_docstring(
self,
doc: Docstring,
arguments_node: astroid.Arguments,
warning_node: astroid.NodeNG,
accept_no_param_doc: Optional[bool] = None,
):
"""Check that all parameters in a function, method or class constructor
on the one hand and the parameters mentioned in the parameter
documentation (e.g. the Sphinx tags 'param' and 'type') on the other
hand are consistent with each other.
* Undocumented parameters except 'self' are noticed.
* Undocumented parameter types except for 'self' and the ``*<args>``
and ``**<kwargs>`` parameters are noticed.
* Parameters mentioned in the parameter documentation that don't or no
longer exist in the function parameter list are noticed.
* If the text "For the parameters, see" or "For the other parameters,
see" (ignoring additional whitespace) is mentioned in the docstring,
missing parameter documentation is tolerated.
* If there's no Sphinx style, Google style or NumPy style parameter
documentation at all, i.e. ``:param`` is never mentioned etc., the
checker assumes that the parameters are documented in another format
and the absence is tolerated.
:param doc: Docstring for the function, method or class.
:type doc: :class:`Docstring`
:param arguments_node: Arguments node for the function, method or
class constructor.
:type arguments_node: :class:`astroid.scoped_nodes.Arguments`
:param warning_node: The node to assign the warnings to
:type warning_node: :class:`astroid.scoped_nodes.Node`
:param accept_no_param_doc: Whether or not to allow no parameters
to be documented.
If None then this value is read from the configuration.
:type accept_no_param_doc: bool or None
"""
# Tolerate missing param or type declarations if there is a link to
# another method carrying the same name.
if not doc.doc:
return
if accept_no_param_doc is None:
accept_no_param_doc = self.config.accept_no_param_doc
tolerate_missing_params = doc.params_documented_elsewhere()
# Collect the function arguments.
expected_argument_names = {arg.name for arg in arguments_node.args}
expected_argument_names.update(arg.name for arg in arguments_node.kwonlyargs)
not_needed_type_in_docstring = self.not_needed_param_in_docstring.copy()
expected_but_ignored_argument_names = set()
ignored_argument_names = get_global_option(self, "ignored-argument-names")
if ignored_argument_names:
expected_but_ignored_argument_names = {
arg
for arg in expected_argument_names
if ignored_argument_names.match(arg)
}
if arguments_node.vararg is not None:
expected_argument_names.add(arguments_node.vararg)
not_needed_type_in_docstring.add(arguments_node.vararg)
if arguments_node.kwarg is not None:
expected_argument_names.add(arguments_node.kwarg)
not_needed_type_in_docstring.add(arguments_node.kwarg)
params_with_doc, params_with_type = doc.match_param_docs()
# Tolerate no parameter documentation at all.
if not params_with_doc and not params_with_type and accept_no_param_doc:
tolerate_missing_params = True
# This is before the update of param_with_type because this must check only
# the type documented in a docstring, not the one using pep484
# See #4117 and #4593
self._compare_ignored_args(
params_with_type,
"useless-type-doc",
expected_but_ignored_argument_names,
warning_node,
)
for index, arg_name in enumerate(arguments_node.args):
if arguments_node.annotations[index]:
params_with_type.add(arg_name.name)
for index, arg_name in enumerate(arguments_node.kwonlyargs):
if arguments_node.kwonlyargs_annotations[index]:
params_with_type.add(arg_name.name)
if not tolerate_missing_params:
missing_param_doc = (expected_argument_names - params_with_doc) - (
self.not_needed_param_in_docstring | expected_but_ignored_argument_names
)
missing_type_doc = (expected_argument_names - params_with_type) - (
not_needed_type_in_docstring | expected_but_ignored_argument_names
)
if (
missing_param_doc == expected_argument_names == missing_type_doc
and len(expected_argument_names) != 0
):
self.add_message(
"missing-any-param-doc",
args=(warning_node.name),
node=warning_node,
)
else:
self._compare_missing_args(
params_with_doc,
"missing-param-doc",
self.not_needed_param_in_docstring
| expected_but_ignored_argument_names,
expected_argument_names,
warning_node,
)
self._compare_missing_args(
params_with_type,
"missing-type-doc",
not_needed_type_in_docstring | expected_but_ignored_argument_names,
expected_argument_names,
warning_node,
)
self._compare_different_args(
params_with_doc,
"differing-param-doc",
self.not_needed_param_in_docstring,
expected_argument_names,
warning_node,
)
self._compare_different_args(
params_with_type,
"differing-type-doc",
not_needed_type_in_docstring,
expected_argument_names,
warning_node,
)
self._compare_ignored_args(
params_with_doc,
"useless-param-doc",
expected_but_ignored_argument_names,
warning_node,
)
def check_single_constructor_params(self, class_doc, init_doc, class_node):
if class_doc.has_params() and init_doc.has_params():
self.add_message(
"multiple-constructor-doc", args=(class_node.name,), node=class_node
)
def _handle_no_raise_doc(self, excs, node):
if self.config.accept_no_raise_doc:
return
self._add_raise_message(excs, node)
def _add_raise_message(self, missing_excs, node):
"""
Adds a message on :param:`node` for the missing exception type.
:param missing_excs: A list of missing exception types.
:type missing_excs: set(str)
:param node: The node to show the message on.
:type node: nodes.NodeNG
"""
if node.is_abstract():
try:
missing_excs.remove("NotImplementedError")
except KeyError:
pass
if not missing_excs:
return
self.add_message(
"missing-raises-doc", args=(", ".join(sorted(missing_excs)),), node=node
)
def register(linter):
"""Required method to auto register this checker.
:param linter: Main interface object for Pylint plugins
:type linter: Pylint object
"""
linter.register_checker(DocstringParameterChecker(linter))
| 1 | 16,314 | This is actually incorrect and should be the other way around. Going to fix this tomorrow. If `no_docstring_rgx == re.compile("")` no function should be checked, since every function matches the pattern. If `no_docstring_rgx == re.compile("^$")` all functions should be checked. | PyCQA-pylint | py |
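A minimal Python sketch of the regex behaviour the review above describes (the name "my_function" is only an illustrative placeholder):
import re
# An empty pattern matches at position 0 of any string, so with no-docstring-rgx = ""
# every function name matches and the docstring check is skipped for all of them.
assert re.compile("").match("my_function") is not None
# "^$" matches only the empty string, so no real function name matches it
# and every function keeps being checked.
assert re.compile("^$").match("my_function") is None
assert re.compile("^$").match("") is not None
This is also why the `no_docstring_rgx.pattern` truthiness test in the patched condition inverts the empty-pattern case: `re.compile("").pattern` is "", which is falsy, so the early return is skipped and such functions are checked even though the option says to skip them.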
@@ -95,7 +95,7 @@ def get_spotify_oauth():
"""
client_id = current_app.config['SPOTIFY_CLIENT_ID']
client_secret = current_app.config['SPOTIFY_CLIENT_SECRET']
- scope = 'user-read-recently-played'
+ scope = 'user-read-recently-played user-read-currently-playing'
redirect_url = current_app.config['SPOTIFY_CALLBACK_URL']
return spotipy.oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri=redirect_url, scope=scope)
| 1 | import pytz
from flask import current_app
import spotipy.oauth2
from listenbrainz.db import spotify as db_spotify
import datetime
SPOTIFY_API_RETRIES = 5
class Spotify:
def __init__(self, user_id, musicbrainz_id, musicbrainz_row_id, user_token, token_expires,
refresh_token, last_updated, active, error_message, latest_listened_at):
self.user_id = user_id
self.user_token = user_token
self.token_expires = token_expires
self.refresh_token = refresh_token
self.last_updated = last_updated
self.active = active
self.error_message = error_message
self.musicbrainz_id = musicbrainz_id
self.latest_listened_at = latest_listened_at
self.musicbrainz_row_id = musicbrainz_row_id
def get_spotipy_client(self):
return spotipy.Spotify(auth=self.user_token)
@property
def last_updated_iso(self):
if self.last_updated is None:
return None
return self.last_updated.isoformat() + "Z"
@property
def latest_listened_at_iso(self):
if self.latest_listened_at is None:
return None
return self.latest_listened_at.isoformat() + "Z"
@property
def token_expired(self):
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=pytz.UTC)
return now >= self.token_expires
@staticmethod
def from_dbrow(row):
return Spotify(
user_id=row['user_id'],
user_token=row['user_token'],
token_expires=row['token_expires'],
refresh_token=row['refresh_token'],
last_updated=row['last_updated'],
active=row['active'],
error_message=row['error_message'],
musicbrainz_id=row['musicbrainz_id'],
musicbrainz_row_id=row['musicbrainz_row_id'],
latest_listened_at=row['latest_listened_at'],
)
def __str__(self):
return "<Spotify(user:%s): %s>" % (self.user_id, self.musicbrainz_id)
def refresh_user_token(spotify_user):
""" Refreshes the user token for the given spotify user.
Args:
spotify_user (domain.spotify.Spotify): the user whose token is to be refreshed
Returns:
user (domain.spotify.Spotify): the same user with updated tokens
"""
auth = get_spotify_oauth()
retries = SPOTIFY_API_RETRIES
new_token = None
while retries > 0:
new_token = auth.refresh_access_token(spotify_user.refresh_token)
if new_token:
break
retries -= 1
if new_token is None:
raise SpotifyAPIError('Could not refresh API Token for Spotify user')
access_token = new_token['access_token']
refresh_token = new_token['refresh_token']
expires_at = new_token['expires_at']
db_spotify.update_token(spotify_user.user_id, access_token, refresh_token, expires_at)
return get_user(spotify_user.user_id)
def get_spotify_oauth():
""" Returns a spotipy OAuth instance that can be used to authenticate with spotify.
"""
client_id = current_app.config['SPOTIFY_CLIENT_ID']
client_secret = current_app.config['SPOTIFY_CLIENT_SECRET']
scope = 'user-read-recently-played'
redirect_url = current_app.config['SPOTIFY_CALLBACK_URL']
return spotipy.oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri=redirect_url, scope=scope)
def get_user(user_id):
""" Returns a Spotify instance corresponding to the specified LB row ID.
If the user_id is not present in the spotify table, returns None
Args:
user_id (int): the ListenBrainz row ID of the user
"""
row = db_spotify.get_user(user_id)
if row:
return Spotify.from_dbrow(row)
return None
def remove_user(user_id):
""" Delete user entry for user with specified ListenBrainz user ID.
Args:
user_id (int): the ListenBrainz row ID of the user
"""
db_spotify.delete_spotify(user_id)
def add_new_user(user_id, spot_access_token):
"""Create a spotify row for a user based on OAuth access tokens
Args:
user_id: A flask auth `current_user.id`
spot_access_token: A spotipy access token from SpotifyOAuth.get_access_token
"""
access_token = spot_access_token['access_token']
refresh_token = spot_access_token['refresh_token']
expires_at = spot_access_token['expires_at']
db_spotify.create_spotify(user_id, access_token, refresh_token, expires_at)
def get_active_users_to_process():
""" Returns a list of Spotify user instances that need their Spotify listens imported.
"""
return [Spotify.from_dbrow(row) for row in db_spotify.get_active_users_to_process()]
def update_last_updated(user_id, success=True, error_message=None):
""" Update the last_update field for user with specified user ID.
Also, set the user as active or inactive depending on whether their listens
were imported without error.
If there was an error, add the error to the db.
Args:
user_id (int): the ListenBrainz row ID of the user
success (bool): flag representing whether the last import was successful or not.
error_message (str): the user-friendly error message to be displayed.
"""
if error_message:
db_spotify.add_update_error(user_id, error_message)
else:
db_spotify.update_last_updated(user_id, success)
def update_latest_listened_at(user_id, timestamp):
""" Update the latest_listened_at field for user with specified ListenBrainz user ID.
Args:
user_id (int): the ListenBrainz row ID of the user
timestamp (int): the unix timestamp of the latest listen imported for the user
"""
db_spotify.update_latest_listened_at(user_id, timestamp)
class SpotifyImporterException(Exception):
pass
class SpotifyListenBrainzError(Exception):
pass
class SpotifyAPIError(Exception):
pass
| 1 | 15,147 | Should this be in a config file? | metabrainz-listenbrainz-server | py |
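If the hard-coded value the reviewer is asking about (the `SPOTIFY_API_RETRIES` constant or the OAuth scope string; the comment does not say which) were moved into configuration, one possible shape is sketched below. Treating `SPOTIFY_API_RETRIES` as a Flask config key is an assumption; the project does not necessarily define it:

from flask import current_app

DEFAULT_SPOTIFY_API_RETRIES = 5

def get_spotify_api_retries():
    # Falls back to the module default when the (hypothetical) config key is absent.
    # Needs an active Flask application context, like the rest of this module.
    return current_app.config.get('SPOTIFY_API_RETRIES', DEFAULT_SPOTIFY_API_RETRIES)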
@@ -3,13 +3,15 @@ const {
eachAsync,
executeLegacyOperation,
makeInterruptibleAsyncInterval,
- BufferPool
+ BufferPool,
+ shuffle
} = require('../../src/utils');
const { expect } = require('chai');
const sinon = require('sinon');
+const { MongoRuntimeError } = require('../../src/error');
-describe('utils', function () {
- context('eachAsync', function () {
+describe('driver utils', function () {
+ context('eachAsync function', function () {
it('should callback with an error', function (done) {
eachAsync(
[{ error: false }, { error: true }], | 1 | 'use strict';
const {
eachAsync,
executeLegacyOperation,
makeInterruptibleAsyncInterval,
BufferPool
} = require('../../src/utils');
const { expect } = require('chai');
const sinon = require('sinon');
describe('utils', function () {
context('eachAsync', function () {
it('should callback with an error', function (done) {
eachAsync(
[{ error: false }, { error: true }],
(item, cb) => {
cb(item.error ? new Error('error requested') : null);
},
err => {
expect(err).to.exist;
done();
}
);
});
it('should propagate a synchronously thrown error', function (done) {
expect(() =>
eachAsync(
[{}],
() => {
throw new Error('something wicked');
},
err => {
expect(err).to.not.exist;
done(err);
}
)
).to.throw(/something wicked/);
done();
});
});
describe('#makeInterruptibleAsyncInterval', function () {
let clock, executor, fnSpy;
beforeEach(function () {
clock = sinon.useFakeTimers();
fnSpy = sinon.spy(cb => {
cb();
});
});
afterEach(function () {
if (executor) {
executor.stop();
}
clock.restore();
});
context('when the immediate option is provided', function () {
it('executes the function immediately and schedules the next execution on the interval', function () {
executor = makeInterruptibleAsyncInterval(fnSpy, {
immediate: true,
minInterval: 10,
interval: 30
});
// expect immediate invocation
expect(fnSpy.calledOnce).to.be.true;
// advance clock by less than the scheduled interval to ensure we don't execute early
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
// advance clock to the interval
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
});
context('when the immediate option is not provided', function () {
it('executes the function on the provided interval', function () {
executor = makeInterruptibleAsyncInterval(fnSpy, { minInterval: 10, interval: 30 });
// advance clock by less than the scheduled interval to ensure we don't execute early
clock.tick(29);
expect(fnSpy.callCount).to.equal(0);
// advance clock to the interval
clock.tick(1);
expect(fnSpy.calledOnce).to.be.true;
// advance clock by the interval
clock.tick(30);
expect(fnSpy.calledTwice).to.be.true;
});
});
describe('#wake', function () {
context('when the time until next call is negative', () => {
// somehow we missed the execution, due to an unreliable clock
it('should execute immediately and schedule the next execution on the interval if this is the first wake', () => {
let fakeClockHasTicked = false;
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30,
clock: () => {
if (fakeClockHasTicked) {
return 81;
}
fakeClockHasTicked = true;
return 50;
}
});
// tick the environment clock by a smaller amount than the interval
clock.tick(2);
// sanity check to make sure we haven't called execute yet
expect(fnSpy.callCount).to.equal(0);
executor.wake();
// expect immediate execution since expected next call time was 50 + 30 = 80, but the clock shows 81
expect(fnSpy.calledOnce).to.be.true;
// move forward by more than minInterval but less than full interval to ensure we're scheduling correctly
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
// move forward by the full interval to make sure the scheduled call executes
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
it('should execute immediately and schedule the next execution on the interval if this is a repeated wake and the current execution is not rescheduled', () => {
let fakeClockTickCount = 0;
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30,
clock: () => {
if (fakeClockTickCount === 0) {
// on init, return arbitrary starting time
fakeClockTickCount++;
return 50;
}
if (fakeClockTickCount === 1) {
// expected execution time is 80
// on first wake return a time so less than minInterval is left and no need to reschedule
fakeClockTickCount++;
return 71;
}
return 81;
}
});
// tick the clock by a small amount before and after the wake to make sure no unexpected async things are happening
clock.tick(11);
executor.wake();
clock.tick(5);
expect(fnSpy.callCount).to.equal(0);
// call our second wake that gets the overdue timer, so expect immediate execution
executor.wake();
expect(fnSpy.calledOnce).to.be.true;
// move forward by more than minInterval but less than full interval to ensure we're scheduling correctly
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
// move forward by the full interval to make sure the scheduled call executes
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
it('should execute immediately and schedule the next execution on the interval if this is a repeated wake even if the current execution is rescheduled', () => {
let fakeClockTickCount = 0;
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30,
clock: () => {
if (fakeClockTickCount === 0) {
// on init, return arbitrary starting time
fakeClockTickCount++;
return 50;
}
if (fakeClockTickCount === 1) {
// expected execution time is 80
// on first wake return a time so that more than minInterval is left
fakeClockTickCount++;
return 61;
}
return 81;
}
});
// tick the clock by a small amount before and after the wake to make sure no unexpected async things are happening
clock.tick(2);
executor.wake();
clock.tick(9);
expect(fnSpy.callCount).to.equal(0);
// call our second wake that gets the overdue timer, so expect immediate execution
executor.wake();
expect(fnSpy.calledOnce).to.be.true;
// move forward by more than minInterval but less than full interval to ensure we're scheduling correctly
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
// move forward by the full interval to make sure the scheduled call executes
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
});
context('when the time until next call is less than the minInterval', () => {
// we can't make it go any faster, so we should let the scheduled execution run
it('should execute on the interval if this is the first wake', () => {
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30
});
// tick the environment clock so that less than minInterval is left
clock.tick(21);
executor.wake();
        // move forward to just before expected execution time
clock.tick(8);
expect(fnSpy.callCount).to.equal(0);
// move forward to the full interval to make sure the scheduled call executes
clock.tick(1);
expect(fnSpy.calledOnce).to.be.true;
// check to make sure the next execution runs as expected
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
it('should execute on the original interval if this is a repeated wake and the current execution is not rescheduled', () => {
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30
});
// tick the environment clock so that less than minInterval is left
clock.tick(21);
executor.wake();
// tick the environment clock some more so that the next wake is called at a different time
clock.tick(2);
executor.wake();
// tick to just before the expected execution time
clock.tick(6);
expect(fnSpy.callCount).to.equal(0);
// tick up to 20 for the expected execution
clock.tick(1);
expect(fnSpy.calledOnce).to.be.true;
// check to make sure the next execution runs as expected
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
it('should execute on the minInterval from the first wake if this is a repeated wake and the current execution is rescheduled', () => {
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30
});
// tick the environment clock so that more than minInterval is left
clock.tick(13);
executor.wake();
// the first wake should move up the execution to occur at 23 ticks from the start
// we tick 8 to get to 21, so that less than minInterval is left on the original interval expected execution
clock.tick(8);
executor.wake();
// now we tick to just before the rescheduled execution time
clock.tick(1);
expect(fnSpy.callCount).to.equal(0);
// tick up to 23 for the expected execution
clock.tick(1);
expect(fnSpy.calledOnce).to.be.true;
// check to make sure the next execution runs as expected
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
});
context('when the time until next call is more than the minInterval', () => {
// expedite the execution to minInterval
it('should execute on the minInterval if this is the first wake', () => {
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30
});
// tick the environment clock so that more than minInterval is left
clock.tick(3);
executor.wake();
// the first wake should move up the execution to occur at 13 ticks from the start
// we tick to just before the rescheduled execution time
clock.tick(9);
expect(fnSpy.callCount).to.equal(0);
// tick up to 13 for the expected execution
clock.tick(1);
expect(fnSpy.calledOnce).to.be.true;
// check to make sure the next execution runs as expected
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
it('should execute on the minInterval from the first wake if this is a repeated wake', () => {
// NOTE: under regular circumstances, if the second wake is early enough to warrant a reschedule
// then the first wake must have already warranted a reschedule
executor = makeInterruptibleAsyncInterval(fnSpy, {
minInterval: 10,
interval: 30
});
// tick the environment clock so that more than minInterval is left
clock.tick(3);
executor.wake();
// the first wake should move up the execution to occur at 13 ticks from the start
// we tick a bit more so that more than minInterval is still left and call our repeated wake
clock.tick(2);
executor.wake();
// tick up to just before the expected execution
clock.tick(7);
expect(fnSpy.callCount).to.equal(0);
// now go up to 13
clock.tick(1);
expect(fnSpy.calledOnce).to.be.true;
// check to make sure the next execution runs as expected
clock.tick(29);
expect(fnSpy.calledOnce).to.be.true;
clock.tick(1);
expect(fnSpy.calledTwice).to.be.true;
});
});
});
});
context('BufferPool', function () {
it('should report the correct length', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1]));
buffer.append(Buffer.from([2, 3]));
buffer.append(Buffer.from([2, 3]));
expect(buffer).property('length').to.equal(6);
});
it('return an empty buffer if too many bytes requested', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1, 2, 3]));
const data = buffer.read(6);
expect(data).to.have.length(0);
expect(buffer).property('length').to.equal(4);
});
context('peek', function () {
it('exact size', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1]));
const data = buffer.peek(2);
expect(data).to.eql(Buffer.from([0, 1]));
expect(buffer).property('length').to.equal(2);
});
it('within first buffer', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1, 2, 3]));
const data = buffer.peek(2);
expect(data).to.eql(Buffer.from([0, 1]));
expect(buffer).property('length').to.equal(4);
});
it('across multiple buffers', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1]));
buffer.append(Buffer.from([2, 3]));
buffer.append(Buffer.from([4, 5]));
expect(buffer).property('length').to.equal(6);
const data = buffer.peek(5);
expect(data).to.eql(Buffer.from([0, 1, 2, 3, 4]));
expect(buffer).property('length').to.equal(6);
});
});
context('read', function () {
it('should throw an error if a negative size is requested', function () {
const buffer = new BufferPool();
expect(() => buffer.read(-1)).to.throw(/Argument "size" must be a non-negative number/);
});
it('should throw an error if a non-number size is requested', function () {
const buffer = new BufferPool();
expect(() => buffer.read('256')).to.throw(/Argument "size" must be a non-negative number/);
});
it('exact size', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1]));
const data = buffer.read(2);
expect(data).to.eql(Buffer.from([0, 1]));
expect(buffer).property('length').to.equal(0);
});
it('within first buffer', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1, 2, 3]));
const data = buffer.read(2);
expect(data).to.eql(Buffer.from([0, 1]));
expect(buffer).property('length').to.equal(2);
});
it('across multiple buffers', function () {
const buffer = new BufferPool();
buffer.append(Buffer.from([0, 1]));
buffer.append(Buffer.from([2, 3]));
buffer.append(Buffer.from([4, 5]));
expect(buffer).property('length').to.equal(6);
const data = buffer.read(5);
expect(data).to.eql(Buffer.from([0, 1, 2, 3, 4]));
expect(buffer).property('length').to.equal(1);
expect(buffer.read(1)).to.eql(Buffer.from([5]));
});
});
});
context('executeLegacyOperation', function () {
it('should call callback with errors on throw errors, and rethrow error', function () {
const expectedError = new Error('THIS IS AN ERROR');
let callbackError, caughtError;
const topology = {
logicalSessionTimeoutMinutes: null
};
const operation = () => {
throw expectedError;
};
const callback = err => (callbackError = err);
const options = { skipSessions: true };
try {
executeLegacyOperation(topology, operation, [{}, callback], options);
} catch (e) {
caughtError = e;
}
expect(callbackError).to.equal(expectedError);
expect(caughtError).to.equal(expectedError);
});
it('should reject promise with errors on throw errors, and rethrow error', function () {
const expectedError = new Error('THIS IS AN ERROR');
const topology = {
logicalSessionTimeoutMinutes: null
};
const operation = () => {
throw expectedError;
};
const options = { skipSessions: true };
return executeLegacyOperation(topology, operation, [{}, null], options).then(null, err => {
expect(err).to.equal(expectedError);
});
});
});
});
 | 1 | 21,411 | Purely a preference, but have you considered using parentheses to indicate that a method is being tested? e.g., `describe('eachAsync()')` | mongodb-node-mongodb-native | js |
@@ -449,9 +449,10 @@ ResultCode RocksEngine::createCheckpoint(const std::string& name) {
auto checkpointPath = folly::stringPrintf("%s/checkpoints/%s/data",
dataPath_.c_str(), name.c_str());
LOG(INFO) << "Target checkpoint path : " << checkpointPath;
- if (fs::FileUtils::exist(checkpointPath)) {
- LOG(ERROR) << "The snapshot file already exists: " << checkpointPath;
- return ResultCode::ERR_CHECKPOINT_ERROR;
+ if (fs::FileUtils::exist(checkpointPath) &&
+ !fs::FileUtils::remove(checkpointPath.data(), true)) {
+ LOG(ERROR) << "Remove exist dir failed of checkpoint : " << checkpointPath;
+ return ResultCode::ERR_IO_ERROR;
}
auto parent = checkpointPath.substr(0, checkpointPath.rfind('/')); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/RocksEngine.h"
#include <folly/String.h>
#include "fs/FileUtils.h"
#include "kvstore/KVStore.h"
#include "kvstore/RocksEngineConfig.h"
#include <rocksdb/convenience.h>
DEFINE_bool(enable_auto_repair, false, "True for auto repair db.");
namespace nebula {
namespace kvstore {
using fs::FileUtils;
using fs::FileType;
namespace {
/***************************************
*
* Implementation of WriteBatch
*
**************************************/
class RocksWriteBatch : public WriteBatch {
private:
rocksdb::WriteBatch batch_;
public:
RocksWriteBatch() : batch_(FLAGS_rocksdb_batch_size) {}
virtual ~RocksWriteBatch() = default;
ResultCode put(folly::StringPiece key, folly::StringPiece value) override {
if (batch_.Put(toSlice(key), toSlice(value)).ok()) {
return ResultCode::SUCCEEDED;
} else {
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode remove(folly::StringPiece key) override {
if (batch_.Delete(toSlice(key)).ok()) {
return ResultCode::SUCCEEDED;
} else {
return ResultCode::ERR_UNKNOWN;
}
}
// Remove all keys in the range [start, end)
ResultCode removeRange(folly::StringPiece start, folly::StringPiece end) override {
if (batch_.DeleteRange(toSlice(start), toSlice(end)).ok()) {
return ResultCode::SUCCEEDED;
} else {
return ResultCode::ERR_UNKNOWN;
}
}
rocksdb::WriteBatch* data() {
return &batch_;
}
};
} // Anonymous namespace
/***************************************
*
* Implementation of WriteBatch
*
**************************************/
RocksEngine::RocksEngine(GraphSpaceID spaceId,
const std::string& dataPath,
std::shared_ptr<rocksdb::MergeOperator> mergeOp,
std::shared_ptr<rocksdb::CompactionFilterFactory> cfFactory)
: KVEngine(spaceId)
, dataPath_(folly::stringPrintf("%s/nebula/%d", dataPath.c_str(), spaceId)) {
auto path = folly::stringPrintf("%s/data", dataPath_.c_str());
if (FileUtils::fileType(path.c_str()) == FileType::NOTEXIST) {
if (!FileUtils::makeDir(path)) {
LOG(FATAL) << "makeDir " << path << " failed";
}
}
if (FileUtils::fileType(path.c_str()) != FileType::DIRECTORY) {
LOG(FATAL) << path << " is not directory";
}
rocksdb::Options options;
rocksdb::DB* db = nullptr;
rocksdb::Status status = initRocksdbOptions(options);
CHECK(status.ok());
if (mergeOp != nullptr) {
options.merge_operator = mergeOp;
}
if (cfFactory != nullptr) {
options.compaction_filter_factory = cfFactory;
}
status = rocksdb::DB::Open(options, path, &db);
if (status.IsNoSpace()) {
LOG(WARNING) << status.ToString();
} else if (status.IsCorruption() || status.IsIncomplete() || status.IsTryAgain()) {
if (FLAGS_enable_auto_repair && !status.ok()) {
LOG(ERROR) << "try repair db. [" << status.ToString() << "] -> ["
<< rocksdb::RepairDB(path, options).ToString() << "]";
status = rocksdb::DB::Open(options, path, &db);
}
CHECK(status.ok()) << status.ToString();
} else {
CHECK(status.ok()) << status.ToString();
}
db_.reset(db);
partsNum_ = allParts().size();
LOG(INFO) << "open rocksdb on " << path;
}
void RocksEngine::stop() {
if (db_) {
// Because we trigger compaction in WebService, we need to stop all background work
// before we stop HttpServer.
rocksdb::CancelAllBackgroundWork(db_.get(), true);
}
}
std::unique_ptr<WriteBatch> RocksEngine::startBatchWrite() {
return std::make_unique<RocksWriteBatch>();
}
ResultCode RocksEngine::commitBatchWrite(std::unique_ptr<WriteBatch> batch,
bool disableWAL,
bool sync) {
rocksdb::WriteOptions options;
options.disableWAL = disableWAL;
options.sync = sync;
auto* b = static_cast<RocksWriteBatch*>(batch.get());
rocksdb::Status status = db_->Write(options, b->data());
if (status.ok()) {
return ResultCode::SUCCEEDED;
}
LOG(ERROR) << "Write into rocksdb failed because of " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
ResultCode RocksEngine::get(const std::string& key, std::string* value) {
rocksdb::ReadOptions options;
rocksdb::Status status = db_->Get(options, rocksdb::Slice(key), value);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else if (status.IsNotFound()) {
VLOG(3) << "Get: " << key << " Not Found";
return ResultCode::ERR_KEY_NOT_FOUND;
} else {
VLOG(3) << "Get Failed: " << key << " " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
std::vector<Status> RocksEngine::multiGet(const std::vector<std::string>& keys,
std::vector<std::string>* values) {
rocksdb::ReadOptions options;
std::vector<rocksdb::Slice> slices;
for (size_t index = 0; index < keys.size(); index++) {
slices.emplace_back(keys[index]);
}
auto status = db_->MultiGet(options, slices, values);
std::vector<Status> ret;
std::transform(status.begin(), status.end(), std::back_inserter(ret),
[] (const auto& s) {
if (s.ok()) {
return Status::OK();
} else if (s.IsNotFound()) {
return Status::KeyNotFound();
} else {
return Status::Error();
}
});
return ret;
}
ResultCode RocksEngine::range(const std::string& start,
const std::string& end,
std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(start));
}
storageIter->reset(new RocksRangeIter(iter, start, end));
return ResultCode::SUCCEEDED;
}
ResultCode RocksEngine::prefix(const std::string& prefix,
std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(prefix));
}
storageIter->reset(new RocksPrefixIter(iter, prefix));
return ResultCode::SUCCEEDED;
}
ResultCode RocksEngine::rangeWithPrefix(const std::string& start,
const std::string& prefix,
std::unique_ptr<KVIterator>* storageIter) {
rocksdb::ReadOptions options;
rocksdb::Iterator* iter = db_->NewIterator(options);
if (iter) {
iter->Seek(rocksdb::Slice(start));
}
storageIter->reset(new RocksPrefixIter(iter, prefix));
return ResultCode::SUCCEEDED;
}
ResultCode RocksEngine::put(std::string key, std::string value) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
rocksdb::Status status = db_->Put(options, key, value);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
VLOG(3) << "Put Failed: " << key << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::multiPut(std::vector<KV> keyValues) {
rocksdb::WriteBatch updates(FLAGS_rocksdb_batch_size);
for (size_t i = 0; i < keyValues.size(); i++) {
updates.Put(keyValues[i].first, keyValues[i].second);
}
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
rocksdb::Status status = db_->Write(options, &updates);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
VLOG(3) << "MultiPut Failed: " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::remove(const std::string& key) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
auto status = db_->Delete(options, key);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
VLOG(3) << "Remove Failed: " << key << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::multiRemove(std::vector<std::string> keys) {
rocksdb::WriteBatch deletes(FLAGS_rocksdb_batch_size);
for (size_t i = 0; i < keys.size(); i++) {
deletes.Delete(keys[i]);
}
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
rocksdb::Status status = db_->Write(options, &deletes);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
VLOG(3) << "MultiRemove Failed: " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::removeRange(const std::string& start,
const std::string& end) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
auto status = db_->DeleteRange(options, db_->DefaultColumnFamily(), start, end);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
VLOG(3) << "RemoveRange Failed: " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
std::string RocksEngine::partKey(PartitionID partId) {
return NebulaKeyUtils::systemPartKey(partId);
}
void RocksEngine::addPart(PartitionID partId) {
auto ret = put(partKey(partId), "");
if (ret == ResultCode::SUCCEEDED) {
partsNum_++;
CHECK_GE(partsNum_, 0);
}
}
void RocksEngine::removePart(PartitionID partId) {
rocksdb::WriteOptions options;
options.disableWAL = FLAGS_rocksdb_disable_wal;
auto status = db_->Delete(options, partKey(partId));
if (status.ok()) {
partsNum_--;
CHECK_GE(partsNum_, 0);
}
}
std::vector<PartitionID> RocksEngine::allParts() {
std::unique_ptr<KVIterator> iter;
static const std::string prefixStr = NebulaKeyUtils::systemPrefix();
CHECK_EQ(ResultCode::SUCCEEDED, this->prefix(prefixStr, &iter));
std::vector<PartitionID> parts;
while (iter->valid()) {
auto key = iter->key();
CHECK_EQ(key.size(), sizeof(PartitionID) + sizeof(NebulaSystemKeyType));
PartitionID partId = *reinterpret_cast<const PartitionID*>(key.data());
if (!NebulaKeyUtils::isSystemPart(key)) {
VLOG(3) << "Skip: " << std::bitset<32>(partId);
iter->next();
continue;
}
partId = partId >> 8;
parts.emplace_back(partId);
iter->next();
}
return parts;
}
int32_t RocksEngine::totalPartsNum() {
return partsNum_;
}
ResultCode RocksEngine::ingest(const std::vector<std::string>& files) {
rocksdb::IngestExternalFileOptions options;
options.move_files = true;
options.failed_move_fall_back_to_copy = true;
options.verify_checksums_before_ingest = true;
options.verify_checksums_readahead_size = 2U << 20;
options.write_global_seqno = false;
options.snapshot_consistency = true;
options.allow_global_seqno = true;
rocksdb::Status status = db_->IngestExternalFile(files, options);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
LOG(ERROR) << "Ingest Failed: " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::setOption(const std::string& configKey,
const std::string& configValue) {
std::unordered_map<std::string, std::string> configOptions = {
{configKey, configValue}
};
rocksdb::Status status = db_->SetOptions(configOptions);
if (status.ok()) {
LOG(INFO) << "SetOption Succeeded: " << configKey << ":" << configValue;
return ResultCode::SUCCEEDED;
} else {
LOG(ERROR) << "SetOption Failed: " << configKey << ":" << configValue;
return ResultCode::ERR_INVALID_ARGUMENT;
}
}
ResultCode RocksEngine::setDBOption(const std::string& configKey,
const std::string& configValue) {
std::unordered_map<std::string, std::string> configOptions = {
{configKey, configValue}
};
rocksdb::Status status = db_->SetDBOptions(configOptions);
if (status.ok()) {
LOG(INFO) << "SetDBOption Succeeded: " << configKey << ":" << configValue;
return ResultCode::SUCCEEDED;
} else {
LOG(ERROR) << "SetDBOption Failed: " << configKey << ":" << configValue;
return ResultCode::ERR_INVALID_ARGUMENT;
}
}
ResultCode RocksEngine::compact() {
rocksdb::CompactRangeOptions options;
rocksdb::Status status = db_->CompactRange(options, nullptr, nullptr);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
LOG(ERROR) << "CompactAll Failed: " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::flush() {
rocksdb::FlushOptions options;
rocksdb::Status status = db_->Flush(options);
if (status.ok()) {
return ResultCode::SUCCEEDED;
} else {
LOG(ERROR) << "Flush Failed: " << status.ToString();
return ResultCode::ERR_UNKNOWN;
}
}
ResultCode RocksEngine::createCheckpoint(const std::string& name) {
LOG(INFO) << "Begin checkpoint : " << dataPath_;
/*
* The default checkpoint directory structure is :
* |--FLAGS_data_path
* |----nebula
* |------space1
* |--------data
* |--------wal
* |--------checkpoints
* |----------snapshot1
* |------------data
* |------------wal
* |----------snapshot2
* |----------snapshot3
*
*/
auto checkpointPath = folly::stringPrintf("%s/checkpoints/%s/data",
dataPath_.c_str(), name.c_str());
LOG(INFO) << "Target checkpoint path : " << checkpointPath;
if (fs::FileUtils::exist(checkpointPath)) {
LOG(ERROR) << "The snapshot file already exists: " << checkpointPath;
return ResultCode::ERR_CHECKPOINT_ERROR;
}
auto parent = checkpointPath.substr(0, checkpointPath.rfind('/'));
if (!FileUtils::exist(parent)) {
if (!FileUtils::makeDir(parent)) {
LOG(ERROR) << "Make dir " << parent << " failed";
return ResultCode::ERR_UNKNOWN;
}
}
rocksdb::Checkpoint* checkpoint;
rocksdb::Status status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint);
std::unique_ptr<rocksdb::Checkpoint> cp(checkpoint);
if (!status.ok()) {
LOG(ERROR) << "Init checkpoint Failed: " << status.ToString();
return ResultCode::ERR_CHECKPOINT_ERROR;
}
status = cp->CreateCheckpoint(checkpointPath, 0);
if (!status.ok()) {
LOG(ERROR) << "Create checkpoint Failed: " << status.ToString();
return ResultCode::ERR_CHECKPOINT_ERROR;
}
return ResultCode::SUCCEEDED;
}
} // namespace kvstore
} // namespace nebula
| 1 | 29,932 | Why not return success? | vesoft-inc-nebula | cpp |
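One way to read the question above: when the checkpoint path already exists, should the engine wipe it and rebuild (what the patch does), or simply report success? A hedged Python sketch of the two options follows; it is an illustration only, not the Nebula/RocksDB code path:

import os
import shutil

def create_checkpoint(path, reuse_existing=False):
    if os.path.exists(path):
        if reuse_existing:
            # One reading of the reviewer's suggestion: an existing checkpoint
            # is treated as already created, so just report success.
            return True
        # The patch's behaviour: remove the stale checkpoint and rebuild it.
        shutil.rmtree(path)
    os.makedirs(path)
    # ... populate the checkpoint contents here ...
    return True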
@@ -3,12 +3,19 @@ namespace Psalm\Report;
use Psalm\Config;
use Psalm\Internal\Analyzer\DataFlowNodeData;
+use Psalm\Internal\Analyzer\IssueData;
use Psalm\Report;
+use function get_cfg_var;
+use function ini_get;
+use function strtr;
use function substr;
class ConsoleReport extends Report
{
+ /** @var string|null */
+ private $linkFormat;
+
public function create(): string
{
$output = ''; | 1 | <?php
namespace Psalm\Report;
use Psalm\Config;
use Psalm\Internal\Analyzer\DataFlowNodeData;
use Psalm\Report;
use function substr;
class ConsoleReport extends Report
{
public function create(): string
{
$output = '';
foreach ($this->issues_data as $issue_data) {
$output .= $this->format($issue_data) . "\n" . "\n";
}
return $output;
}
private function format(\Psalm\Internal\Analyzer\IssueData $issue_data): string
{
$issue_string = '';
$is_error = $issue_data->severity === Config::REPORT_ERROR;
if ($is_error) {
$issue_string .= ($this->use_color ? "\e[0;31mERROR\e[0m" : 'ERROR');
} else {
$issue_string .= 'INFO';
}
$issue_reference = $issue_data->link ? ' (see ' . $issue_data->link . ')' : '';
$issue_string .= ': ' . $issue_data->type
. ' - ' . $issue_data->file_name . ':' . $issue_data->line_from . ':' . $issue_data->column_from
. ' - ' . $issue_data->message . $issue_reference . "\n";
if ($issue_data->taint_trace) {
$issue_string .= $this->getTaintSnippets($issue_data->taint_trace);
} elseif ($this->show_snippet) {
$snippet = $issue_data->snippet;
if (!$this->use_color) {
$issue_string .= $snippet;
} else {
$selection_start = $issue_data->from - $issue_data->snippet_from;
$selection_length = $issue_data->to - $issue_data->from;
$issue_string .= substr($snippet, 0, $selection_start)
. ($is_error ? "\e[97;41m" : "\e[30;47m") . substr($snippet, $selection_start, $selection_length)
. "\e[0m" . substr($snippet, $selection_length + $selection_start) . "\n";
}
}
if ($issue_data->other_references) {
if ($this->show_snippet) {
$issue_string .= "\n";
}
$issue_string .= $this->getTaintSnippets($issue_data->other_references);
}
return $issue_string;
}
/**
* @param non-empty-list<DataFlowNodeData|array{label: string, entry_path_type: string}> $taint_trace
*/
private function getTaintSnippets(array $taint_trace) : string
{
$snippets = '';
foreach ($taint_trace as $node_data) {
if ($node_data instanceof DataFlowNodeData) {
$snippets .= ' ' . $node_data->label
. ' - ' . $node_data->file_name
. ':' . $node_data->line_from
. ':' . $node_data->column_from . "\n";
if ($this->show_snippet) {
$snippet = $node_data->snippet;
if (!$this->use_color) {
$snippets .= $snippet . "\n\n";
} else {
$selection_start = $node_data->from - $node_data->snippet_from;
$selection_length = $node_data->to - $node_data->from;
$snippets .= substr($snippet, 0, $selection_start)
. "\e[30;47m" . substr($snippet, $selection_start, $selection_length)
. "\e[0m" . substr($snippet, $selection_length + $selection_start) . "\n\n";
}
}
} else {
$snippets .= ' ' . $node_data['label'] . "\n";
$snippets .= ' <no known location>' . "\n\n";
}
}
return $snippets;
}
}
| 1 | 11,608 | Please use snake_case for properties to keep it consistent with the rest of the codebase. | vimeo-psalm | php |
@@ -304,7 +304,9 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
}
s.metrics.HandledStreamCount.Inc()
- s.logger.Infof("successfully connected to peer (inbound) %s", i.BzzAddress.ShortString())
+ s.logger.Debugf("successfully connected to peer (inbound) %s", i.BzzAddress.ShortString())
+ s.logger.Infof("successfully connected to peer (inbound)")
+
})
h.Network().SetConnHandler(func(_ network.Conn) { | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package libp2p
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"net"
"sync"
"time"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/bzz"
beecrypto "github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/libp2p/internal/blocklist"
"github.com/ethersphere/bee/pkg/p2p/libp2p/internal/breaker"
handshake "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/handshake"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/libp2p/go-libp2p"
autonat "github.com/libp2p/go-libp2p-autonat"
crypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
libp2ppeer "github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
tptu "github.com/libp2p/go-libp2p-transport-upgrader"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-tcp-transport"
ws "github.com/libp2p/go-ws-transport"
ma "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multistream"
)
var (
_ p2p.Service = (*Service)(nil)
_ p2p.DebugService = (*Service)(nil)
)
type Service struct {
ctx context.Context
host host.Host
natManager basichost.NATManager
natAddrResolver *staticAddressResolver
libp2pPeerstore peerstore.Peerstore
metrics metrics
networkID uint64
handshakeService *handshake.Service
addressbook addressbook.Putter
peers *peerRegistry
connectionBreaker breaker.Interface
blocklist *blocklist.Blocklist
protocols []p2p.ProtocolSpec
notifier p2p.Notifier
logger logging.Logger
tracer *tracing.Tracer
protocolsmu sync.RWMutex
}
type Options struct {
PrivateKey *ecdsa.PrivateKey
NATAddr string
EnableWS bool
EnableQUIC bool
Standalone bool
LightNode bool
WelcomeMessage string
}
func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, ab addressbook.Putter, storer storage.StateStorer, logger logging.Logger, tracer *tracing.Tracer, o Options) (*Service, error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, fmt.Errorf("address: %w", err)
}
ip4Addr := "0.0.0.0"
ip6Addr := "::1"
if host != "" {
ip := net.ParseIP(host)
if ip4 := ip.To4(); ip4 != nil {
ip4Addr = ip4.String()
ip6Addr = ""
} else if ip6 := ip.To16(); ip6 != nil {
ip6Addr = ip6.String()
ip4Addr = ""
}
}
var listenAddrs []string
if ip4Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port))
if o.EnableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port))
}
if o.EnableQUIC {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/udp/%s/quic", ip4Addr, port))
}
}
if ip6Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port))
if o.EnableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", ip6Addr, port))
}
if o.EnableQUIC {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/udp/%s/quic", ip6Addr, port))
}
}
security := libp2p.DefaultSecurity
libp2pPeerstore := pstoremem.NewPeerstore()
var natManager basichost.NATManager
opts := []libp2p.Option{
libp2p.ListenAddrStrings(listenAddrs...),
security,
// Use dedicated peerstore instead the global DefaultPeerstore
libp2p.Peerstore(libp2pPeerstore),
}
if o.NATAddr == "" {
opts = append(opts,
libp2p.NATManager(func(n network.Network) basichost.NATManager {
natManager = basichost.NewNATManager(n)
return natManager
}),
)
}
if o.PrivateKey != nil {
opts = append(opts,
libp2p.Identity((*crypto.Secp256k1PrivateKey)(o.PrivateKey)),
)
}
transports := []libp2p.Option{
libp2p.Transport(func(u *tptu.Upgrader) *tcp.TcpTransport {
t := tcp.NewTCPTransport(u)
t.DisableReuseport = true
return t
}),
}
if o.EnableWS {
transports = append(transports, libp2p.Transport(ws.New))
}
if o.EnableQUIC {
transports = append(transports, libp2p.Transport(libp2pquic.NewTransport))
}
if o.Standalone {
opts = append(opts, libp2p.NoListenAddrs)
}
opts = append(opts, transports...)
h, err := libp2p.New(ctx, opts...)
if err != nil {
return nil, err
}
// Support same non default security and transport options as
// original host.
dialer, err := libp2p.New(ctx, append(transports, security)...)
if err != nil {
return nil, err
}
// If you want to help other peers to figure out if they are behind
// NATs, you can launch the server-side of AutoNAT too (AutoRelay
// already runs the client)
if _, err = autonat.New(ctx, h, autonat.EnableService(dialer.Network())); err != nil {
return nil, fmt.Errorf("autonat: %w", err)
}
var advertisableAddresser handshake.AdvertisableAddressResolver
var natAddrResolver *staticAddressResolver
if o.NATAddr == "" {
advertisableAddresser = &UpnpAddressResolver{
host: h,
}
} else {
natAddrResolver, err = newStaticAddressResolver(o.NATAddr)
if err != nil {
return nil, fmt.Errorf("static nat: %w", err)
}
advertisableAddresser = natAddrResolver
}
handshakeService, err := handshake.New(signer, advertisableAddresser, overlay, networkID, o.LightNode, o.WelcomeMessage, logger)
if err != nil {
return nil, fmt.Errorf("handshake service: %w", err)
}
peerRegistry := newPeerRegistry()
s := &Service{
ctx: ctx,
host: h,
natManager: natManager,
natAddrResolver: natAddrResolver,
handshakeService: handshakeService,
libp2pPeerstore: libp2pPeerstore,
metrics: newMetrics(),
networkID: networkID,
peers: peerRegistry,
addressbook: ab,
blocklist: blocklist.NewBlocklist(storer),
logger: logger,
tracer: tracer,
connectionBreaker: breaker.NewBreaker(breaker.Options{}), // use default options
}
peerRegistry.setDisconnecter(s)
// Construct protocols.
id := protocol.ID(p2p.NewSwarmStreamName(handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName))
matcher, err := s.protocolSemverMatcher(id)
if err != nil {
return nil, fmt.Errorf("protocol version match %s: %w", id, err)
}
// handshake
s.host.SetStreamHandlerMatch(id, matcher, func(stream network.Stream) {
peerID := stream.Conn().RemotePeer()
handshakeStream := NewStream(stream)
i, err := s.handshakeService.Handle(ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), peerID)
if err != nil {
s.logger.Debugf("handshake: handle %s: %v", peerID, err)
s.logger.Errorf("unable to handshake with peer %v", peerID)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(peerID)
return
}
blocked, err := s.blocklist.Exists(i.BzzAddress.Overlay)
if err != nil {
s.logger.Debugf("blocklisting: exists %s: %v", peerID, err)
s.logger.Errorf("internal error while connecting with peer %s", peerID)
_ = s.host.Network().ClosePeer(peerID)
return
}
if blocked {
s.logger.Errorf("blocked connection from blocklisted peer %s", peerID)
_ = s.host.Network().ClosePeer(peerID)
return
}
if exists := s.peers.addIfNotExists(stream.Conn(), i.BzzAddress.Overlay); exists {
if err = handshakeStream.FullClose(); err != nil {
s.logger.Debugf("handshake: could not close stream %s: %v", peerID, err)
s.logger.Errorf("unable to handshake with peer %v", peerID)
_ = s.Disconnect(i.BzzAddress.Overlay)
}
return
}
if err = handshakeStream.FullClose(); err != nil {
s.logger.Debugf("handshake: could not close stream %s: %v", peerID, err)
s.logger.Errorf("unable to handshake with peer %v", peerID)
_ = s.Disconnect(i.BzzAddress.Overlay)
return
}
err = s.addressbook.Put(i.BzzAddress.Overlay, *i.BzzAddress)
if err != nil {
s.logger.Debugf("handshake: addressbook put error %s: %v", peerID, err)
s.logger.Errorf("unable to persist peer %v", peerID)
_ = s.Disconnect(i.BzzAddress.Overlay)
return
}
peer := p2p.Peer{Address: i.BzzAddress.Overlay}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.ConnectIn != nil {
if err := tn.ConnectIn(ctx, peer); err != nil {
s.logger.Debugf("connectIn: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, i.BzzAddress.Overlay, err)
}
}
}
s.protocolsmu.RUnlock()
if s.notifier != nil {
if err := s.notifier.Connected(ctx, peer); err != nil {
s.logger.Debugf("notifier.Connected: peer: %s: %v", i.BzzAddress.Overlay, err)
}
}
s.metrics.HandledStreamCount.Inc()
s.logger.Infof("successfully connected to peer (inbound) %s", i.BzzAddress.ShortString())
})
h.Network().SetConnHandler(func(_ network.Conn) {
s.metrics.HandledConnectionCount.Inc()
})
h.Network().Notify(peerRegistry) // update peer registry on network events
h.Network().Notify(s.handshakeService) // update handshake service on network events
return s, nil
}
func (s *Service) SetNotifier(n p2p.Notifier) {
s.notifier = n
}
func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
for _, ss := range p.StreamSpecs {
ss := ss
id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
matcher, err := s.protocolSemverMatcher(id)
if err != nil {
return fmt.Errorf("protocol version match %s: %w", id, err)
}
s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) {
peerID := streamlibp2p.Conn().RemotePeer()
overlay, found := s.peers.overlay(peerID)
if !found {
_ = s.Disconnect(overlay)
s.logger.Debugf("overlay address for peer %q not found", peerID)
return
}
stream := newStream(streamlibp2p)
// exchange headers
if err := handleHeaders(ss.Headler, stream); err != nil {
s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: handle headers: %v", p.Name, p.Version, ss.Name, overlay, err)
_ = stream.Reset()
return
}
ctx, cancel := context.WithCancel(s.ctx)
s.peers.addStream(peerID, streamlibp2p, cancel)
defer s.peers.removeStream(peerID, streamlibp2p)
// tracing: get span tracing context and add it to the context
// silently ignore if the peer is not providing tracing
ctx, err := s.tracer.WithContextFromHeaders(ctx, stream.Headers())
if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: get tracing context: %v", p.Name, p.Version, ss.Name, overlay, err)
return
}
logger := tracing.NewLoggerWithTraceID(ctx, s.logger)
s.metrics.HandledStreamCount.Inc()
if err := ss.Handler(ctx, p2p.Peer{Address: overlay}, stream); err != nil {
var de *p2p.DisconnectError
if errors.As(err, &de) {
_ = s.Disconnect(overlay)
}
var bpe *p2p.BlockPeerError
if errors.As(err, &bpe) {
if err := s.blocklist.Add(overlay, bpe.Duration()); err != nil {
s.logger.Debugf("blocklist: could blocklist peer %s: %v", peerID, err)
s.logger.Errorf("unable to blocklist peer %v", peerID)
_ = s.Disconnect(overlay)
}
s.logger.Tracef("blocklisted a peer %s", peerID)
_ = s.Disconnect(overlay)
}
logger.Debugf("could not handle protocol %s/%s: stream %s: peer %s: error: %v", p.Name, p.Version, ss.Name, overlay, err)
return
}
})
}
s.protocolsmu.Lock()
s.protocols = append(s.protocols, p)
s.protocolsmu.Unlock()
return nil
}
func (s *Service) Addresses() (addreses []ma.Multiaddr, err error) {
for _, addr := range s.host.Addrs() {
a, err := buildUnderlayAddress(addr, s.host.ID())
if err != nil {
return nil, err
}
addreses = append(addreses, a)
}
if s.natAddrResolver != nil && len(addreses) > 0 {
a, err := s.natAddrResolver.Resolve(addreses[0])
if err != nil {
return nil, err
}
addreses = append(addreses, a)
}
return addreses, nil
}
func (s *Service) NATManager() basichost.NATManager {
return s.natManager
}
func (s *Service) Blocklist(overlay swarm.Address, duration time.Duration) error {
if err := s.blocklist.Add(overlay, duration); err != nil {
s.logger.Debugf("blocklist: blocklist peer %s: %v", overlay, err)
_ = s.Disconnect(overlay)
return err
}
_ = s.Disconnect(overlay)
return nil
}
func buildHostAddress(peerID libp2ppeer.ID) (ma.Multiaddr, error) {
return ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peerID.Pretty()))
}
func buildUnderlayAddress(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
// Build host multiaddress
hostAddr, err := buildHostAddress(peerID)
if err != nil {
return nil, err
}
return addr.Encapsulate(hostAddr), nil
}
func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) {
// Extract the peer ID from the multiaddr.
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
if err != nil {
return nil, fmt.Errorf("addr from p2p: %w", err)
}
hostAddr, err := buildHostAddress(info.ID)
if err != nil {
return nil, fmt.Errorf("build host address: %w", err)
}
remoteAddr := addr.Decapsulate(hostAddr)
if overlay, found := s.peers.isConnected(info.ID, remoteAddr); found {
address = &bzz.Address{
Overlay: overlay,
Underlay: addr,
}
return address, p2p.ErrAlreadyConnected
}
if err := s.connectionBreaker.Execute(func() error { return s.host.Connect(ctx, *info) }); err != nil {
if errors.Is(err, breaker.ErrClosed) {
return nil, p2p.NewConnectionBackoffError(err, s.connectionBreaker.ClosedUntil())
}
return nil, err
}
stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)
if err != nil {
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("connect new stream: %w", err)
}
handshakeStream := NewStream(stream)
i, err := s.handshakeService.Handshake(ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), stream.Conn().RemotePeer())
if err != nil {
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("handshake: %w", err)
}
blocked, err := s.blocklist.Exists(i.BzzAddress.Overlay)
if err != nil {
s.logger.Debugf("blocklisting: exists %s: %v", info.ID, err)
s.logger.Errorf("internal error while connecting with peer %s", info.ID)
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("peer blocklisted")
}
if blocked {
s.logger.Errorf("blocked connection from blocklisted peer %s", info.ID)
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("peer blocklisted")
}
if exists := s.peers.addIfNotExists(stream.Conn(), i.BzzAddress.Overlay); exists {
if err := handshakeStream.FullClose(); err != nil {
_ = s.Disconnect(i.BzzAddress.Overlay)
return nil, fmt.Errorf("peer exists, full close: %w", err)
}
return i.BzzAddress, nil
}
if err := handshakeStream.FullClose(); err != nil {
_ = s.Disconnect(i.BzzAddress.Overlay)
return nil, fmt.Errorf("connect full close %w", err)
}
err = s.addressbook.Put(i.BzzAddress.Overlay, *i.BzzAddress)
if err != nil {
_ = s.Disconnect(i.BzzAddress.Overlay)
return nil, fmt.Errorf("storing bzz address: %w", err)
}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.ConnectOut != nil {
if err := tn.ConnectOut(ctx, p2p.Peer{Address: i.BzzAddress.Overlay}); err != nil {
s.logger.Debugf("connectOut: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, i.BzzAddress.Overlay, err)
}
}
}
s.protocolsmu.RUnlock()
s.metrics.CreatedConnectionCount.Inc()
s.logger.Infof("successfully connected to peer (outbound) %s", i.BzzAddress.ShortString())
return i.BzzAddress, nil
}
func (s *Service) Disconnect(overlay swarm.Address) error {
found, peerID := s.peers.remove(overlay)
if !found {
return p2p.ErrPeerNotFound
}
_ = s.host.Network().ClosePeer(peerID)
peer := p2p.Peer{Address: overlay}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.DisconnectOut != nil {
if err := tn.DisconnectOut(peer); err != nil {
s.logger.Debugf("disconnectOut: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, overlay, err)
}
}
}
s.protocolsmu.RUnlock()
if s.notifier != nil {
s.notifier.Disconnected(peer)
}
return nil
}
// disconnected is a registered peer registry event
func (s *Service) disconnected(address swarm.Address) {
peer := p2p.Peer{Address: address}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.DisconnectIn != nil {
if err := tn.DisconnectIn(peer); err != nil {
s.logger.Debugf("disconnectIn: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, address.String(), err)
}
}
}
s.protocolsmu.RUnlock()
if s.notifier != nil {
s.notifier.Disconnected(peer)
}
}
func (s *Service) Peers() []p2p.Peer {
return s.peers.peers()
}
func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
peerID, found := s.peers.peerID(overlay)
if !found {
return nil, p2p.ErrPeerNotFound
}
streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName)
if err != nil {
return nil, fmt.Errorf("new stream for peerid: %w", err)
}
stream := newStream(streamlibp2p)
// tracing: add span context header
if headers == nil {
headers = make(p2p.Headers)
}
if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
return nil, err
}
// exchange headers
if err := sendHeaders(ctx, headers, stream); err != nil {
_ = stream.Reset()
return nil, fmt.Errorf("send headers: %w", err)
}
return stream, nil
}
func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) {
swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName))
if err != nil {
if st != nil {
s.logger.Debug("stream experienced unexpected early close")
_ = st.Close()
}
if err == multistream.ErrNotSupported || err == multistream.ErrIncorrectVersion {
return nil, p2p.NewIncompatibleStreamError(err)
}
return nil, fmt.Errorf("create stream %q to %q: %w", swarmStreamName, peerID, err)
}
s.metrics.CreatedStreamCount.Inc()
return st, nil
}
func (s *Service) Close() error {
if err := s.libp2pPeerstore.Close(); err != nil {
return err
}
return s.host.Close()
}
// SetWelcomeMessage sets the welcome message for the handshake protocol.
func (s *Service) SetWelcomeMessage(val string) error {
return s.handshakeService.SetWelcomeMessage(val)
}
// GetWelcomeMessage returns the value of the welcome message.
func (s *Service) GetWelcomeMessage() string {
return s.handshakeService.GetWelcomeMessage()
}
 | 1 | 13,648 | I don't understand why we need this change. What information does this provide when running at less than Debug log level? I prefer to revert this. | ethersphere-bee | go |
@@ -170,6 +170,9 @@ public class ExecutionFlowDaoTest {
final List<ExecutableFlow> flows = this.executionFlowDao
.fetchRecentlyFinishedFlows(RECENTLY_FINISHED_LIFETIME);
assertThat(flows.size()).isEqualTo(0);
+
+ //Restore the clock
+ DateTimeUtils.setCurrentMillisOffset(0);
}
@Test | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import azkaban.db.DatabaseOperator;
import azkaban.test.Utils;
import azkaban.test.executions.ExecutionsTestUtil;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import azkaban.utils.TestUtils;
import java.io.File;
import java.sql.SQLException;
import java.time.Duration;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.joda.time.DateTimeUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
public class ExecutionFlowDaoTest {
private static final Duration RECENTLY_FINISHED_LIFETIME = Duration.ofMinutes(1);
private static final Duration FLOW_FINISHED_TIME = Duration.ofMinutes(2);
private static DatabaseOperator dbOperator;
private ExecutionFlowDao executionFlowDao;
private ExecutorDao executorDao;
private AssignExecutorDao assignExecutor;
private FetchActiveFlowDao fetchActiveFlowDao;
private ExecutionJobDao executionJobDao;
@BeforeClass
public static void setUp() throws Exception {
dbOperator = Utils.initTestDB();
}
@AfterClass
public static void destroyDB() throws Exception {
try {
dbOperator.update("DROP ALL OBJECTS");
dbOperator.update("SHUTDOWN");
} catch (final SQLException e) {
e.printStackTrace();
}
}
@Before
public void setup() {
this.executionFlowDao = new ExecutionFlowDao(dbOperator);
this.executorDao = new ExecutorDao(dbOperator);
this.assignExecutor = new AssignExecutorDao(dbOperator, this.executorDao);
this.fetchActiveFlowDao = new FetchActiveFlowDao(dbOperator);
this.executionJobDao = new ExecutionJobDao(dbOperator);
}
@After
public void clearDB() {
try {
dbOperator.update("DELETE FROM execution_flows");
dbOperator.update("DELETE FROM executors");
} catch (final SQLException e) {
e.printStackTrace();
}
}
private ExecutableFlow createTestFlow() throws Exception {
return TestUtils.createTestExecutableFlow("exectest1", "exec1");
}
@Test
public void testUploadAndFetchExecutionFlows() throws Exception {
final ExecutableFlow flow = createTestFlow();
this.executionFlowDao.uploadExecutableFlow(flow);
final ExecutableFlow fetchFlow =
this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId());
assertThat(flow).isNotSameAs(fetchFlow);
assertTwoFlowSame(flow, fetchFlow);
}
@Test
public void testUpdateExecutableFlow() throws Exception {
final ExecutableFlow flow = createTestFlow();
this.executionFlowDao.uploadExecutableFlow(flow);
final ExecutableFlow fetchFlow =
this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId());
fetchFlow.setEndTime(System.currentTimeMillis());
fetchFlow.setStatus(Status.SUCCEEDED);
this.executionFlowDao.updateExecutableFlow(fetchFlow);
final ExecutableFlow fetchFlow2 =
this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId());
assertTwoFlowSame(fetchFlow, fetchFlow2);
}
@Test
public void fetchFlowHistory() throws Exception {
final ExecutableFlow flow = createTestFlow();
this.executionFlowDao.uploadExecutableFlow(flow);
final List<ExecutableFlow> flowList1 = this.executionFlowDao.fetchFlowHistory(0, 2);
assertThat(flowList1.size()).isEqualTo(1);
final List<ExecutableFlow> flowList2 = this.executionFlowDao
.fetchFlowHistory(flow.getProjectId(), flow.getId(), 0, 2);
assertThat(flowList2.size()).isEqualTo(1);
final ExecutableFlow fetchFlow =
this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId());
assertTwoFlowSame(flowList1.get(0), flowList2.get(0));
assertTwoFlowSame(flowList1.get(0), fetchFlow);
}
@Test
public void testFetchRecentlyFinishedFlows() throws Exception {
final ExecutableFlow flow1 = createTestFlow();
this.executionFlowDao.uploadExecutableFlow(flow1);
flow1.setStatus(Status.SUCCEEDED);
flow1.setEndTime(System.currentTimeMillis());
this.executionFlowDao.updateExecutableFlow(flow1);
//Flow just finished. Fetch recently finished flows immediately. Should get it.
final List<ExecutableFlow> flows = this.executionFlowDao.fetchRecentlyFinishedFlows(
RECENTLY_FINISHED_LIFETIME);
assertThat(flows.size()).isEqualTo(1);
assertTwoFlowSame(flow1, flows.get(0));
}
@Test
public void testFetchEmptyRecentlyFinishedFlows() throws Exception {
final ExecutableFlow flow1 = createTestFlow();
this.executionFlowDao.uploadExecutableFlow(flow1);
flow1.setStatus(Status.SUCCEEDED);
flow1.setEndTime(DateTimeUtils.currentTimeMillis());
this.executionFlowDao.updateExecutableFlow(flow1);
//Todo jamiesjc: use java8.java.time api instead of jodatime
//Mock flow finished time to be 2 min ago.
DateTimeUtils.setCurrentMillisOffset(-FLOW_FINISHED_TIME.toMillis());
flow1.setEndTime(DateTimeUtils.currentTimeMillis());
this.executionFlowDao.updateExecutableFlow(flow1);
//Fetch recently finished flows within 1 min. Should be empty.
final List<ExecutableFlow> flows = this.executionFlowDao
.fetchRecentlyFinishedFlows(RECENTLY_FINISHED_LIFETIME);
assertThat(flows.size()).isEqualTo(0);
}
@Test
public void testFetchQueuedFlows() throws Exception {
final ExecutableFlow flow = createTestFlow();
flow.setStatus(Status.PREPARING);
this.executionFlowDao.uploadExecutableFlow(flow);
final ExecutableFlow flow2 = TestUtils.createTestExecutableFlow("exectest1", "exec2");
flow2.setStatus(Status.PREPARING);
this.executionFlowDao.uploadExecutableFlow(flow2);
final List<Pair<ExecutionReference, ExecutableFlow>> fetchedQueuedFlows = this.executionFlowDao
.fetchQueuedFlows();
assertThat(fetchedQueuedFlows.size()).isEqualTo(2);
final Pair<ExecutionReference, ExecutableFlow> fetchedFlow1 = fetchedQueuedFlows.get(0);
final Pair<ExecutionReference, ExecutableFlow> fetchedFlow2 = fetchedQueuedFlows.get(1);
assertTwoFlowSame(flow, fetchedFlow1.getSecond());
assertTwoFlowSame(flow2, fetchedFlow2.getSecond());
}
@Test
public void testAssignAndUnassignExecutor() throws Exception {
final String host = "localhost";
final int port = 12345;
final Executor executor = this.executorDao.addExecutor(host, port);
final ExecutableFlow flow = TestUtils.createTestExecutableFlow("exectest1", "exec1");
this.executionFlowDao.uploadExecutableFlow(flow);
this.assignExecutor.assignExecutor(executor.getId(), flow.getExecutionId());
final Executor fetchExecutor = this.executorDao
.fetchExecutorByExecutionId(flow.getExecutionId());
assertThat(fetchExecutor).isEqualTo(executor);
this.assignExecutor.unassignExecutor(flow.getExecutionId());
assertThat(this.executorDao.fetchExecutorByExecutionId(flow.getExecutionId())).isNull();
}
/* Test exception when assigning a non-existent executor to a flow */
@Test
public void testAssignExecutorInvalidExecutor() throws Exception {
final ExecutableFlow flow = TestUtils.createTestExecutableFlow("exectest1", "exec1");
this.executionFlowDao.uploadExecutableFlow(flow);
// Since we haven't inserted any executors, 1 should be non-existent executor id.
assertThatThrownBy(
() -> this.assignExecutor.assignExecutor(1, flow.getExecutionId()))
.isInstanceOf(ExecutorManagerException.class)
.hasMessageContaining("non-existent executor");
}
/* Test exception when assigning an executor to a non-existent flow execution */
@Test
public void testAssignExecutorInvalidExecution() throws Exception {
final String host = "localhost";
final int port = 12345;
final Executor executor = this.executorDao.addExecutor(host, port);
// Make 99 a random non-existent execution id.
assertThatThrownBy(
() -> this.assignExecutor.assignExecutor(executor.getId(), 99))
.isInstanceOf(ExecutorManagerException.class)
.hasMessageContaining("non-existent execution");
}
@Test
public void testFetchActiveFlowsExecutorAssigned() throws Exception {
// Upload flow1, executor assigned
final ExecutableFlow flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1");
this.executionFlowDao.uploadExecutableFlow(flow1);
final Executor executor = this.executorDao.addExecutor("test", 1);
this.assignExecutor.assignExecutor(executor.getId(), flow1.getExecutionId());
// Upload flow2, executor not assigned
final ExecutableFlow flow2 = TestUtils.createTestExecutableFlow("exectest1", "exec2");
this.executionFlowDao.uploadExecutableFlow(flow2);
final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows1 =
this.fetchActiveFlowDao.fetchActiveFlows();
assertThat(activeFlows1.containsKey(flow1.getExecutionId())).isTrue();
assertThat(activeFlows1.containsKey(flow2.getExecutionId())).isFalse();
final ExecutableFlow flow1Result =
activeFlows1.get(flow1.getExecutionId()).getSecond();
assertTwoFlowSame(flow1Result, flow1);
}
@Test
public void testFetchActiveFlowsStatusChanged() throws Exception {
final ExecutableFlow flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1");
this.executionFlowDao.uploadExecutableFlow(flow1);
final Executor executor = this.executorDao.addExecutor("test", 1);
this.assignExecutor.assignExecutor(executor.getId(), flow1.getExecutionId());
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows1 =
this.fetchActiveFlowDao.fetchActiveFlows();
assertThat(activeFlows1.containsKey(flow1.getExecutionId())).isTrue();
// When flow status becomes SUCCEEDED/KILLED/FAILED, it should not be in active state
flow1.setStatus(Status.SUCCEEDED);
this.executionFlowDao.updateExecutableFlow(flow1);
activeFlows1 = this.fetchActiveFlowDao.fetchActiveFlows();
assertThat(activeFlows1.containsKey(flow1.getExecutionId())).isFalse();
flow1.setStatus(Status.KILLED);
this.executionFlowDao.updateExecutableFlow(flow1);
activeFlows1 = this.fetchActiveFlowDao.fetchActiveFlows();
assertThat(activeFlows1.containsKey(flow1.getExecutionId())).isFalse();
flow1.setStatus(Status.FAILED);
this.executionFlowDao.updateExecutableFlow(flow1);
activeFlows1 = this.fetchActiveFlowDao.fetchActiveFlows();
assertThat(activeFlows1.containsKey(flow1.getExecutionId())).isFalse();
}
@Test
@Ignore
// TODO jamiesjc: Active_execution_flow table is already deprecated. we should remove related
// test methods as well.
public void testFetchActiveFlowsReferenceChanged() throws Exception {
}
@Test
@Ignore
// TODO jamiesjc: Active_execution_flow table is already deprecated. we should remove related
// test methods as well.
public void testFetchActiveFlowByExecId() throws Exception {
}
@Test
public void testUploadAndFetchExecutableNode() throws Exception {
final ExecutableFlow flow = TestUtils.createTestExecutableFlow("exectest1", "exec1");
flow.setExecutionId(10);
final File jobFile = ExecutionsTestUtil.getFlowFile("exectest1", "job10.job");
final Props props = new Props(null, jobFile);
props.put("test", "test2");
final ExecutableNode oldNode = flow.getExecutableNode("job10");
oldNode.setStartTime(System.currentTimeMillis());
this.executionJobDao.uploadExecutableNode(oldNode, props);
final ExecutableJobInfo info = this.executionJobDao.fetchJobInfo(10, "job10", 0);
assertThat(flow.getEndTime()).isEqualTo(info.getEndTime());
assertThat(flow.getProjectId()).isEqualTo(info.getProjectId());
assertThat(flow.getVersion()).isEqualTo(info.getVersion());
assertThat(flow.getFlowId()).isEqualTo(info.getFlowId());
assertThat(oldNode.getId()).isEqualTo(info.getJobId());
assertThat(oldNode.getStatus()).isEqualTo(info.getStatus());
assertThat(oldNode.getStartTime()).isEqualTo(info.getStartTime());
// Fetch props
final Props outputProps = new Props();
outputProps.put("hello", "output");
oldNode.setOutputProps(outputProps);
oldNode.setEndTime(System.currentTimeMillis());
this.executionJobDao.updateExecutableNode(oldNode);
final Props fInputProps = this.executionJobDao.fetchExecutionJobInputProps(10, "job10");
final Props fOutputProps = this.executionJobDao.fetchExecutionJobOutputProps(10, "job10");
final Pair<Props, Props> inOutProps = this.executionJobDao.fetchExecutionJobProps(10, "job10");
assertThat(fInputProps.get("test")).isEqualTo("test2");
assertThat(fOutputProps.get("hello")).isEqualTo("output");
assertThat(inOutProps.getFirst().get("test")).isEqualTo("test2");
assertThat(inOutProps.getSecond().get("hello")).isEqualTo("output");
}
private void assertTwoFlowSame(final ExecutableFlow flow1, final ExecutableFlow flow2) {
assertThat(flow1.getExecutionId()).isEqualTo(flow2.getExecutionId());
assertThat(flow1.getStatus()).isEqualTo(flow2.getStatus());
assertThat(flow1.getEndTime()).isEqualTo(flow2.getEndTime());
assertThat(flow1.getStartTime()).isEqualTo(flow2.getStartTime());
    assertThat(flow1.getSubmitTime()).isEqualTo(flow2.getSubmitTime());
assertThat(flow1.getFlowId()).isEqualTo(flow2.getFlowId());
assertThat(flow1.getProjectId()).isEqualTo(flow2.getProjectId());
assertThat(flow1.getVersion()).isEqualTo(flow2.getVersion());
assertThat(flow1.getExecutionOptions().getFailureAction())
.isEqualTo(flow2.getExecutionOptions().getFailureAction());
assertThat(new HashSet<>(flow1.getEndNodes())).isEqualTo(new HashSet<>(flow2.getEndNodes()));
}
}
| 1 | 16,066 | Oh, so this test leaks DateTimeUtils offset? It would be safer to place the reset in an `@After` method to not cause side effects even if the test case fails. | azkaban-azkaban | java |
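A hedged sketch (illustrative only, not part of the original test class): the clock restore added by the patch above can instead live in a JUnit 4 @After hook, so the Joda-Time offset is reset even when an assertion fails mid-test. Both org.junit.After and DateTimeUtils are already imported by this file; the method name is an assumption.

  @After
  public void restoreClock() {
    // Offset 0 returns DateTimeUtils.currentTimeMillis() to the system clock.
    DateTimeUtils.setCurrentMillisOffset(0);
  }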
@@ -28,14 +28,6 @@ import { Grid, Cell, Row } from '../../../../../material-components';
import Pagination from './Pagination';
export default function Footer( { tab, footerText } ) {
- if ( ! footerText ) {
- return (
- <div className="googlesitekit-idea-hub__footer">
- <Pagination tab={ tab } />
- </div>
- );
- }
-
return (
<Grid className="googlesitekit-idea-hub__footer">
<Row> | 1 | /**
* Footer component
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
/**
* Internal dependencies
*/
import { Grid, Cell, Row } from '../../../../../material-components';
import Pagination from './Pagination';
export default function Footer( { tab, footerText } ) {
if ( ! footerText ) {
return (
<div className="googlesitekit-idea-hub__footer">
<Pagination tab={ tab } />
</div>
);
}
return (
<Grid className="googlesitekit-idea-hub__footer">
<Row>
<Cell
smSize={ 4 }
mdSize={ 4 }
lgSize={ 6 }
className="googlesitekit-idea-hub__footer--updated"
>
{ footerText }
</Cell>
<Cell smSize={ 4 } mdSize={ 4 } lgSize={ 6 }>
<Pagination tab={ tab } />
</Cell>
</Row>
</Grid>
);
}
Footer.propTypes = {
tab: PropTypes.string,
footerText: PropTypes.string,
};
Footer.defaultProps = {
tab: 'new-ideas',
};
| 1 | 40,919 | Kind of a nit-pick, but is there no way to solve this while still not rendering an empty div if `footerText` is empty? We could still use the `Row` to maintain the same layout but then only render the `Cell` for the pagination - I think it's possible to use specific classes to horizontally offset? | google-site-kit-wp | js |
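A hedged alternative sketch (not the shipped component): it keeps the Grid/Row wrapper for a consistent layout but renders the text Cell only when footerText is present, so no empty cell is emitted. Only imports, class names and cell sizes already used in this file appear below.

export default function Footer( { tab, footerText } ) {
	return (
		<Grid className="googlesitekit-idea-hub__footer">
			<Row>
				{ footerText && (
					<Cell
						smSize={ 4 }
						mdSize={ 4 }
						lgSize={ 6 }
						className="googlesitekit-idea-hub__footer--updated"
					>
						{ footerText }
					</Cell>
				) }
				<Cell smSize={ 4 } mdSize={ 4 } lgSize={ 6 }>
					<Pagination tab={ tab } />
				</Cell>
			</Row>
		</Grid>
	);
}

Horizontally offsetting the remaining pagination cell when the text is absent would need additional layout classes and is deliberately left out of this sketch.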
@@ -23,12 +23,14 @@ import java.util.Objects;
public class DomainChangeMessage {
// Represent the changed object type
- enum ObjectType {
+ public enum ObjectType {
DOMAIN,
- ROLE,
+ ROLE,
+ GROUP,
POLICY,
- SERVICE,
- ENTITY
+ SERVICE,
+ ENTITY,
+ TEMPLATE
}
private String domainName; | 1 | /*
*
* * Copyright The Athenz Authors
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.yahoo.athenz.common.messaging;
import java.util.Objects;
public class DomainChangeMessage {
// Represent the changed object type
enum ObjectType {
DOMAIN,
ROLE,
POLICY,
SERVICE,
ENTITY
}
private String domainName;
// Domain change message id
private String uuid;
// The number of milliseconds since the epoch
private long published;
private ObjectType objectType;
// The changed object name based on {@param objectType}
private String objectName;
// The type of operation that was performed
private String apiName;
public String getDomainName() {
return domainName;
}
public DomainChangeMessage setDomainName(String domainName) {
this.domainName = domainName;
return this;
}
public String getUuid() {
return uuid;
}
public DomainChangeMessage setUuid(String uuid) {
this.uuid = uuid;
return this;
}
public long getPublished() {
return published;
}
public DomainChangeMessage setPublished(long published) {
this.published = published;
return this;
}
public ObjectType getObjectType() {
return objectType;
}
public DomainChangeMessage setObjectType(ObjectType objectType) {
this.objectType = objectType;
return this;
}
public String getObjectName() {
return objectName;
}
public DomainChangeMessage setObjectName(String objectName) {
this.objectName = objectName;
return this;
}
public String getApiName() {
return apiName;
}
public DomainChangeMessage setApiName(String apiName) {
this.apiName = apiName;
return this;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DomainChangeMessage that = (DomainChangeMessage) o;
return published == that.published &&
domainName.equals(that.domainName) &&
uuid.equals(that.uuid) &&
objectType == that.objectType &&
objectName.equals(that.objectName) &&
apiName.equals(that.apiName);
}
@Override
public int hashCode() {
return Objects.hash(domainName, uuid, published, objectType, objectName, apiName);
}
}
| 1 | 6,045 | Template is also not an object type so I don't expect to notify on templates. Instead when applying templates, we'll be updating roles/policies/services/groups. | AthenZ-athenz | java |
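A hypothetical usage sketch (values illustrative; assumes the caller sits in the same package or uses the patched public ObjectType enum): every setter returns this, so a change event can be built as one fluent chain.

    DomainChangeMessage change = new DomainChangeMessage()
            .setDomainName("sports")
            .setObjectType(DomainChangeMessage.ObjectType.ROLE)
            .setObjectName("admin")
            .setApiName("putRole")
            .setUuid("0e4f0e6a-1d2c-4c3b-9a7f-5d1b2c3d4e5f")
            .setPublished(System.currentTimeMillis());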
@@ -93,7 +93,8 @@ class iloc(Accessor):
rows, cols = index
if rows is Ellipsis:
rows = slice(None)
- data = dataset.interface.iloc(dataset.dataset, (rows, cols))
+
+ data = dataset.interface.iloc(dataset, (rows, cols))
kdims = dataset.kdims
vdims = dataset.vdims
if np.isscalar(data): | 1 | from __future__ import absolute_import
import warnings
import param
import numpy as np
from .. import util
from ..element import Element
from ..ndmapping import NdMapping
def get_array_types():
array_types = (np.ndarray,)
da = dask_array_module()
if da is not None:
array_types += (da.Array,)
return array_types
def dask_array_module():
try:
import dask.array as da
return da
except:
return None
def is_dask(array):
da = dask_array_module()
if da is None:
return False
return da and isinstance(array, da.Array)
class DataError(ValueError):
"DataError is raised when the data cannot be interpreted"
def __init__(self, msg, interface=None):
if interface is not None:
msg = '\n\n'.join([msg, interface.error()])
super(DataError, self).__init__(msg)
class Accessor(object):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, index):
from ..data import Dataset
from ...operation.element import method
in_method = self.dataset._in_method
if not in_method:
self.dataset._in_method = True
try:
res = self._perform_getitem(self.dataset, index)
if not in_method and isinstance(res, Dataset):
getitem_op = method.instance(
input_type=type(self),
output_type=type(self.dataset),
method_name='_perform_getitem',
args=[index],
)
res._pipeline = self.dataset.pipeline.instance(
operations=self.dataset.pipeline.operations + [getitem_op],
output_type=type(self.dataset)
)
finally:
if not in_method:
self.dataset._in_method = False
return res
@classmethod
def _perform_getitem(cls, dataset, index):
raise NotImplementedError()
class iloc(Accessor):
"""
    iloc is a small wrapper object that allows row, column based
indexing into a Dataset using the ``.iloc`` property. It supports
the usual numpy and pandas iloc indexing semantics including
integer indices, slices, lists and arrays of values. For more
information see the ``Dataset.iloc`` property docstring.
"""
@classmethod
def _perform_getitem(cls, dataset, index):
index = util.wrap_tuple(index)
if len(index) == 1:
index = (index[0], slice(None))
elif len(index) > 2:
raise IndexError('Tabular index not understood, index '
'must be at most length 2.')
rows, cols = index
if rows is Ellipsis:
rows = slice(None)
data = dataset.interface.iloc(dataset.dataset, (rows, cols))
kdims = dataset.kdims
vdims = dataset.vdims
if np.isscalar(data):
return data
elif cols == slice(None):
pass
else:
if isinstance(cols, slice):
dims = dataset.dimensions()[index[1]]
elif np.isscalar(cols):
dims = [dataset.get_dimension(cols)]
else:
dims = [dataset.get_dimension(d) for d in cols]
kdims = [d for d in dims if d in kdims]
vdims = [d for d in dims if d in vdims]
datatype = [dt for dt in dataset.datatype
if dt in Interface.interfaces and
not Interface.interfaces[dt].gridded]
if not datatype: datatype = ['dataframe', 'dictionary']
return dataset.clone(data, kdims=kdims, vdims=vdims,
datatype=datatype)
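# A hedged usage sketch (illustrative only, not part of this module): given a
# hypothetical tabular Dataset ``ds``, the indexing forms accepted above include
#
#     ds.iloc[0]          # first row, returned as a single-row Dataset
#     ds.iloc[0, 1]       # scalar value at row 0, column 1
#     ds.iloc[:5, [0]]    # first five rows of the first column only
#     ds.iloc[[0, 2], :]  # rows 0 and 2 with all columns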
class ndloc(Accessor):
"""
ndloc is a small wrapper object that allows ndarray-like indexing
for gridded Datasets using the ``.ndloc`` property. It supports
the standard NumPy ndarray indexing semantics including
integer indices, slices, lists and arrays of values. For more
information see the ``Dataset.ndloc`` property docstring.
"""
@classmethod
def _perform_getitem(cls, dataset, indices):
ds = dataset
indices = util.wrap_tuple(indices)
if not ds.interface.gridded:
raise IndexError('Cannot use ndloc on non nd-dimensional datastructure')
selected = dataset.interface.ndloc(ds, indices)
if np.isscalar(selected):
return selected
params = {}
if hasattr(ds, 'bounds'):
params['bounds'] = None
return dataset.clone(selected, datatype=[ds.interface.datatype]+ds.datatype, **params)
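# A hedged usage sketch (illustrative only, not part of this module): ``.ndloc``
# indexes a gridded Dataset by array position rather than by coordinate value,
# e.g. for a hypothetical gridded element ``img``
#
#     img.ndloc[0, :]    # first row of samples
#     img.ndloc[:, 0:5]  # first five columns
#     img.ndloc[0, 0]    # a single scalar sample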
class Interface(param.Parameterized):
interfaces = {}
datatype = None
types = ()
# Denotes whether the interface expects gridded data
gridded = False
# Denotes whether the interface expects ragged data
multi = False
@classmethod
def loaded(cls):
"""
Indicates whether the required dependencies are loaded.
"""
return True
@classmethod
def applies(cls, obj):
"""
Indicates whether the interface is designed specifically to
handle the supplied object's type. By default simply checks
if the object is one of the types declared on the class,
however if the type is expensive to import at load time the
method may be overridden.
"""
return type(obj) in cls.types
@classmethod
def register(cls, interface):
cls.interfaces[interface.datatype] = interface
@classmethod
def cast(cls, datasets, datatype=None, cast_type=None):
"""
Given a list of Dataset objects, cast them to the specified
datatype (by default the format matching the current interface)
with the given cast_type (if specified).
"""
datatype = datatype or cls.datatype
cast = []
for ds in datasets:
if cast_type is not None or ds.interface.datatype != datatype:
ds = ds.clone(ds, datatype=[datatype], new_type=cast_type)
cast.append(ds)
return cast
@classmethod
def error(cls):
info = dict(interface=cls.__name__)
url = "http://holoviews.org/user_guide/%s_Datasets.html"
if cls.multi:
datatype = 'a list of tabular'
info['url'] = url % 'Tabular'
else:
if cls.gridded:
datatype = 'gridded'
else:
datatype = 'tabular'
info['url'] = url % datatype.capitalize()
info['datatype'] = datatype
return ("{interface} expects {datatype} data, for more information "
"on supported datatypes see {url}".format(**info))
@classmethod
def initialize(cls, eltype, data, kdims, vdims, datatype=None):
# Process params and dimensions
if isinstance(data, Element):
pvals = util.get_param_values(data)
kdims = pvals.get('kdims') if kdims is None else kdims
vdims = pvals.get('vdims') if vdims is None else vdims
# Process Element data
if (hasattr(data, 'interface') and issubclass(data.interface, Interface)):
if datatype is None:
datatype = [dt for dt in data.datatype if dt in eltype.datatype]
if not datatype:
datatype = eltype.datatype
interface = data.interface
if interface.datatype in datatype and interface.datatype in eltype.datatype:
data = data.data
elif interface.gridded and any(cls.interfaces[dt].gridded for dt in datatype):
new_data = []
for kd in data.kdims:
irregular = interface.irregular(data, kd)
coords = data.dimension_values(kd.name, expanded=irregular,
flat=not irregular)
new_data.append(coords)
for vd in data.vdims:
new_data.append(interface.values(data, vd, flat=False, compute=False))
data = tuple(new_data)
else:
data = tuple(data.columns().values())
elif isinstance(data, Element):
data = tuple(data.dimension_values(d) for d in kdims+vdims)
elif isinstance(data, util.generator_types):
data = list(data)
if datatype is None:
datatype = eltype.datatype
# Set interface priority order
prioritized = [cls.interfaces[p] for p in datatype
if p in cls.interfaces]
head = [intfc for intfc in prioritized if intfc.applies(data)]
if head:
# Prioritize interfaces which have matching types
prioritized = head + [el for el in prioritized if el != head[0]]
# Iterate over interfaces until one can interpret the input
priority_errors = []
for interface in prioritized:
if not interface.loaded() and len(datatype) != 1:
# Skip interface if it is not loaded and was not explicitly requested
continue
try:
(data, dims, extra_kws) = interface.init(eltype, data, kdims, vdims)
break
except DataError:
raise
except Exception as e:
if interface in head:
priority_errors.append((interface, e))
else:
error = ("None of the available storage backends were able "
"to support the supplied data format.")
if priority_errors:
intfc, e = priority_errors[0]
priority_error = ("%s raised following error:\n\n %s"
% (intfc.__name__, e))
error = ' '.join([error, priority_error])
raise DataError(error)
return data, interface, dims, extra_kws
@classmethod
def validate(cls, dataset, vdims=True):
dims = 'all' if vdims else 'key'
not_found = [d for d in dataset.dimensions(dims, label='name')
if d not in dataset.data]
if not_found:
raise DataError("Supplied data does not contain specified "
"dimensions, the following dimensions were "
"not found: %s" % repr(not_found), cls)
@classmethod
def expanded(cls, arrays):
return not any(array.shape not in [arrays[0].shape, (1,)] for array in arrays[1:])
@classmethod
def isscalar(cls, dataset, dim):
return len(cls.values(dataset, dim, expanded=False)) == 1
@classmethod
def dtype(cls, dataset, dimension):
name = dataset.get_dimension(dimension, strict=True).name
data = dataset.data[name]
if util.isscalar(data):
return np.array([data]).dtype
else:
return data.dtype
@classmethod
def select_mask(cls, dataset, selection):
"""
Given a Dataset object and a dictionary with dimension keys and
        selection keys (i.e. tuple ranges, slices, sets, lists or literals)
return a boolean mask over the rows in the Dataset object that
have been selected.
"""
mask = np.ones(len(dataset), dtype=np.bool)
for dim, sel in selection.items():
if isinstance(sel, tuple):
sel = slice(*sel)
arr = cls.values(dataset, dim)
if util.isdatetime(arr) and util.pd:
try:
sel = util.parse_datetime_selection(sel)
except:
pass
if isinstance(sel, slice):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered')
if sel.start is not None:
mask &= sel.start <= arr
if sel.stop is not None:
mask &= arr < sel.stop
elif isinstance(sel, (set, list)):
iter_slcs = []
for ik in sel:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered')
iter_slcs.append(arr == ik)
mask &= np.logical_or.reduce(iter_slcs)
elif callable(sel):
mask &= sel(arr)
else:
index_mask = arr == sel
if dataset.ndims == 1 and np.sum(index_mask) == 0:
data_index = np.argmin(np.abs(arr - sel))
mask = np.zeros(len(dataset), dtype=np.bool)
mask[data_index] = True
else:
mask &= index_mask
return mask
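    # A hedged illustration (comment only): a selection such as
    #     {'x': (0, 5), 'y': ['a', 'b']}
    # is applied above as ``(0 <= x) & (x < 5)`` intersected with
    # ``(y == 'a') | (y == 'b')`` over the column values, yielding one boolean
    # per row of the Dataset.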
@classmethod
def indexed(cls, dataset, selection):
"""
        Given a Dataset object and a selection to be applied, returns a
        boolean indicating whether a scalar value has been indexed.
"""
selected = list(selection.keys())
all_scalar = all((not isinstance(sel, (tuple, slice, set, list))
and not callable(sel)) for sel in selection.values())
all_kdims = all(d in selected for d in dataset.kdims)
return all_scalar and all_kdims
@classmethod
def range(cls, dataset, dimension):
column = dataset.dimension_values(dimension)
if column.dtype.kind == 'M':
return column.min(), column.max()
elif len(column) == 0:
return np.NaN, np.NaN
else:
try:
assert column.dtype.kind not in 'SUO'
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
return (np.nanmin(column), np.nanmax(column))
except (AssertionError, TypeError):
column = [v for v in util.python2sort(column) if v is not None]
if not len(column):
return np.NaN, np.NaN
return column[0], column[-1]
@classmethod
def concatenate(cls, datasets, datatype=None, new_type=None):
"""
Utility function to concatenate an NdMapping of Dataset objects.
"""
from . import Dataset, default_datatype
new_type = new_type or Dataset
if isinstance(datasets, NdMapping):
dimensions = datasets.kdims
keys, datasets = zip(*datasets.data.items())
elif isinstance(datasets, list) and all(not isinstance(v, tuple) for v in datasets):
# Allow concatenating list of datasets (by declaring no dimensions and keys)
dimensions, keys = [], [()]*len(datasets)
else:
raise DataError('Concatenation only supported for NdMappings '
'and lists of Datasets, found %s.' % type(datasets).__name__)
template = datasets[0]
datatype = datatype or template.interface.datatype
# Handle non-general datatypes by casting to general type
if datatype == 'array':
datatype = default_datatype
elif datatype == 'image':
datatype = 'grid'
if len(datasets) > 1 and not dimensions and cls.interfaces[datatype].gridded:
raise DataError('Datasets with %s datatype cannot be concatenated '
'without defining the dimensions to concatenate along. '
'Ensure you pass in a NdMapping (e.g. a HoloMap) '
'of Dataset types, not a list.' % datatype)
datasets = template.interface.cast(datasets, datatype)
template = datasets[0]
data = list(zip(keys, datasets)) if keys else datasets
concat_data = template.interface.concat(data, dimensions, vdims=template.vdims)
return template.clone(concat_data, kdims=dimensions+template.kdims, new_type=new_type)
@classmethod
def reduce(cls, dataset, reduce_dims, function, **kwargs):
kdims = [kdim for kdim in dataset.kdims if kdim not in reduce_dims]
return cls.aggregate(dataset, kdims, function, **kwargs)
@classmethod
def array(cls, dataset, dimensions):
return Element.array(dataset, dimensions)
@classmethod
def dframe(cls, dataset, dimensions):
return Element.dframe(dataset, dimensions)
@classmethod
def columns(cls, dataset, dimensions):
return Element.columns(dataset, dimensions)
@classmethod
def shape(cls, dataset):
return dataset.data.shape
@classmethod
def length(cls, dataset):
return len(dataset.data)
@classmethod
def nonzero(cls, dataset):
return bool(cls.length(dataset))
@classmethod
def redim(cls, dataset, dimensions):
return dataset.data
@classmethod
def has_holes(cls, dataset):
return False
@classmethod
def holes(cls, dataset):
coords = cls.values(dataset, dataset.kdims[0])
splits = np.where(np.isnan(coords.astype('float')))[0]
return [[[]]*(len(splits)+1)]
@classmethod
def as_dframe(cls, dataset):
"""
Returns the data of a Dataset as a dataframe avoiding copying
        if it is already a dataframe type.
"""
return dataset.dframe()
| 1 | 23,238 | Had to fix this to get my tests passing (should have been a new PR sorry). | holoviz-holoviews | py |
@@ -91,6 +91,11 @@ type VaultIssuer struct {
Server string `json:"server"`
// Vault URL path to the certificate role
Path string `json:"path"`
+ // Base64 encoded CA bundle to validate Vault server certificate. Only used
+ // if the Server URL is using HTTPS protocol. This parameter is ignored for
+ // plain HTTP protocol connection. If not set the system root certificates
+ // are used to validate the TLS connection.
+ CABundle []byte `json:"caBundle"`
}
// Vault authentication can be configured: | 1 | /*
Copyright 2018 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=clusterissuers
type ClusterIssuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterIssuerList is a list of Issuers
type ClusterIssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ClusterIssuer `json:"items"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=issuers
type Issuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IssuerList is a list of Issuers
type IssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Issuer `json:"items"`
}
// IssuerSpec is the specification of an Issuer. This includes any
// configuration required for the issuer.
type IssuerSpec struct {
IssuerConfig `json:",inline"`
}
type IssuerConfig struct {
ACME *ACMEIssuer `json:"acme,omitempty"`
CA *CAIssuer `json:"ca,omitempty"`
Vault *VaultIssuer `json:"vault,omitempty"`
SelfSigned *SelfSignedIssuer `json:"selfSigned,omitempty"`
}
type SelfSignedIssuer struct {
}
type VaultIssuer struct {
// Vault authentication
Auth VaultAuth `json:"auth"`
// Server is the vault connection address
Server string `json:"server"`
// Vault URL path to the certificate role
Path string `json:"path"`
}
// Vault authentication can be configured:
// - With a secret containing a token. Cert-manager is using this token as-is.
// - With a secret containing an AppRole. This AppRole is used to authenticate to
// Vault and retrieve a token.
type VaultAuth struct {
// This Secret contains the Vault token key
TokenSecretRef SecretKeySelector `json:"tokenSecretRef,omitempty"`
	// This Secret contains an AppRole and Secret
AppRole VaultAppRole `json:"appRole,omitempty"`
}
type VaultAppRole struct {
// Where the authentication path is mounted in Vault.
Path string `json:"path"`
RoleId string `json:"roleId"`
SecretRef SecretKeySelector `json:"secretRef"`
}
type CAIssuer struct {
// SecretName is the name of the secret used to sign Certificates issued
// by this Issuer.
SecretName string `json:"secretName"`
}
// ACMEIssuer contains the specification for an ACME issuer
type ACMEIssuer struct {
// Email is the email for this account
Email string `json:"email"`
// Server is the ACME server URL
Server string `json:"server"`
// If true, skip verifying the ACME server TLS certificate
SkipTLSVerify bool `json:"skipTLSVerify,omitempty"`
// PrivateKey is the name of a secret containing the private key for this
// user account.
PrivateKey SecretKeySelector `json:"privateKeySecretRef"`
// HTTP-01 config
HTTP01 *ACMEIssuerHTTP01Config `json:"http01,omitempty"`
// DNS-01 config
DNS01 *ACMEIssuerDNS01Config `json:"dns01,omitempty"`
}
// ACMEIssuerHTTP01Config is a structure containing the ACME HTTP configuration options
type ACMEIssuerHTTP01Config struct {
// Optional service type for Kubernetes solver service
ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
}
// ACMEIssuerDNS01Config is a structure containing the ACME DNS configuration
// options
type ACMEIssuerDNS01Config struct {
Providers []ACMEIssuerDNS01Provider `json:"providers"`
}
type ACMEIssuerDNS01Provider struct {
Name string `json:"name"`
Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"`
CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"clouddns,omitempty"`
Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"`
Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"`
AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azuredns,omitempty"`
AcmeDNS *ACMEIssuerDNS01ProviderAcmeDNS `json:"acmedns,omitempty"`
RFC2136 *ACMEIssuerDNS01ProviderRFC2136 `json:"rfc2136,omitempty"`
}
// ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS
// configuration for Akamai DNS—Zone Record Management API
type ACMEIssuerDNS01ProviderAkamai struct {
ServiceConsumerDomain string `json:"serviceConsumerDomain"`
ClientToken SecretKeySelector `json:"clientTokenSecretRef"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
AccessToken SecretKeySelector `json:"accessTokenSecretRef"`
}
// ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS
// configuration for Google Cloud DNS
type ACMEIssuerDNS01ProviderCloudDNS struct {
ServiceAccount SecretKeySelector `json:"serviceAccountSecretRef"`
Project string `json:"project"`
}
// ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS
// configuration for Cloudflare
type ACMEIssuerDNS01ProviderCloudflare struct {
Email string `json:"email"`
APIKey SecretKeySelector `json:"apiKeySecretRef"`
}
// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53
// configuration for AWS
type ACMEIssuerDNS01ProviderRoute53 struct {
AccessKeyID string `json:"accessKeyID"`
SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"`
HostedZoneID string `json:"hostedZoneID"`
Region string `json:"region"`
}
// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the
// configuration for Azure DNS
type ACMEIssuerDNS01ProviderAzureDNS struct {
ClientID string `json:"clientID"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
SubscriptionID string `json:"subscriptionID"`
TenantID string `json:"tenantID"`
ResourceGroupName string `json:"resourceGroupName"`
// + optional
HostedZoneName string `json:"hostedZoneName"`
}
// ACMEIssuerDNS01ProviderAcmeDNS is a structure containing the
// configuration for ACME-DNS servers
type ACMEIssuerDNS01ProviderAcmeDNS struct {
Host string `json:"host"`
AccountSecret SecretKeySelector `json:"accountSecretRef"`
}
// ACMEIssuerDNS01ProviderRFC2136 is a structure containing the
// configuration for RFC2136 DNS
type ACMEIssuerDNS01ProviderRFC2136 struct {
// The IP address of the DNS supporting RFC2136. Required.
// Note: FQDN is not a valid value, only IP.
Nameserver string `json:"nameserver"`
// The name of the secret containing the TSIG value.
// If ``tsigKeyName`` is defined, this field is required.
// +optional
TSIGSecret SecretKeySelector `json:"tsigSecretSecretRef"`
// The TSIG Key name configured in the DNS.
// If ``tsigSecretSecretRef`` is defined, this field is required.
// +optional
TSIGKeyName string `json:"tsigKeyName"`
// The TSIG Algorithm configured in the DNS supporting RFC2136. Used only
// when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined.
// Supported values are (case-insensitive): ``HMACMD5`` (default),
// ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.
// +optional
TSIGAlgorithm string `json:"tsigAlgorithm"`
}
// IssuerStatus contains status information about an Issuer
type IssuerStatus struct {
Conditions []IssuerCondition `json:"conditions"`
ACME *ACMEIssuerStatus `json:"acme,omitempty"`
}
type ACMEIssuerStatus struct {
// URI is the unique account identifier, which can also be used to retrieve
// account details from the CA
URI string `json:"uri"`
}
// IssuerCondition contains condition information for an Issuer.
type IssuerCondition struct {
// Type of the condition, currently ('Ready').
Type IssuerConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// IssuerConditionType represents an Issuer condition value.
type IssuerConditionType string
const (
// IssuerConditionReady represents the fact that a given Issuer condition
// is in ready state.
IssuerConditionReady IssuerConditionType = "Ready"
)
| 1 | 13,914 | Small nit, and sorry for not spotting sooner.. this should have `omitempty` on it, else when marshalling nil values into json, it will be `caBundle: null` which trips up some JSON parsers | jetstack-cert-manager | go |
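A hedged sketch (not the merged code) of how the CABundle field introduced by the patch above could carry an omitempty tag, so that a nil byte slice is dropped from the marshalled JSON instead of appearing as "caBundle": null:

type VaultIssuer struct {
	// Vault authentication
	Auth VaultAuth `json:"auth"`
	// Server is the vault connection address
	Server string `json:"server"`
	// Vault URL path to the certificate role
	Path string `json:"path"`
	// Base64 encoded CA bundle to validate Vault server certificate; omitted
	// from the marshalled JSON when empty.
	CABundle []byte `json:"caBundle,omitempty"`
}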
@@ -1030,6 +1030,9 @@ class GenericLayoutPlot(GenericCompositePlot):
displays the elements in a cartesian grid in scanline order.
"""
+ transpose = param.Boolean(default=False, doc="""
+ Whether to transpose the layout when plotting""")
+
def __init__(self, layout, **params):
if not isinstance(layout, (NdLayout, Layout)):
raise ValueError("GenericLayoutPlot only accepts Layout objects.") | 1 | """
Public API for all plots supported by HoloViews, regardless of
plotting package or backend. Every plotting classes must be a subclass
of this Plot baseclass.
"""
from itertools import groupby, product
from collections import Counter, defaultdict
import numpy as np
import param
from ..core import OrderedDict
from ..core import util, traversal
from ..core.element import Element
from ..core.overlay import Overlay, CompositeOverlay
from ..core.layout import Empty, NdLayout, Layout
from ..core.options import Store, Compositor, SkipRendering
from ..core.overlay import NdOverlay
from ..core.spaces import HoloMap, DynamicMap
from ..core.util import stream_parameters
from ..element import Table
from .util import (get_dynamic_mode, initialize_sampled, dim_axis_label,
attach_streams, traverse_setter, get_nested_streams)
class Plot(param.Parameterized):
"""
Base class of all Plot classes in HoloViews, designed to be
general enough to use any plotting package or backend.
"""
# A list of style options that may be supplied to the plotting
# call
style_opts = []
# Sometimes matplotlib doesn't support the common aliases.
# Use this list to disable any invalid style options
_disabled_opts = []
def initialize_plot(self, ranges=None):
"""
Initialize the matplotlib figure.
"""
raise NotImplementedError
def update(self, key):
"""
Update the internal state of the Plot to represent the given
key tuple (where integers represent frames). Returns this
state.
"""
return self.state
@property
def state(self):
"""
The plotting state that gets updated via the update method and
used by the renderer to generate output.
"""
raise NotImplementedError
def __len__(self):
"""
Returns the total number of available frames.
"""
raise NotImplementedError
@classmethod
def lookup_options(cls, obj, group):
try:
plot_class = Store.renderers[cls.backend].plotting_class(obj)
style_opts = plot_class.style_opts
except SkipRendering:
style_opts = None
node = Store.lookup_options(cls.backend, obj, group)
if group == 'style' and style_opts:
return node.filtered(style_opts)
else:
return node
class PlotSelector(object):
"""
Proxy that allows dynamic selection of a plotting class based on a
function of the plotted object. Behaves like a Plot class and
presents the same parameterized interface.
"""
_disabled_opts = []
def __init__(self, selector, plot_classes, allow_mismatch=False):
"""
The selector function accepts a component instance and returns
the appropriate key to index plot_classes dictionary.
"""
self.selector = selector
self.plot_classes = OrderedDict(plot_classes)
interface = self._define_interface(self.plot_classes.values(), allow_mismatch)
self.style_opts, self.plot_options = interface
def _define_interface(self, plots, allow_mismatch):
parameters = [{k:v.precedence for k,v in plot.params().items()
if ((v.precedence is None) or (v.precedence >= 0))}
for plot in plots]
param_sets = [set(params.keys()) for params in parameters]
if not allow_mismatch and not all(pset == param_sets[0] for pset in param_sets):
raise Exception("All selectable plot classes must have identical plot options.")
styles= [plot.style_opts for plot in plots]
if not allow_mismatch and not all(style == styles[0] for style in styles):
raise Exception("All selectable plot classes must have identical style options.")
return styles[0], parameters[0]
def __call__(self, obj, **kwargs):
key = self.selector(obj)
if key not in self.plot_classes:
msg = "Key %s returned by selector not in set: %s"
raise Exception(msg % (key, ', '.join(self.plot_classes.keys())))
return self.plot_classes[key](obj, **kwargs)
def __setattr__(self, label, value):
try:
return super(PlotSelector, self).__setattr__(label, value)
except:
raise Exception("Please set class parameters directly on classes %s"
% ', '.join(str(cls) for cls in self.__dict__['plot_classes'].values()))
def params(self):
return self.plot_options
class DimensionedPlot(Plot):
"""
DimensionedPlot implements a number of useful methods
to compute dimension ranges and titles containing the
dimension values.
"""
fontsize = param.Parameter(default=None, allow_None=True, doc="""
Specifies various fontsizes of the displayed text.
Finer control is available by supplying a dictionary where any
unmentioned keys reverts to the default sizes, e.g:
{'ticks':20, 'title':15,
'ylabel':5, 'xlabel':5,
'legend':8, 'legend_title':13}
You can set the fontsize of both 'ylabel' and 'xlabel' together
using the 'labels' key.""")
#Allowed fontsize keys
_fontsize_keys = ['xlabel','ylabel', 'labels', 'ticks',
'title', 'legend', 'legend_title']
show_title = param.Boolean(default=True, doc="""
Whether to display the plot title.""")
title_format = param.String(default="{label} {group}\n{dimensions}", doc="""
The formatting string for the title of this plot, allows defining
a label group separator and dimension labels.""")
normalize = param.Boolean(default=True, doc="""
Whether to compute ranges across all Elements at this level
of plotting. Allows selecting normalization at different levels
for nested data containers.""")
projection = param.Parameter(default=None, doc="""
Allows supplying a custom projection to transform the axis
coordinates during display. Example projections include '3d'
and 'polar' projections supported by some backends. Depending
on the backend custom projection objects may be supplied.""")
def __init__(self, keys=None, dimensions=None, layout_dimensions=None,
uniform=True, subplot=False, adjoined=None, layout_num=0,
style=None, subplots=None, dynamic=False, renderer=None, **params):
self.subplots = subplots
self.adjoined = adjoined
self.dimensions = dimensions
self.layout_num = layout_num
self.layout_dimensions = layout_dimensions
self.subplot = subplot
self.keys = keys
self.uniform = uniform
self.dynamic = dynamic
self.drawn = False
self.handles = {}
self.group = None
self.label = None
self.current_frame = None
self.current_key = None
self.ranges = {}
self.renderer = renderer if renderer else Store.renderers[self.backend].instance()
self.comm = None
self._force = True
params = {k: v for k, v in params.items()
if k in self.params()}
super(DimensionedPlot, self).__init__(**params)
def __getitem__(self, frame):
"""
Get the state of the Plot for a given frame number.
"""
if not self.dynamic == 'open' and isinstance(frame, int) and frame > len(self):
self.warning("Showing last frame available: %d" % len(self))
if not self.drawn: self.handles['fig'] = self.initialize_plot()
if not self.dynamic == 'open' and not isinstance(frame, tuple):
frame = self.keys[frame]
self.update_frame(frame)
return self.state
def _get_frame(self, key):
"""
Required on each MPLPlot type to get the data corresponding
just to the current frame out from the object.
"""
pass
def matches(self, spec):
"""
Matches a specification against the current Plot.
"""
if callable(spec) and not isinstance(spec, type): return spec(self)
elif isinstance(spec, type): return isinstance(self, spec)
else:
raise ValueError("Matching specs have to be either a type or a callable.")
def traverse(self, fn=None, specs=None, full_breadth=True):
"""
Traverses any nested DimensionedPlot returning a list
of all plots that match the specs. The specs should
be supplied as a list of either Plot types or callables,
which should return a boolean given the plot class.
"""
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches: break
if matches:
accumulator.append(fn(self) if fn else self)
# Assumes composite objects are iterables
if hasattr(self, 'subplots') and self.subplots:
for el in self.subplots.values():
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth: break
return accumulator
def _frame_title(self, key, group_size=2, separator='\n'):
"""
Returns the formatted dimension group strings
for a particular frame.
"""
if self.dynamic == 'open' and self.current_key:
key = self.current_key
if self.layout_dimensions is not None:
dimensions, key = zip(*self.layout_dimensions.items())
elif not self.dynamic and (not self.uniform or len(self) == 1) or self.subplot:
return ''
else:
key = key if isinstance(key, tuple) else (key,)
dimensions = self.dimensions
dimension_labels = [dim.pprint_value_string(k) for dim, k in
zip(dimensions, key)]
groups = [', '.join(dimension_labels[i*group_size:(i+1)*group_size])
for i in range(len(dimension_labels))]
return util.safe_unicode(separator.join(g for g in groups if g))
def _fontsize(self, key, label='fontsize', common=True):
if not self.fontsize: return {}
if not isinstance(self.fontsize, dict):
return {label:self.fontsize} if common else {}
unknown_keys = set(self.fontsize.keys()) - set(self._fontsize_keys)
if unknown_keys:
msg = "Popping unknown keys %r from fontsize dictionary.\nValid keys: %r"
self.warning(msg % (list(unknown_keys), self._fontsize_keys))
for key in unknown_keys: self.fontsize.pop(key, None)
if key in self.fontsize:
return {label:self.fontsize[key]}
elif key in ['ylabel', 'xlabel'] and 'labels' in self.fontsize:
return {label:self.fontsize['labels']}
else:
return {}
def compute_ranges(self, obj, key, ranges):
"""
Given an object, a specific key and the normalization options
this method will find the specified normalization options on
the appropriate OptionTree, group the elements according to
the selected normalization option (i.e. either per frame or
over the whole animation) and finally compute the dimension
ranges in each group. The new set of ranges is returned.
"""
all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))
if obj is None or not self.normalize or all_table:
return OrderedDict()
# Get inherited ranges
ranges = self.ranges if ranges is None else dict(ranges)
# Get element identifiers from current object and resolve
# with selected normalization options
norm_opts = self._get_norm_opts(obj)
# Traverse displayed object if normalization applies
# at this level, and ranges for the group have not
# been supplied from a composite plot
return_fn = lambda x: x if isinstance(x, Element) else None
for group, (axiswise, framewise) in norm_opts.items():
elements = []
# Skip if ranges are cached or already computed by a
# higher-level container object.
framewise = framewise or self.dynamic or len(elements) == 1
if group in ranges and (not framewise or ranges is not self.ranges):
continue
elif not framewise: # Traverse to get all elements
elements = obj.traverse(return_fn, [group])
elif key is not None: # Traverse to get elements for each frame
frame = self._get_frame(key)
elements = [] if frame is None else frame.traverse(return_fn, [group])
# Only compute ranges if not axiswise on a composite plot
            # or not framewise on an Overlay or ElementPlot
if (not (axiswise and not isinstance(obj, HoloMap)) or
(not framewise and isinstance(obj, HoloMap))):
self._compute_group_range(group, elements, ranges)
self.ranges.update(ranges)
return ranges
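    # A hedged illustration (comment only): with default options the ranges are
    # computed across the whole HoloMap, so every frame of an animation shares
    # the same normalization; declaring ``{+framewise}`` (or ``{+axiswise}``) in
    # the norm options for an element group makes the framewise/axiswise
    # branches above recompute ranges per frame or per axis instead.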
def _get_norm_opts(self, obj):
"""
Gets the normalization options for a LabelledData object by
        traversing the object to find elements and their ids.
The id is then used to select the appropriate OptionsTree,
accumulating the normalization options into a dictionary.
Returns a dictionary of normalization options for each
element in the tree.
"""
norm_opts = {}
# Get all elements' type.group.label specs and ids
type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),
util.label_sanitizer(x.label, escape=False))) \
if isinstance(x, Element) else None
element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)
if idspec is not None}
# Group elements specs by ID and override normalization
# options sequentially
key_fn = lambda x: -1 if x[0] is None else x[0]
id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)
for gid, element_spec_group in id_groups:
gid = None if gid == -1 else gid
group_specs = [el for _, el in element_spec_group]
backend = self.renderer.backend
optstree = Store.custom_options(
backend=backend).get(gid, Store.options(backend=backend))
# Get the normalization options for the current id
# and match against customizable elements
for opts in optstree:
path = tuple(opts.path.split('.')[1:])
applies = any(path == spec[:i] for spec in group_specs
for i in range(1, 4))
if applies and 'norm' in opts.groups:
nopts = opts['norm'].options
if 'axiswise' in nopts or 'framewise' in nopts:
norm_opts.update({path: (nopts.get('axiswise', False),
nopts.get('framewise', False))})
element_specs = [spec for _, spec in element_specs]
norm_opts.update({spec: (False, False) for spec in element_specs
if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})
return norm_opts
@staticmethod
def _compute_group_range(group, elements, ranges):
# Iterate over all elements in a normalization group
# and accumulate their ranges into the supplied dictionary.
elements = [el for el in elements if el is not None]
group_ranges = OrderedDict()
for el in elements:
if isinstance(el, (Empty, Table)): continue
for dim in el.dimensions(label=True):
dim_range = el.range(dim)
if dim not in group_ranges:
group_ranges[dim] = []
group_ranges[dim].append(dim_range)
ranges[group] = OrderedDict((k, util.max_range(v)) for k, v in group_ranges.items())
@classmethod
def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):
"""
Traverses the supplied object getting all options in opts for
the specified opt_type and specs. Also takes into account the
plotting class defaults for plot options. If a keyfn is
supplied the returned options will be grouped by the returned
keys.
"""
def lookup(x):
"""
            Looks up options for the object, including plot defaults;
            keyfn determines the returned key, otherwise a None key is used.
"""
options = cls.lookup_options(x, opt_type)
selected = {o: options.options[o]
for o in opts if o in options.options}
if opt_type == 'plot' and defaults:
plot = Store.registry[cls.backend].get(type(x))
selected['defaults'] = {o: getattr(plot, o) for o in opts
if o not in selected and hasattr(plot, o)}
key = keyfn(x) if keyfn else None
return (key, selected)
# Traverse object and accumulate options by key
traversed = obj.traverse(lookup, specs)
options = defaultdict(lambda: defaultdict(list))
default_opts = defaultdict(lambda: defaultdict(list))
for key, opts in traversed:
defaults = opts.pop('defaults', {})
for opt, v in opts.items():
options[key][opt].append(v)
for opt, v in defaults.items():
default_opts[key][opt].append(v)
# Merge defaults into dictionary if not explicitly specified
for key, opts in default_opts.items():
for opt, v in opts.items():
if opt not in options[key]:
options[key][opt] = v
return options if keyfn else options[None]
def _get_projection(cls, obj):
"""
Uses traversal to find the appropriate projection
for a nested object. Respects projections set on
Overlays before considering Element based settings,
before finally looking up the default projection on
the plot type. If more than one non-None projection
type is found an exception is raised.
"""
isoverlay = lambda x: isinstance(x, CompositeOverlay)
opts = cls._traverse_options(obj, 'plot', ['projection'],
[CompositeOverlay, Element],
keyfn=isoverlay)
from_overlay = not all(p is None for p in opts[True]['projection'])
projections = opts[from_overlay]['projection']
custom_projs = [p for p in projections if p is not None]
if len(set(custom_projs)) > 1:
raise Exception("An axis may only be assigned one projection type")
return custom_projs[0] if custom_projs else None
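    # Editorial note (not in the original source): for example, if an Overlay
    # declares projection='polar' in its plot options, that value is preferred
    # over any projection declared on the contained Elements; conflicting
    # non-None projections on the same axis raise the exception above.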
def update(self, key):
if len(self) == 1 and key == 0 and not self.drawn:
return self.initialize_plot()
return self.__getitem__(key)
def refresh(self, **kwargs):
"""
Refreshes the plot by rerendering it and then pushing
the updated data if the plot has an associated Comm.
"""
traverse_setter(self, '_force', True)
key = self.current_key if self.current_key else self.keys[0]
dim_streams = [stream for stream in self.streams
if any(c in self.dimensions for c in stream.contents)]
stream_params = stream_parameters(dim_streams)
key = tuple(None if d in stream_params else k
for d, k in zip(self.dimensions, key))
stream_key = util.wrap_tuple_streams(key, self.dimensions, self.streams)
self.update(stream_key)
if self.comm is not None:
self.push()
def push(self):
"""
Pushes updated plot data via the Comm.
"""
if self.comm is None:
raise Exception('Renderer does not have a comm.')
diff = self.renderer.diff(self)
self.comm.send(diff)
def init_comm(self, obj):
"""
Initializes comm and attaches streams.
"""
comm = None
if self.dynamic or self.renderer.widget_mode == 'live':
comm = self.renderer.comms[self.renderer.mode][0](self)
attach_streams(self, obj)
return comm
def __len__(self):
"""
Returns the total number of available frames.
"""
return len(self.keys)
class GenericElementPlot(DimensionedPlot):
"""
Plotting baseclass to render contents of an Element. Implements
methods to get the correct frame given a HoloMap, axis labels and
extents and titles.
"""
apply_ranges = param.Boolean(default=True, doc="""
Whether to compute the plot bounds from the data itself.""")
apply_extents = param.Boolean(default=True, doc="""
        Whether to apply extent overrides on the Elements.""")
    # A dictionary mapping the plot methods used to draw the glyphs
    # corresponding to the ElementPlot. It can declare two keys: a
    # 'single' implementation to draw an individual plot and a
    # 'batched' method to draw multiple Elements at once.
_plot_methods = {}
# Declares the options that are propagated from sub-elements of the
# plot, mostly useful for inheriting options from individual
# Elements on an OverlayPlot. Enabled by default in v1.7.
_propagate_options = []
v17_option_propagation = True
def __init__(self, element, keys=None, ranges=None, dimensions=None,
batched=False, overlaid=0, cyclic_index=0, zorder=0, style=None,
overlay_dims={}, **params):
self.zorder = zorder
self.cyclic_index = cyclic_index
self.overlaid = overlaid
self.batched = batched
self.overlay_dims = overlay_dims
if not isinstance(element, (HoloMap, DynamicMap)):
self.hmap = HoloMap(initial_items=(0, element),
kdims=['Frame'], id=element.id)
else:
self.hmap = element
plot_element = self.hmap.last
if self.batched and not isinstance(self, GenericOverlayPlot):
plot_element = [el for el in plot_element if len(el) > 0][-1]
top_level = keys is None
if top_level:
dimensions = self.hmap.kdims
keys = list(self.hmap.data.keys())
self.style = self.lookup_options(plot_element, 'style') if style is None else style
plot_opts = self.lookup_options(plot_element, 'plot').options
if self.v17_option_propagation:
inherited = self._traverse_options(plot_element, 'plot',
self._propagate_options,
defaults=False)
plot_opts.update(**{k: v[0] for k, v in inherited.items()})
dynamic = False if not isinstance(element, DynamicMap) or element.sampled else element.mode
super(GenericElementPlot, self).__init__(keys=keys, dimensions=dimensions,
dynamic=dynamic,
**dict(params, **plot_opts))
if top_level:
self.comm = self.init_comm(element)
streams = []
if isinstance(self.hmap, DynamicMap):
streams = get_nested_streams(self.hmap)
self.streams = streams
# Update plot and style options for batched plots
if self.batched:
self.ordering = util.layer_sort(self.hmap)
overlay_opts = self.lookup_options(self.hmap.last, 'plot').options.items()
opts = {k: v for k, v in overlay_opts if k in self.params()}
self.set_param(**opts)
self.style = self.lookup_options(plot_element, 'style').max_cycles(len(self.ordering))
else:
self.ordering = []
def get_zorder(self, overlay, key, el):
"""
Computes the z-order of element in the NdOverlay
taking into account possible batching of elements.
"""
spec = util.get_overlay_spec(overlay, key, el)
try:
return self.ordering.index(spec)
except ValueError:
self.ordering = sorted(self.ordering+[spec])
return self.ordering.index(spec)
def _get_frame(self, key):
if isinstance(self.hmap, DynamicMap) and self.overlaid and self.current_frame:
self.current_key = key
return self.current_frame
elif self.dynamic:
key, frame = util.get_dynamic_item(self.hmap, self.dimensions, key)
traverse_setter(self, '_force', False)
if not isinstance(key, tuple): key = (key,)
key_map = dict(zip([d.name for d in self.hmap.kdims], key))
key = tuple(key_map.get(d.name, None) for d in self.dimensions)
if not key in self.keys:
self.keys.append(key)
self.current_frame = frame
self.current_key = key
return frame
if isinstance(key, int):
key = list(self.hmap.data.keys())[min([key, len(self.hmap)-1])]
self.current_key = key
if self.uniform:
if not isinstance(key, tuple): key = (key,)
kdims = [d.name for d in self.hmap.kdims]
if self.dimensions is None:
dimensions = kdims
else:
dimensions = [d.name for d in self.dimensions]
if kdims == ['Frame'] and kdims != dimensions:
select = dict(Frame=0)
else:
select = {d: key[dimensions.index(d)]
for d in kdims}
else:
select = dict(zip(self.hmap.dimensions('key', label=True), key))
try:
selection = self.hmap.select((HoloMap, DynamicMap), **select)
except KeyError:
selection = None
selection = selection.last if isinstance(selection, HoloMap) else selection
self.current_frame = selection
return selection
def get_extents(self, view, ranges):
"""
Gets the extents for the axes from the current View. The globally
computed ranges can optionally override the extents.
"""
ndims = len(view.dimensions())
num = 6 if self.projection == '3d' else 4
if self.apply_ranges:
if ranges:
dims = view.dimensions()
x0, x1 = ranges[dims[0].name]
if ndims > 1:
y0, y1 = ranges[dims[1].name]
else:
y0, y1 = (np.NaN, np.NaN)
if self.projection == '3d':
if len(dims) > 2:
z0, z1 = ranges[dims[2].name]
else:
z0, z1 = np.NaN, np.NaN
else:
x0, x1 = view.range(0)
y0, y1 = view.range(1) if ndims > 1 else (np.NaN, np.NaN)
if self.projection == '3d':
z0, z1 = view.range(2)
if self.projection == '3d':
range_extents = (x0, y0, z0, x1, y1, z1)
else:
range_extents = (x0, y0, x1, y1)
else:
range_extents = (np.NaN,) * num
if self.apply_extents:
norm_opts = self.lookup_options(view, 'norm').options
if norm_opts.get('framewise', False) or self.dynamic:
extents = view.extents
else:
extent_list = self.hmap.traverse(lambda x: x.extents, [Element])
extents = util.max_extents(extent_list, self.projection == '3d')
else:
extents = (np.NaN,) * num
if getattr(self, 'shared_axes', False) and self.subplot:
return util.max_extents([range_extents, extents], self.projection == '3d')
else:
max_extent = []
for l1, l2 in zip(range_extents, extents):
if (isinstance(l2, util.datetime_types)
or (l2 is not None and np.isfinite(l2))):
max_extent.append(l2)
else:
max_extent.append(l1)
return tuple(max_extent)
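    # Editorial note (not in the original source): the returned extents are
    # ordered (x0, y0, x1, y1) for 2D plots and (x0, y0, z0, x1, y1, z1) when
    # self.projection == '3d', with NaN marking unknown bounds.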
def _get_axis_labels(self, dimensions, xlabel=None, ylabel=None, zlabel=None):
if dimensions and xlabel is None:
xlabel = dim_axis_label(dimensions[0]) if dimensions[0] else ''
if len(dimensions) >= 2 and ylabel is None:
ylabel = dim_axis_label(dimensions[1]) if dimensions[1] else ''
if self.projection == '3d' and len(dimensions) >= 3 and zlabel is None:
zlabel = dim_axis_label(dimensions[2]) if dimensions[2] else ''
return xlabel, ylabel, zlabel
def _format_title(self, key, separator='\n'):
frame = self._get_frame(key)
if frame is None: return None
type_name = type(frame).__name__
group = frame.group if frame.group != type_name else ''
label = frame.label
dim_title = self._frame_title(key, separator=separator)
if self.layout_dimensions:
title = dim_title
else:
title_format = util.safe_unicode(self.title_format)
title = title_format.format(label=util.safe_unicode(label),
group=util.safe_unicode(group),
type=type_name,
dimensions=dim_title)
return title.strip(' \n')
def update_frame(self, key, ranges=None):
"""
Set the plot(s) to the given frame number. Operates by
manipulating the matplotlib objects held in the self._handles
dictionary.
If n is greater than the number of available frames, update
using the last available frame.
"""
class GenericOverlayPlot(GenericElementPlot):
"""
Plotting baseclass to render (Nd)Overlay objects. It implements
methods to handle the creation of ElementPlots, coordinating style
groupings and zorder for all layers across a HoloMap. It also
allows collapsing of layers via the Compositor.
"""
batched = param.Boolean(default=True, doc="""
        Whether to plot Elements in an NdOverlay in a batched plotting call
        if possible. This disables legends, and zorder may not be preserved.""")
legend_limit = param.Integer(default=25, doc="""
Number of rendered glyphs before legends are disabled.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_grouping = param.Integer(default=2,
doc="""The length of the type.group.label
        spec that will be used to group Elements into style groups: a
        style_grouping value of 1 groups just by type, a value of 2 groups
        by type and group, and a value of 3 groups by the full
        specification.""")
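    # Editorial sketch (not part of the original source): with the default
    # style_grouping of 2, two Curves with specs ('Curve', 'Price', 'AAPL') and
    # ('Curve', 'Price', 'GOOG') share the style group ('Curve', 'Price'),
    # while style_grouping=3 would place them in separate groups.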
_passed_handles = []
def __init__(self, overlay, ranges=None, batched=True, **params):
super(GenericOverlayPlot, self).__init__(overlay, ranges=ranges,
batched=batched, **params)
# Apply data collapse
self.hmap = Compositor.collapse(self.hmap, None, mode='data')
self.hmap = self._apply_compositor(self.hmap, ranges, self.keys)
self.subplots = self._create_subplots(ranges)
self.traverse(lambda x: setattr(x, 'comm', self.comm))
def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):
"""
Given a HoloMap compute the appropriate (mapwise or framewise)
ranges in order to apply the Compositor collapse operations in
display mode (data collapse should already have happened).
"""
# Compute framewise normalization
defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'
if keys and ranges and dimensions and not defaultdim:
dim_inds = [dimensions.index(d) for d in holomap.kdims]
sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]
frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))
for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()])
else:
mapwise_ranges = self.compute_ranges(holomap, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))
for key in holomap.data.keys()])
ranges = frame_ranges.values()
return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')
def _create_subplots(self, ranges):
# Check if plot should be batched
ordering = util.layer_sort(self.hmap)
registry = Store.registry[self.renderer.backend]
batched = self.batched and type(self.hmap.last) is NdOverlay
if batched:
batchedplot = registry.get(type(self.hmap.last.last))
if (batched and batchedplot and 'batched' in batchedplot._plot_methods and
(not self.show_legend or len(ordering) > self.legend_limit)):
self.batched = True
keys, vmaps = [()], [self.hmap]
else:
self.batched = False
keys, vmaps = self.hmap.split_overlays()
# Compute global ordering
length = self.style_grouping
group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
map_lengths = Counter()
for m in vmaps:
map_lengths[group_fn(m)[:length]] += 1
zoffset = 0
overlay_type = 1 if self.hmap.type == Overlay else 2
group_counter = Counter()
subplots = OrderedDict()
for (key, vmap) in zip(keys, vmaps):
opts = {'overlaid': overlay_type}
if self.hmap.type == Overlay:
style_key = (vmap.type.__name__,) + key
else:
if not isinstance(key, tuple): key = (key,)
style_key = group_fn(vmap) + key
opts['overlay_dims'] = OrderedDict(zip(self.hmap.last.kdims, key))
if self.batched:
vtype = type(vmap.last.last)
oidx = 0
else:
vtype = type(vmap.last)
oidx = ordering.index(style_key)
plottype = registry.get(vtype, None)
if plottype is None:
self.warning("No plotting class for %s type and %s backend "
"found. " % (vtype.__name__, self.renderer.backend))
continue
# Get zorder and style counter
group_key = style_key[:length]
zorder = self.zorder + oidx + zoffset
cyclic_index = group_counter[group_key]
group_counter[group_key] += 1
group_length = map_lengths[group_key]
if issubclass(plottype, GenericOverlayPlot):
opts['show_legend'] = self.show_legend
elif self.batched and 'batched' in plottype._plot_methods:
opts['batched'] = self.batched
opts['overlaid'] = self.overlaid
if len(ordering) > self.legend_limit:
opts['show_legend'] = False
style = self.lookup_options(vmap.last, 'style').max_cycles(group_length)
passed_handles = {k: v for k, v in self.handles.items()
if k in self._passed_handles}
plotopts = dict(opts, cyclic_index=cyclic_index,
invert_axes=self.invert_axes,
dimensions=self.dimensions, keys=self.keys,
layout_dimensions=self.layout_dimensions,
ranges=ranges, show_title=self.show_title,
style=style, uniform=self.uniform,
zorder=zorder, **passed_handles)
if not isinstance(key, tuple): key = (key,)
subplots[key] = plottype(vmap, **plotopts)
if (not isinstance(plottype, PlotSelector) and
issubclass(plottype, GenericOverlayPlot)):
zoffset += len(set([k for o in vmap for k in o.keys()])) - 1
if not subplots:
raise SkipRendering("%s backend could not plot any Elements "
"in the Overlay." % self.renderer.backend)
return subplots
def get_extents(self, overlay, ranges):
extents = []
items = overlay.items()
if self.batched and self.subplots:
subplot = list(self.subplots.values())[0]
subplots = [(k, subplot) for k in overlay.data.keys()]
else:
subplots = self.subplots.items()
for key, subplot in subplots:
found = False
layer = overlay.data.get(key, None)
if isinstance(self.hmap, DynamicMap) and layer is None:
for _, layer in items:
if isinstance(layer, subplot.hmap.type):
found = True
break
if not found:
layer = None
if layer and subplot.apply_ranges:
if isinstance(layer, CompositeOverlay):
sp_ranges = ranges
else:
sp_ranges = util.match_spec(layer, ranges) if ranges else {}
extents.append(subplot.get_extents(layer, sp_ranges))
return util.max_extents(extents, self.projection == '3d')
class GenericCompositePlot(DimensionedPlot):
def __init__(self, layout, keys=None, dimensions=None, **params):
if 'uniform' not in params:
params['uniform'] = traversal.uniform(layout)
top_level = keys is None
if top_level:
dimensions, keys = traversal.unique_dimkeys(layout)
dynamic, sampled = get_dynamic_mode(layout)
if sampled:
initialize_sampled(layout, dimensions, keys[0])
self.layout = layout
super(GenericCompositePlot, self).__init__(keys=keys,
dynamic=dynamic,
dimensions=dimensions,
**params)
if top_level:
self.comm = self.init_comm(layout)
self.traverse(lambda x: setattr(x, 'comm', self.comm))
nested_streams = layout.traverse(lambda x: get_nested_streams(x),
[DynamicMap])
self.streams = list(set([s for streams in nested_streams for s in streams]))
def _get_frame(self, key):
"""
        Creates a clone of the Layout with the nth frame for each
        Element.
"""
layout_frame = self.layout.clone(shared_data=False)
keyisint = isinstance(key, int)
if not isinstance(key, tuple): key = (key,)
nthkey_fn = lambda x: zip(tuple(x.name for x in x.kdims),
list(x.data.keys())[min([key[0], len(x)-1])])
if key == self.current_key and not self._force:
return self.current_frame
else:
self.current_key = key
for path, item in self.layout.items():
if self.dynamic == 'open':
if keyisint:
counts = item.traverse(lambda x: x.counter, (DynamicMap,))
if key[0] >= counts[0]:
item.traverse(lambda x: next(x), (DynamicMap,))
dim_keys = item.traverse(nthkey_fn, (DynamicMap,))[0]
else:
dim_keys = zip([d.name for d in self.dimensions
if d in item.dimensions('key')], key)
self.current_key = tuple(k[1] for k in dim_keys)
elif item.traverse(lambda x: x, [DynamicMap]):
key, frame = util.get_dynamic_item(item, self.dimensions, key)
layout_frame[path] = frame
continue
elif self.uniform:
dim_keys = zip([d.name for d in self.dimensions
if d in item.dimensions('key')], key)
else:
dim_keys = item.traverse(nthkey_fn, (HoloMap,))[0]
if dim_keys:
obj = item.select((HoloMap,), **dict(dim_keys))
if isinstance(obj, HoloMap) and len(obj) == 0:
continue
else:
layout_frame[path] = obj
else:
layout_frame[path] = item
traverse_setter(self, '_force', False)
self.current_frame = layout_frame
return layout_frame
def __len__(self):
return len(self.keys)
def _format_title(self, key, separator='\n'):
dim_title = self._frame_title(key, 3, separator)
layout = self.layout
type_name = type(self.layout).__name__
group = util.safe_unicode(layout.group if layout.group != type_name else '')
label = util.safe_unicode(layout.label)
title = util.safe_unicode(self.title_format).format(label=label,
group=group,
type=type_name,
dimensions=dim_title)
return title.strip(' \n')
class GenericLayoutPlot(GenericCompositePlot):
"""
    A GenericLayoutPlot accepts either a Layout or an NdLayout and
    displays the elements in a Cartesian grid in scanline order, i.e.
    left to right within each row and then top to bottom across rows.
"""
def __init__(self, layout, **params):
if not isinstance(layout, (NdLayout, Layout)):
raise ValueError("GenericLayoutPlot only accepts Layout objects.")
if len(layout.values()) == 0:
raise SkipRendering(warn=False)
super(GenericLayoutPlot, self).__init__(layout, **params)
self.subplots = {}
self.rows, self.cols = layout.shape
self.coords = list(product(range(self.rows),
range(self.cols)))
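        # Editorial note (not in the original source): itertools.product yields
        # the coordinates in scanline order, i.e. left to right within a row and
        # then top to bottom across rows; for a 2x3 layout the order is
        # (0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2).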
| 1 | 16,400 | Would be good to say the normal behavior is scanline order (left to right, top to bottom) and that transposing makes it work top to bottom and left to right. | holoviz-holoviews | py |
@@ -326,6 +326,17 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
+ }, {
+ Name: "use_shared_date",
+ Default: false,
+ Help: `Use date file was shared instead of modified date.
+
+Note that, as with "--drive-use-created-date", this flag may have
+unexpected consequences when uploading/downloading files.
+
+If both this flag and "--drive-use-created-date" are set, the created
+date is used.`,
+ Advanced: true,
}, {
Name: "list_chunk",
Default: 1000, | 1 | // Package drive interfaces with the Google Drive object storage system
package drive
// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
// * can have directory loops
// * files with / in name
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
drive_v2 "google.golang.org/api/drive/v2"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
const enc = encodings.Drive
// Constants
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
defaultBurst = 100
defaultExportExtensions = "docx,xlsx,pptx,svg"
scopePrefix = "https://www.googleapis.com/auth/"
defaultScope = "drive"
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
)
// Globals
var (
// Description of how to auth for this app
driveConfig = &oauth2.Config{
Scopes: []string{scopePrefix + "drive"},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
"application/x-vnd.oasis.opendocument.spreadsheet": ".ods",
"application/x-vnd.oasis.opendocument.text": ".odt",
"image/jpg": ".jpg",
"image/x-bmp": ".bmp",
"image/x-png": ".png",
"text/rtf": ".rtf",
}
_mimeTypeToExtension = map[string]string{
"application/epub+zip": ".epub",
"application/json": ".json",
"application/msword": ".doc",
"application/pdf": ".pdf",
"application/rtf": ".rtf",
"application/vnd.ms-excel": ".xls",
"application/vnd.oasis.opendocument.presentation": ".odp",
"application/vnd.oasis.opendocument.spreadsheet": ".ods",
"application/vnd.oasis.opendocument.text": ".odt",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"application/x-msmetafile": ".wmf",
"application/zip": ".zip",
"image/bmp": ".bmp",
"image/jpeg": ".jpg",
"image/pjpeg": ".pjpeg",
"image/png": ".png",
"image/svg+xml": ".svg",
"text/csv": ".csv",
"text/html": ".html",
"text/plain": ".txt",
"text/tab-separated-values": ".tsv",
}
_mimeTypeToExtensionLinks = map[string]string{
"application/x-link-desktop": ".desktop",
"application/x-link-html": ".link.html",
"application/x-link-url": ".url",
"application/x-link-webloc": ".webloc",
}
_mimeTypeCustomTransform = map[string]string{
"application/vnd.google-apps.script+json": "application/json",
}
fetchFormatsOnce sync.Once // make sure we fetch the export/import formats only once
_exportFormats map[string][]string // allowed export MIME type conversions
_importFormats map[string][]string // allowed import MIME type conversions
templatesOnce sync.Once // parse link templates only once
_linkTemplates map[string]*template.Template // available link types
)
// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
return scopes
}
// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}
}
return false
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "drive",
Description: "Google Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" {
err = oauthutil.Config("drive", name, m, driveConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nSetting your own is recommended.",
}, {
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
Value: "drive",
Help: "Full access all files, excluding Application Data Folder.",
}, {
Value: "drive.readonly",
Help: "Read-only access to file metadata and file contents.",
}, {
Value: "drive.file",
Help: "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
}, {
Value: "drive.appfolder",
Help: "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
}, {
Value: "drive.metadata.readonly",
Help: "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "team_drive",
Help: "ID of the Team Drive",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "auth_owner_only",
Default: false,
Help: "Only consider files owned by the authenticated user.",
Advanced: true,
}, {
Name: "use_trash",
Default: true,
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
Advanced: true,
}, {
Name: "skip_checksum_gphotos",
Default: false,
Help: `Skip MD5 checksum on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or
videos.
Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.
Google photos are identified by being in the "photos" space.
Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.`,
Advanced: true,
}, {
Name: "shared_with_me",
Default: false,
Help: `Only show files that are shared with me.
Instructs rclone to operate on your "Shared with me" folder (where
Google Drive lets you access the files and folders others have shared
with you).
This works both with the "list" (lsd, lsl, etc) and the "copy"
commands (copy, sync, etc), and with all other commands too.`,
Advanced: true,
}, {
Name: "trashed_only",
Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "formats",
Default: "",
Help: "Deprecated: see export_formats",
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "export_formats",
Default: defaultExportExtensions,
Help: "Comma separated list of preferred formats for downloading Google docs.",
Advanced: true,
}, {
Name: "import_formats",
Default: "",
Help: "Comma separated list of preferred formats for uploading Google docs.",
Advanced: true,
}, {
Name: "allow_import_name_change",
Default: false,
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
Advanced: true,
}, {
Name: "use_created_date",
Default: false,
			Help: `Use file created date instead of modified date.
Useful when downloading data and you want the creation date used in
place of the last modified date.
**WARNING**: This flag may have some unexpected consequences.
When uploading to your drive all files will be overwritten unless they
haven't been modified since their creation. And the inverse will occur
while downloading. This side effect can be avoided by using the
"--checksum" flag.
This feature was implemented to retain photos capture date as recorded
by google photos. You will first need to check the "Create a Google
Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 100-1000. 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: "Impersonate this user when using a service account.",
Advanced: true,
}, {
Name: "alternate_export",
Default: false,
			Help: `Use alternate export URLs for google documents export.
If this option is set this instructs rclone to use an alternate set of
export URLs for drive documents. Users have reported that the
official export URLs can't export large documents, whereas these
unofficial ones can.
See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
Advanced: true,
}, {
Name: "upload_cutoff",
Default: defaultChunkSize,
Help: "Cutoff for switching to chunked upload",
Advanced: true,
}, {
Name: "chunk_size",
Default: defaultChunkSize,
			Help: `Upload chunk size. Must be a power of 2 >= 256k.
Making this larger will improve performance, but note that one chunk
is buffered in memory per transfer.
Reducing this will reduce memory usage but decrease performance.`,
Advanced: true,
}, {
Name: "acknowledge_abuse",
Default: false,
Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.`,
Advanced: true,
}, {
Name: "keep_revision_forever",
Default: false,
Help: "Keep new head revision of each file forever.",
Advanced: true,
}, {
Name: "size_as_quota",
Default: false,
Help: `Show storage quota usage for file size.
The storage used by a file is the size of the current version plus any
older versions that have been set to keep forever.`,
Advanced: true,
}, {
Name: "v2_download_min_size",
Default: fs.SizeSuffix(-1),
Help: "If Object's are greater, use drive v2 API to download.",
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: "pacer_burst",
Default: defaultBurst,
Help: "Number of API calls to allow without sleeping.",
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different drive configs.
This can be useful if you wish to do a server side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: "disable_http2",
Default: true,
Help: `Disable drive using http2
There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.
See: https://github.com/rclone/rclone/issues/3631
`,
Advanced: true,
}},
})
// register duplicate MIME types first
// this allows them to be used with mime.ExtensionsByType() but
// mime.TypeByExtension() will return the later registered MIME type
for _, m := range []map[string]string{
_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
} {
for mimeType, extension := range m {
if err := mime.AddExtensionType(extension, mimeType); err != nil {
log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
}
}
}
}
// Options defines the configuration for this backend
type Options struct {
Scope string `config:"scope"`
RootFolderID string `config:"root_folder_id"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
ExportExtensions string `config:"export_formats"`
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
KeepRevisionForever bool `config:"keep_revision_forever"`
SizeAsQuota bool `config:"size_as_quota"`
V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
PacerBurst int `config:"pacer_burst"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
}
// Fs represents a remote drive server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
svc *drive.Service // the connection to the drive server
v2Svc *drive_v2.Service // used to create download links for the v2 api
client *http.Client // authorized client
rootFolderID string // the id of the root folder
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // To pace the API calls
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
}
type baseObject struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
}
type documentObject struct {
baseObject
url string // Download URL of this object
documentMimeType string // the original document MIME type
extLen int // The length of the added export extension
}
type linkObject struct {
baseObject
content []byte // The file content generated by a link template
extLen int // The length of the added export extension
}
// Object describes a drive object
type Object struct {
baseObject
url string // Download URL of this object
md5sum string // md5sum of the object
v2Download bool // generate v2 download link ondemand
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Google drive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if fserrors.ShouldRetry(err) {
return true, err
}
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
return true, err
}
if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
return true, err
}
}
}
return false, err
}
// parseDrivePath parses a drive 'url'
func parseDrivePath(path string) (root string, err error) {
root = strings.Trim(path, "/")
return
}
// User function to process a File item from list
//
// Should return true to finish processing
type listFn func(*drive.File) bool
func containsString(slice []string, s string) bool {
for _, e := range slice {
if e == s {
return true
}
}
return false
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
return info.Id, nil
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
var query []string
if !includeAll {
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
if f.opt.TrashedOnly {
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
}
query = append(query, q)
}
	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents).
	// We must not filter by parent when we try to list "ROOT" with drive-shared-with-me.
	// If we need to list files inside those shared folders, we must search without sharedWithMe.
parentsQuery := bytes.NewBufferString("(")
for _, dirID := range dirIDs {
if dirID == "" {
continue
}
if parentsQuery.Len() > 1 {
_, _ = parentsQuery.WriteString(" or ")
}
if f.opt.SharedWithMe && dirID == f.rootFolderID {
_, _ = parentsQuery.WriteString("sharedWithMe=true")
} else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
}
}
if parentsQuery.Len() > 1 {
_ = parentsQuery.WriteByte(')')
query = append(query, parentsQuery.String())
}
var stems []string
if title != "" {
searchTitle := enc.FromStandardName(title)
// Escaping the backslash isn't documented but seems to work
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
var titleQuery bytes.Buffer
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
if !directoriesOnly && !f.opt.SkipGdocs {
// If the search title has an extension that is in the export extensions add a search
// for the filename without the extension.
// Assume that export extensions don't contain escape sequences.
for _, ext := range f.exportExtensions {
if strings.HasSuffix(searchTitle, ext) {
stems = append(stems, title[:len(title)-len(ext)])
_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
}
}
}
_ = titleQuery.WriteByte(')')
query = append(query, titleQuery.String())
}
if directoriesOnly {
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
}
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
}
list := f.svc.Files.List()
if len(query) > 0 {
list.Q(strings.Join(query, " and "))
// fmt.Printf("list Query = %q\n", query)
}
if f.opt.ListChunk > 0 {
list.PageSize(f.opt.ListChunk)
}
list.SupportsAllDrives(true)
list.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
list.DriveId(f.opt.TeamDriveID)
list.Corpora("drive")
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
var fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
fields = fmt.Sprintf("files(%s),nextPageToken", fields)
OUTER:
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
}
for _, item := range files.Files {
item.Name = enc.ToStandardName(item.Name)
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
found := false
for _, stem := range stems {
if stem == item.Name {
found = true
break
}
}
if !found {
continue
}
_, exportName, _, _ := f.findExportFormat(item)
if exportName == "" || exportName != title {
continue
}
}
if fn(item) {
found = true
break OUTER
}
}
if files.NextPageToken == "" {
break
}
list.PageToken(files.NextPageToken)
}
return
}
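// Editorial sketch (not part of the original source): for a plain listing of a
// single directory with TrashedOnly unset, the pieces assembled above produce a
// query roughly like
//
//	trashed=false and ('<dirID>' in parents)
//
// with an additional "(name='...')" clause appended when a title is supplied.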
// Returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
switch {
case x == 0:
return true
case x < 0:
return false
default:
return (x & (x - 1)) == 0
}
}
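// Editorial note (not in the original source): for example isPowerOfTwo(0),
// isPowerOfTwo(256*1024) and isPowerOfTwo(8*1024*1024) are true, while
// isPowerOfTwo(3) and isPowerOfTwo(-4) are false.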
// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
if mimeTypeIn == "" {
return ""
}
mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
if err != nil {
return mimeTypeIn
}
mimeTypeOut := mimeTypeIn
if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
param["charset"] = "utf-8"
mimeTypeOut = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
}
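// Editorial note (not in the original source): fixMimeType("text/plain") yields
// "text/plain; charset=utf-8", while non-text types such as "application/pdf"
// are returned unchanged.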
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in))
for k, v := range in {
for i, mt := range v {
v[i] = fixMimeType(mt)
}
out[fixMimeType(k)] = v
}
return out
}
func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}
func isLinkMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/x-link-")
}
// parseExtensions parses a list of comma separated extensions
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue
}
if len(extension) > 0 && extension[0] != '.' {
extension = "." + extension
}
mt := mime.TypeByExtension(extension)
if mt == "" {
return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
}
if !containsString(extensions, extension) {
extensions = append(extensions, extension)
mimeTypes = append(mimeTypes, mt)
}
}
}
return
}
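// Editorial sketch (not part of the original source): parseExtensions("docx, xlsx")
// would return extensions [".docx", ".xlsx"] together with the MIME types
// registered for them (e.g. the officedocument wordprocessingml/spreadsheetml
// types added in init above), or an error if an extension has no known MIME type.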
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a team drive?\n")
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
client, err := createOAuthClient(opt, name, m)
if err != nil {
return errors.Wrap(err, "config team drive failed to create oauth client")
}
svc, err := drive.New(client)
if err != nil {
return errors.Wrap(err, "config team drive failed to make drive client")
}
fmt.Printf("Fetching team drive list...\n")
var driveIDs, driveNames []string
listTeamDrives := svc.Teamdrives.List().PageSize(100)
listFailed := false
for {
var teamDrives *drive.TeamDriveList
err = newPacer(opt).Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
fmt.Printf("Listing team drives failed: %v\n", err)
listFailed = true
break
}
for _, drive := range teamDrives.TeamDrives {
driveIDs = append(driveIDs, drive.Id)
driveNames = append(driveNames, drive.Name)
}
if teamDrives.NextPageToken == "" {
break
}
listTeamDrives.PageToken(teamDrives.NextPageToken)
}
var driveID string
if !listFailed && len(driveIDs) == 0 {
fmt.Printf("No team drives found in your account")
} else {
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
}
m.Set("team_drive", driveID)
opt.TeamDriveID = driveID
return nil
}
// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *fs.Pacer {
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
}
ctxWithSpecialClient := oauthutil.Context(getClient(opt))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
var oAuthClient *http.Client
var err error
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client from service account")
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
}
}
return oAuthClient, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if !isPowerOfTwo(int64(cs)) {
return errors.Errorf("%v isn't a power of two", cs)
}
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
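// Editorial note (not in the original source): the default of 8M passes this
// check, 256k is the smallest accepted value, and sizes such as 5M (not a power
// of two) or 128k (below the minimum) are rejected.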
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "drive: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "drive: chunk size")
}
oAuthClient, err := createOAuthClient(opt, name, m)
if err != nil {
return nil, errors.Wrap(err, "drive: failed when making oauth client")
}
root, err := parseDrivePath(path)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: newPacer(opt),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive client")
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive v2 client")
}
}
// set root folder for a team drive or query the user root folder
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else {
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
} else {
return nil, err
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
}
f.dirCache = dircache.New(root, f.rootFolderID, f)
// Parse extensions
if opt.Extensions != "" {
if opt.ExportExtensions != defaultExportExtensions {
return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
}
opt.Extensions, opt.ExportExtensions = "", opt.Extensions
}
f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
if err != nil {
return nil, err
}
_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
if err != nil {
return nil, err
}
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
// unable to list folder so return old f
return f, nil
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
return f, fs.ErrorIsFile
}
// fmt.Printf("Root id %s", f.dirCache.RootID())
return f, nil
}
func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
}
size := info.Size
if f.opt.SizeAsQuota {
size = info.QuotaBytesUsed
}
return baseObject{
fs: f,
remote: remote,
id: info.Id,
modifiedDate: modifiedDate,
mimeType: info.MimeType,
bytes: size,
}
}
// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
break
}
}
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
}
// newDocumentObject creates a fs.Object for a google docs drive.File
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
mediaType, _, err := mime.ParseMediaType(exportMimeType)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
if f.opt.AlternateExport {
switch info.MimeType {
case "application/vnd.google-apps.drawing":
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
case "application/vnd.google-apps.document":
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.spreadsheet":
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.presentation":
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
}
}
baseObject := f.newBaseObject(remote+extension, info)
baseObject.bytes = -1
baseObject.mimeType = exportMimeType
return &documentObject{
baseObject: baseObject,
url: url,
documentMimeType: info.MimeType,
extLen: len(extension),
}, nil
}
// newLinkObject creates a fs.Object that represents a link to a google docs drive.File
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
}
var buf bytes.Buffer
err := t.Execute(&buf, struct {
URL, Title string
}{
info.WebViewLink, info.Name,
})
if err != nil {
return nil, errors.Wrap(err, "executing template failed")
}
baseObject := f.newBaseObject(remote+extension, info)
baseObject.bytes = int64(buf.Len())
baseObject.mimeType = exportMimeType
return &linkObject{
baseObject: baseObject,
content: buf.Bytes(),
extLen: len(extension),
}, nil
}
// newObjectWithInfo creates a fs.Object for any drive.File
//
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
return f.newRegularObject(remote, info), nil
}
extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}
// newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
switch {
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, nil
default:
// If item MimeType is in the ExportFormats then it is a google doc
if !isDocument {
fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
return nil, nil
}
if extension == "" {
fs.Debugf(remote, "No export formats found for %q", info.MimeType)
return nil, nil
}
if isLinkMimeType(exportMimeType) {
return f.newLinkObject(remote, info, extension, exportMimeType)
}
return f.newDocumentObject(remote, info, extension, exportMimeType)
}
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
}
remote = remote[:len(remote)-len(extension)]
obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
switch {
case err != nil:
return nil, err
case obj == nil:
return nil, fs.ErrorObjectNotFound
default:
return obj, nil
}
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
if exportName == leaf {
pathIDOut = item.Id
return true
}
if isDocument {
return false
}
}
if item.Name == leaf {
pathIDOut = item.Id
return true
}
return false
})
return pathIDOut, found, err
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
leaf = enc.FromStandardName(leaf)
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
createInfo := &drive.File{
Name: leaf,
Description: leaf,
MimeType: driveFolderType,
Parents: []string{pathID},
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Fields("id").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return "", err
}
return info.Id, nil
}
// isAuthOwned checks if any of the item owners is the authenticated owner
func isAuthOwned(item *drive.File) bool {
for _, owner := range item.Owners {
if owner.Me {
return true
}
}
return false
}
// linkTemplate returns the Template for a MIME type or nil if the
// MIME type does not represent a link
func linkTemplate(mt string) *template.Template {
templatesOnce.Do(func() {
_linkTemplates = map[string]*template.Template{
"application/x-link-desktop": template.Must(
template.New("application/x-link-desktop").Parse(desktopTemplate)),
"application/x-link-html": template.Must(
template.New("application/x-link-html").Parse(htmlTemplate)),
"application/x-link-url": template.Must(
template.New("application/x-link-url").Parse(urlTemplate)),
"application/x-link-webloc": template.Must(
template.New("application/x-link-webloc").Parse(weblocTemplate)),
}
})
return _linkTemplates[mt]
}
func (f *Fs) fetchFormats() {
fetchFormatsOnce.Do(func() {
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().
Fields("exportFormats,importFormats").
Do()
return shouldRetry(err)
})
if err != nil {
fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
_exportFormats = map[string][]string{}
_importFormats = map[string][]string{}
return
}
_exportFormats = fixMimeTypeMap(about.ExportFormats)
_importFormats = fixMimeTypeMap(about.ImportFormats)
})
}
// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats() map[string][]string {
f.fetchFormats()
return _exportFormats
}
// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats() map[string][]string {
f.fetchFormats()
return _importFormats
}
// findExportFormatByMimeType works out the optimum export settings
// for the given MIME type.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
extension, mimeType string, isDocument bool) {
exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
if isDocument {
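// walk the configured export extensions in order and use the first one the server can actually produce for this document type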
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
for _, emt := range exportMimeTypes {
if emt == _mimeType {
return _extension, emt, true
}
if _mimeType == _mimeTypeCustomTransform[emt] {
return _extension, emt, true
}
}
}
}
// else return empty
return "", "", isDocument
}
// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
if extension != "" {
filename = item.Name + extension
}
return
}
// findImportFormat finds the matching upload MIME type for a file
// If the given MIME type is in importMimeTypes, the matching upload
// MIME type is returned
//
// When no match is found "" is returned.
func (f *Fs) findImportFormat(mimeType string) string {
mimeType = fixMimeType(mimeType)
ifs := f.importFormats()
for _, mt := range f.importMimeTypes {
if mt == mimeType {
importMimeTypes := ifs[mimeType]
if l := len(importMimeTypes); l > 0 {
if l > 1 {
fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
}
return importMimeTypes[0]
}
}
}
return ""
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
if err != nil {
iErr = err
return true
}
if entry != nil {
entries = append(entries, entry)
}
return false
})
if err != nil {
return nil, err
}
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// listREntry is a task to be executed by a listRRunner
type listREntry struct {
id, path string
}
// listRSlices is a helper struct to sort two slices at once
type listRSlices struct {
dirs []string
paths []string
}
func (s listRSlices) Sort() {
sort.Sort(s)
}
func (s listRSlices) Len() int {
return len(s.dirs)
}
func (s listRSlices) Swap(i, j int) {
s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
}
func (s listRSlices) Less(i, j int) bool {
return s.dirs[i] < s.dirs[j]
}
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
var dirs []string
var paths []string
for dir := range in {
dirs = append(dirs[:0], dir.id)
paths = append(paths[:0], dir.path)
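// opportunistically batch up to grouping directory IDs from the channel without blocking, so one list call can cover several directories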
waitloop:
for i := 1; i < grouping; i++ {
select {
case d, ok := <-in:
if !ok {
break waitloop
}
dirs = append(dirs, d.id)
paths = append(paths, d.path)
default:
}
}
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
item.Parents = dirs
}
for _, parent := range item.Parents {
// only handle parents that are in the requested dirs list
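// dirs was sorted above, so a binary search is enough to check membership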
i := sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(remote, item)
if err != nil {
iErr = err
return true
}
err = cb(entry)
if err != nil {
iErr = err
return true
}
}
return false
})
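// each directory pulled from the in channel added one to the WaitGroup; release them all now that this batch has been listed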
for range dirs {
wg.Done()
}
if iErr != nil {
out <- iErr
return
}
if err != nil {
out <- err
return
}
}
out <- nil
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
const (
grouping = 50
inputBuffer = 1000
)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
in := make(chan listREntry, inputBuffer)
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
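// cb is called for every entry found: directories are queued for the runners, and if the buffered channel is full they are parked in overflow so the callback never blocks a worker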
cb := func(entry fs.DirEntry) error {
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
select {
case in <- listREntry{d.ID(), d.Remote()}:
wg.Add(1)
default:
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
return list.Add(entry)
}
wg.Add(1)
in <- listREntry{directoryID, dir}
for i := 0; i < fs.Config.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, grouping)
}
go func() {
// wait until all directories are processed
wg.Wait()
// if the input channel overflowed add the collected entries to the channel now
for len(overflow) > 0 {
mu.Lock()
l := len(overflow)
// only fill half of the channel to prevent entries being put into overflow again
if l > inputBuffer/2 {
l = inputBuffer / 2
}
wg.Add(l)
for _, d := range overflow[:l] {
in <- d
}
overflow = overflow[l:]
mu.Unlock()
// wait again for the completion of all directories
wg.Wait()
}
mu.Lock()
if in != nil {
// notify all workers to exit
close(in)
in = nil
}
mu.Unlock()
}()
// wait for all the workers to finish
for i := 0; i < fs.Config.Checkers; i++ {
e := <-out
mu.Lock()
// if one worker returns an error early, close the input so all other workers exit
if e != nil && in != nil {
err = e
close(in)
in = nil
}
mu.Unlock()
}
close(out)
if err != nil {
return err
}
return list.Flush()
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
// When the drive.File cannot be represented as a fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id)
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
return d, nil
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
return f.newObjectWithInfo(remote, item)
}
return nil, nil
}
// Creates a drive.File info from the parameters passed in.
//
// Used to create new objects
func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
leaf = enc.FromStandardName(leaf)
// Define the metadata for the file we are going to create.
createInfo := &drive.File{
Name: leaf,
Description: leaf,
Parents: []string{directoryID},
ModifiedTime: modTime.Format(timeFormatOut),
}
return createInfo, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime(ctx)
srcMimeType := fs.MimeTypeFromName(remote)
srcExt := path.Ext(remote)
exportExt := ""
importMimeType := ""
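// if import formats are configured and gdocs are not skipped, check whether this upload should be converted to a Google document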
if f.importMimeTypes != nil && !f.opt.SkipGdocs {
importMimeType = f.findImportFormat(srcMimeType)
if isInternalMimeType(importMimeType) {
remote = remote[:len(remote)-len(srcExt)]
exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
createInfo, err := f.createFileInfo(ctx, remote, modTime)
if err != nil {
return nil, err
}
if importMimeType != "" {
createInfo.MimeType = importMimeType
} else {
createInfo.MimeType = fs.MimeTypeFromName(remote)
}
var info *drive.File
if size == 0 || size < int64(f.opt.UploadCutoff) {
// Make the API request to upload metadata and file data.
// Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType(srcMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
} else {
// Upload the file in chunks
info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
if err != nil {
return nil, err
}
}
return f.newObjectWithInfo(remote, info)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
dstDir := dirs[0]
for _, srcDir := range dirs[1:] {
// list the objects
infos := []*drive.File{}
_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
infos = append(infos, info)
return false
})
if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", info.Name)
// Move the file into the destination
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(info.Id, nil).
RemoveParents(srcDir.ID()).
AddParents(dstDir.ID()).
Fields("").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.rmdir(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
}
return nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
var err error
if useTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(directoryID, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(directoryID).
Fields("").
SupportsAllDrives(true).
Do()
}
return shouldRetry(err)
})
}
// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.root, dir)
dc := f.dirCache
directoryID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
}
var trashedFiles = false
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true
}
fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
trashedFiles = true
return false
})
if err != nil {
return err
}
if found {
return errors.Errorf("directory not empty")
}
if root != "" {
// trash the directory if it had trashed files
// in or the user wants to trash, otherwise
// delete it.
err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
if err != nil {
return err
}
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
if ext != "" {
if !strings.HasSuffix(remote, ext) {
fs.Debugf(src, "Can't copy - not same document type")
return nil, fs.ErrorCantCopy
}
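// strip the export extension so the server side copy is made under the document's base name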
remote = remote[:len(remote)-len(ext)]
}
// Look to see if there is an existing object
existingObject, _ := f.NewObject(ctx, remote)
createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(srcObj.id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
newObject, err := f.newObjectWithInfo(remote, info)
if err != nil {
return nil, err
}
if existingObject != nil {
err = existingObject.Remove(ctx)
if err != nil {
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
}
return newObject, nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
err = f.pacer.Call(func() (bool, error) {
if f.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields("").
SupportsAllDrives(true).
Do()
}
return shouldRetry(err)
})
f.dirCache.ResetRoot()
if err != nil {
return err
}
return nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
return nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
}
q := about.StorageQuota
usage := &fs.Usage{
Used: fs.NewUsageValue(q.UsageInDrive), // bytes in use
Trashed: fs.NewUsageValue(q.UsageInDriveTrash), // bytes in trash
Other: fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
}
if q.Limit > 0 {
usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used
usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
if ext != "" {
if !strings.HasSuffix(remote, ext) {
fs.Debugf(src, "Can't move - not same document type")
return nil, fs.ErrorCantMove
}
remote = remote[:len(remote)-len(ext)]
}
_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
if err != nil {
return nil, err
}
// Temporary Object under construction
dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
dstParents := strings.Join(dstInfo.Parents, ",")
dstInfo.Parents = nil
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(srcObj.id, dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.newObjectWithInfo(remote, info)
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
id = o.(fs.IDer).ID()
}
permission := &drive.Permission{
AllowFileDiscovery: false,
Role: "reader",
Type: "anyone",
}
err = f.pacer.Call(func() (bool, error) {
// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
_, err = f.svc.Permissions.Create(id, permission).
Fields("").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return "", err
}
return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src parent
var srcDirectoryID string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
}
if err != nil {
return err
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
patch := drive.File{
Name: leaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(srcID, &patch).
RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID).
Fields("").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return err
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
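// if the initial StartPageToken fetch failed, retry it before polling for changes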
if startPageToken == "" {
startPageToken, err = f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
continue
}
}
fs.Debugf(f, "Checking for changes on remote")
startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Do()
return shouldRetry(err)
})
if err != nil {
return
}
return startPageToken.StartPageToken, nil
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
pageToken := startPageToken
for {
var changeList *drive.ChangeList
err = f.pacer.Call(func() (bool, error) {
changesCall := f.svc.Changes.List(pageToken).
Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
if f.opt.ListChunk > 0 {
changesCall.PageSize(f.opt.ListChunk)
}
changesCall.SupportsAllDrives(true)
changesCall.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
changesCall.DriveId(f.opt.TeamDriveID)
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changeList, err = changesCall.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return
}
type entryType struct {
path string
entryType fs.EntryType
}
var pathsToClear []entryType
for _, change := range changeList.Changes {
// find the previous path
if path, ok := f.dirCache.GetInv(change.FileId); ok {
if change.File != nil && change.File.MimeType != driveFolderType {
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
} else {
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
}
}
// find the new path
if change.File != nil {
change.File.Name = enc.ToStandardName(change.File.Name)
changeType := fs.EntryDirectory
if change.File.MimeType != driveFolderType {
changeType = fs.EntryObject
}
// translate the parent dir of this object
if len(change.File.Parents) > 0 {
for _, parent := range change.File.Parents {
if parentPath, ok := f.dirCache.GetInv(parent); ok {
// and append the drive file name to compute the full file name
newPath := path.Join(parentPath, change.File.Name)
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
}
}
} else { // a true root object that is changed
pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
}
}
}
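// notify each changed path only once, even if several changes touched it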
visitedPaths := make(map[string]struct{})
for _, entry := range pathsToClear {
if _, ok := visitedPaths[entry.path]; ok {
continue
}
visitedPaths[entry.path] = struct{}{}
notifyFunc(entry.path, entry.entryType)
}
switch {
case changeList.NewStartPageToken != "":
return changeList.NewStartPageToken, nil
case changeList.NextPageToken != "":
pageToken = changeList.NextPageToken
default:
return
}
}
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *baseObject) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *baseObject) String() string {
return o.remote
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *baseObject) Remote() string {
return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return "", nil
}
// Size returns the size of an object in bytes
func (o *baseObject) Size() int64 {
return o.bytes
}
// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, "", "", "", false, fs.ErrorObjectNotFound
}
return nil, "", "", "", false, err
}
found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
info = item
return true
}
if isDocument {
return false
}
}
if item.Name == leaf {
info = item
return true
}
return false
})
if err != nil {
return nil, "", "", "", false, err
}
if !found {
return nil, "", "", "", false, fs.ErrorObjectNotFound
}
return
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
if err != nil {
fs.Debugf(o, "Failed to read mtime from object: %v", err)
return time.Now()
}
return modTime
}
// SetModTime sets the modification time of the drive fs object
func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
// New metadata
updateInfo := &drive.File{
ModifiedTime: modTime.Format(timeFormatOut),
}
// Set modified date
var info *drive.File
err := o.fs.pacer.Call(func() (bool, error) {
var err error
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return err
}
// Update info from read data
o.modifiedDate = info.ModifiedTime
return nil
}
// Storable returns a boolean as to whether this object is storable
func (o *baseObject) Storable() bool {
return true
}
// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
if url == "" {
return nil, nil, errors.New("forbidden to download - check sharing permission")
}
req, err = http.NewRequest(method, url, nil)
if err != nil {
return req, nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
err = googleapi.CheckResponse(res)
if err != nil {
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(err)
})
if err != nil {
return req, nil, err
}
return req, res, nil
}
// openDocumentFile represents a documentObject open for reading.
// It updates the object size after a successful read.
type openDocumentFile struct {
o *documentObject // Object we are reading for
in io.ReadCloser // reading from here
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
errored bool // whether we have encountered an error during reading
}
// Read bytes from the object - see io.Reader
func (file *openDocumentFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
file.bytes += int64(n)
if err != nil && err != io.EOF {
file.errored = true
}
if err == io.EOF {
file.eof = true
}
return
}
// Close the object and update bytes read
func (file *openDocumentFile) Close() (err error) {
// If end of file, update bytes read
if file.eof && !file.errored {
fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
file.o.bytes = file.bytes
}
return file.in.Close()
}
// Check it satisfies the interfaces
var _ io.ReadCloser = (*openDocumentFile)(nil)
// isGoogleError checks to see if err is a googleapi.Error with a reason equal to what
func isGoogleError(err error, what string) bool {
if gerr, ok := err.(*googleapi.Error); ok {
for _, error := range gerr.Errors {
if error.Reason == what {
return true
}
}
}
return false
}
// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
_, res, err := o.httpResponse(ctx, url, "GET", options)
if err != nil {
if isGoogleError(err, "cannotDownloadAbusiveFile") {
if o.fs.opt.AcknowledgeAbuse {
// Retry acknowledging abuse
if strings.ContainsRune(url, '?') {
url += "&"
} else {
url += "?"
}
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
}
}
if err != nil {
return nil, errors.Wrap(err, "open file failed")
}
}
return res.Body, nil
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
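// if this object was marked for v2 download, look up the v2 API downloadUrl and use it as the URL to open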
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {
v2File, err = o.fs.v2Svc.Files.Get(o.id).
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
o.url = v2File.DownloadUrl
o.v2Download = false
}
}
return o.baseObject.open(ctx, o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted.
var offset, end int64 = 0, -1
var newOptions = options[:0]
for _, o := range options {
// Note that Range requests don't work on Google docs:
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
// So do a subset of them manually
switch x := o.(type) {
case *fs.RangeOption:
offset, end = x.Start, x.End
case *fs.SeekOption:
offset, end = x.Offset, -1
default:
newOptions = append(newOptions, o)
}
}
options = newOptions
if offset != 0 {
return nil, errors.New("partial downloads are not supported while exporting Google Documents")
}
in, err = o.baseObject.open(ctx, o.url, options...)
if in != nil {
in = &openDocumentFile{o: o, in: in}
}
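// the export download ignores ranges, so emulate the requested end by limiting the reader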
if end >= 0 {
in = readers.NewLimitedReadCloser(in, end-offset+1)
}
return
}
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
var data = o.content
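// the link file content lives in memory, so seek and range options are honoured by slicing the byte slice directly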
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(int64(len(data)))
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if l := int64(len(data)); offset > l {
offset = l
}
data = data[offset:]
if limit != -1 && limit < int64(len(data)) {
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo) (info *drive.File, err error) {
// Make the API request to upload metadata and file data.
size := src.Size()
if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
})
return
}
// Upload the file in chunks
return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src)
updateInfo := &drive.File{
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
if err != nil {
return err
}
switch newO := newO.(type) {
case *Object:
*o = *newO
default:
return errors.New("object type changed by update")
}
return nil
}
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src)
importMimeType := ""
updateInfo := &drive.File{
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
}
if importMimeType != o.documentMimeType {
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
}
updateInfo.MimeType = importMimeType
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
remote := src.Remote()
remote = remote[:len(remote)-o.extLen]
newO, err := o.fs.newObjectWithInfo(remote, info)
if err != nil {
return err
}
switch newO := newO.(type) {
case *documentObject:
*o = *newO
default:
return errors.New("object type changed by update")
}
return nil
}
func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errors.New("cannot update link files")
}
// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = o.fs.svc.Files.Update(o.id, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = o.fs.svc.Files.Delete(o.id).
Fields("").
SupportsAllDrives(true).
Do()
}
return shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string {
return o.mimeType
}
// ID returns the ID of the Object if known, or "" if not
func (o *baseObject) ID() string {
return o.id
}
func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
// templates for document link files
const (
urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>URL</key>
<string>{{ .URL }}</string>
</dict>
</plist>
`
desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Type=Link
`
htmlTemplate = `<html>
<head>
<meta http-equiv="refresh" content="0; url={{ .URL }}" />
<title>{{ .Title }}</title>
</head>
<body>
Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.Object = (*documentObject)(nil)
_ fs.MimeTyper = (*documentObject)(nil)
_ fs.IDer = (*documentObject)(nil)
_ fs.Object = (*linkObject)(nil)
_ fs.MimeTyper = (*linkObject)(nil)
_ fs.IDer = (*linkObject)(nil)
)
| 1 | 9,952 | Perhaps note here that `--drive-use-created-date` takes precedence if both set? | rclone-rclone | go |
@@ -128,7 +128,7 @@ class DynamicRoIHead(StandardRoIHead):
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
- def update_hyperparameters(self):
+ def update_hyperparameters(self, eps=1e-15):
"""Update hyperparameters like IoU thresholds for assigner and beta for
SmoothL1 loss based on the training statistics.
| 1 | import numpy as np
import torch
from mmdet.core import bbox2roi
from mmdet.models.losses import SmoothL1Loss
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class DynamicRoIHead(StandardRoIHead):
"""RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_."""
def __init__(self, **kwargs):
super(DynamicRoIHead, self).__init__(**kwargs)
assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
# the IoU history of the past `update_iter_interval` iterations
self.iou_history = []
# the beta history of the past `update_iter_interval` iterations
self.beta_history = []
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""Forward function for training.
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): each item is the ground truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
cur_iou = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
# record the `iou_topk`-th largest IoU in an image
iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
len(assign_result.max_overlaps))
ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
cur_iou.append(ious[-1].item())
sampling_results.append(sampling_result)
# average the current IoUs over images
cur_iou = np.mean(cur_iou)
self.iou_history.append(cur_iou)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
# update IoU threshold and SmoothL1 beta
update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
if len(self.iou_history) % update_iter_interval == 0:
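# periodically refresh the assigner IoU thresholds and the SmoothL1 beta from the collected statistics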
new_iou_thr, new_beta = self.update_hyperparameters()
return losses
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
num_imgs = len(img_metas)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
# record the `beta_topk`-th smallest target
# `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
# and bbox_weights, respectively
pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
num_pos = len(pos_inds)
cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
num_pos)
cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
self.beta_history.append(cur_target)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def update_hyperparameters(self):
"""Update hyperparameters like IoU thresholds for assigner and beta for
SmoothL1 loss based on the training statistics.
Returns:
tuple[float]: the updated ``iou_thr`` and ``beta``.
"""
new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
np.mean(self.iou_history))
self.iou_history = []
self.bbox_assigner.pos_iou_thr = new_iou_thr
self.bbox_assigner.neg_iou_thr = new_iou_thr
self.bbox_assigner.min_pos_iou = new_iou_thr
new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
np.median(self.beta_history))
self.beta_history = []
self.bbox_head.loss_bbox.beta = new_beta
return new_iou_thr, new_beta
| 1 | 22,059 | Use EPS=1e-15 as in atss_head or FCOS head. | open-mmlab-mmdetection | py |
@@ -40,7 +40,7 @@ func TestBlobSaver(t *testing.T) {
tmb, ctx := tomb.WithContext(ctx)
saver := &saveFail{
- idx: repository.NewIndex(),
+ idx: repository.NewMasterIndex(),
}
b := NewBlobSaver(ctx, tmb, saver, uint(runtime.NumCPU())) | 1 | package archiver
import (
"context"
"fmt"
"runtime"
"sync/atomic"
"testing"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
tomb "gopkg.in/tomb.v2"
)
var errTest = errors.New("test error")
type saveFail struct {
idx restic.Index
cnt int32
failAt int32
}
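// SaveBlob counts calls and returns errTest on the failAt'th call, otherwise it reports the blob as saved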
func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, error) {
val := atomic.AddInt32(&b.cnt, 1)
if val == b.failAt {
return restic.ID{}, false, errTest
}
return id, false, nil
}
func (b *saveFail) Index() restic.Index {
return b.idx
}
func TestBlobSaver(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tmb, ctx := tomb.WithContext(ctx)
saver := &saveFail{
idx: repository.NewIndex(),
}
b := NewBlobSaver(ctx, tmb, saver, uint(runtime.NumCPU()))
var results []FutureBlob
for i := 0; i < 20; i++ {
buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))}
fb := b.Save(ctx, restic.DataBlob, buf)
results = append(results, fb)
}
for i, blob := range results {
blob.Wait(ctx)
if blob.Known() {
t.Errorf("blob %v is known, that should not be the case", i)
}
}
tmb.Kill(nil)
err := tmb.Wait()
if err != nil {
t.Fatal(err)
}
}
func TestBlobSaverError(t *testing.T) {
var tests = []struct {
blobs int
failAt int
}{
{20, 2},
{20, 5},
{20, 15},
{200, 150},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tmb, ctx := tomb.WithContext(ctx)
saver := &saveFail{
idx: repository.NewIndex(),
failAt: int32(test.failAt),
}
b := NewBlobSaver(ctx, tmb, saver, uint(runtime.NumCPU()))
var results []FutureBlob
for i := 0; i < test.blobs; i++ {
buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))}
fb := b.Save(ctx, restic.DataBlob, buf)
results = append(results, fb)
}
tmb.Kill(nil)
err := tmb.Wait()
if err == nil {
t.Errorf("expected error not found")
}
if err != errTest {
t.Fatalf("unexpected error found: %v", err)
}
})
}
}
| 1 | 13,063 | Is there a need to replace the Index with a MasterIndex? | restic-restic | go |
@@ -389,6 +389,7 @@ func TestExecCommandAgent(t *testing.T) {
verifyExecCmdAgentExpectedMounts(t, ctx, client, testTaskId, cid, testContainerName, testExecCmdHostBinDir)
pidA := verifyMockExecCommandAgentIsRunning(t, client, cid)
+ verifyExecAgentRunningStateChange(t, taskEngine)
seelog.Infof("Verified mock ExecCommandAgent is running (pidA=%s)", pidA)
killMockExecCommandAgent(t, client, cid, pidA)
seelog.Infof("kill signal sent to ExecCommandAgent (pidA=%s)", pidA) | 1 | // +build linux,sudo
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/data"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control"
"github.com/aws/amazon-ecs-agent/agent/taskresource/firelens"
taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper"
"github.com/aws/amazon-ecs-agent/agent/utils/retry"
"github.com/cihub/seelog"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
sdkClient "github.com/docker/docker/client"
"github.com/pborman/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
endpoint = utils.DefaultIfBlank(os.Getenv(DockerEndpointEnvVariable), DockerDefaultEndpoint)
)
const (
testLogSenderImage = "amazonlinux:2"
testFluentbitImage = "amazon/aws-for-fluent-bit:latest"
testVolumeImage = "127.0.0.1:51670/amazon/amazon-ecs-volumes-test:latest"
testCluster = "testCluster"
validTaskArnPrefix = "arn:aws:ecs:region:account-id:task/"
testDataDir = "/var/lib/ecs/data/"
testDataDirOnHost = "/var/lib/ecs/"
testInstanceID = "testInstanceID"
testTaskDefFamily = "testFamily"
testTaskDefVersion = "1"
testECSRegion = "us-east-1"
testLogGroupName = "test-fluentbit"
testLogGroupPrefix = "firelens-fluentbit-"
testExecCommandAgentImage = "127.0.0.1:51670/amazon/amazon-ecs-exec-command-agent-test:latest"
testExecCommandAgentSleepBin = "/sleep"
testExecCommandAgentKillBin = "/kill"
)
func TestStartStopWithCgroup(t *testing.T) {
cfg := defaultTestConfigIntegTest()
cfg.TaskCleanupWaitDuration = 1 * time.Second
cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled
cfg.CgroupPath = "/cgroup"
taskEngine, done, _ := setup(cfg, nil, t)
defer done()
stateChangeEvents := taskEngine.StateChangeEvents()
taskArn := "arn:aws:ecs:us-east-1:123456789012:task/testCgroup"
testTask := createTestTask(taskArn)
testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
for _, container := range testTask.Containers {
container.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
}
control := cgroup.New()
commonResources := &taskresource.ResourceFieldsCommon{
IOUtil: ioutilwrapper.NewIOUtil(),
}
taskEngine.(*DockerTaskEngine).resourceFields = &taskresource.ResourceFields{
Control: control,
ResourceFieldsCommon: commonResources,
}
go taskEngine.AddTask(testTask)
verifyContainerRunningStateChange(t, taskEngine)
verifyTaskIsRunning(stateChangeEvents, testTask)
verifyContainerStoppedStateChange(t, taskEngine)
verifyTaskIsStopped(stateChangeEvents, testTask)
// Should be stopped, let's verify it's still listed...
task, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn)
assert.True(t, ok, "Expected task to be present still, but wasn't")
cgroupRoot, err := testTask.BuildCgroupRoot()
assert.Nil(t, err)
assert.True(t, control.Exists(cgroupRoot))
task.SetSentStatus(apitaskstatus.TaskStopped) // cleanupTask waits for TaskStopped to be sent before cleaning
time.Sleep(cfg.TaskCleanupWaitDuration)
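// poll for up to 60 seconds for the task to be removed from the engine state after cleanup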
for i := 0; i < 60; i++ {
_, ok = taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn)
if !ok {
break
}
time.Sleep(1 * time.Second)
}
assert.False(t, ok, "Expected container to have been swept but was not")
assert.False(t, control.Exists(cgroupRoot))
}
func TestLocalHostVolumeMount(t *testing.T) {
cfg := defaultTestConfigIntegTest()
taskEngine, done, _ := setup(cfg, nil, t)
defer done()
// creates a task with local volume
testTask := createTestLocalVolumeMountTask()
stateChangeEvents := taskEngine.StateChangeEvents()
go taskEngine.AddTask(testTask)
verifyContainerRunningStateChange(t, taskEngine)
verifyTaskIsRunning(stateChangeEvents, testTask)
verifyContainerStoppedStateChange(t, taskEngine)
verifyTaskIsStopped(stateChangeEvents, testTask)
assert.NotNil(t, testTask.Containers[0].GetKnownExitCode(), "No exit code found")
assert.Equal(t, 0, *testTask.Containers[0].GetKnownExitCode(), "Wrong exit code")
data, err := ioutil.ReadFile(filepath.Join("/var/lib/docker/volumes/", testTask.Volumes[0].Volume.Source(), "/_data", "hello-from-container"))
assert.Nil(t, err, "Unexpected error")
assert.Equal(t, "empty-data-volume", strings.TrimSpace(string(data)), "Incorrect file contents")
}
func createTestLocalVolumeMountTask() *apitask.Task {
testTask := createTestTask("testLocalHostVolumeMount")
testTask.Volumes = []apitask.TaskVolume{{Name: "test-tmp", Volume: &taskresourcevolume.LocalDockerVolume{}}}
testTask.Containers[0].Image = testVolumeImage
testTask.Containers[0].MountPoints = []apicontainer.MountPoint{{ContainerPath: "/host/tmp", SourceVolume: "test-tmp"}}
testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
testTask.Containers[0].TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
testTask.Containers[0].Command = []string{`echo -n "empty-data-volume" > /host/tmp/hello-from-container;`}
return testTask
}
func TestFirelensFluentbit(t *testing.T) {
// Skipping the test for arm64, as there is no official support for ARM images
if runtime.GOARCH == "arm64" {
t.Skip("Skipping test, unsupported image for arm64")
}
cfg := defaultTestConfigIntegTest()
cfg.DataDir = testDataDir
cfg.DataDirOnHost = testDataDirOnHost
cfg.TaskCleanupWaitDuration = 1 * time.Second
cfg.Cluster = testCluster
taskEngine, done, _ := setup(cfg, nil, t)
defer done()
testTask := createFirelensTask(t)
taskEngine.(*DockerTaskEngine).resourceFields = &taskresource.ResourceFields{
ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{
EC2InstanceID: testInstanceID,
},
}
go taskEngine.AddTask(testTask)
testEvents := InitEventCollection(taskEngine)
//Verify logsender container is running
err := VerifyContainerStatus(apicontainerstatus.ContainerRunning, testTask.Arn+":logsender", testEvents, t)
assert.NoError(t, err, "Verify logsender container is running")
//Verify firelens container is running
err = VerifyContainerStatus(apicontainerstatus.ContainerRunning, testTask.Arn+":firelens", testEvents, t)
assert.NoError(t, err, "Verify firelens container is running")
//Verify task is in running state
err = VerifyTaskStatus(apitaskstatus.TaskRunning, testTask.Arn, testEvents, t)
assert.NoError(t, err, "Not verified task running")
//Verify logsender container is stopped
err = VerifyContainerStatus(apicontainerstatus.ContainerStopped, testTask.Arn+":logsender", testEvents, t)
assert.NoError(t, err)
//Verify firelens container is stopped
err = VerifyContainerStatus(apicontainerstatus.ContainerStopped, testTask.Arn+":firelens", testEvents, t)
assert.NoError(t, err)
//Verify the task itself has stopped
err = VerifyTaskStatus(apitaskstatus.TaskStopped, testTask.Arn, testEvents, t)
assert.NoError(t, err)
taskID, err := testTask.GetID()
//declare a cloudwatch client
cwlClient := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(testECSRegion))
params := &cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(testLogGroupName),
LogStreamName: aws.String(fmt.Sprintf("firelens-fluentbit-logsender-firelens-%s", taskID)),
}
// wait for the cloud watch logs
resp, err := waitCloudwatchLogs(cwlClient, params)
require.NoError(t, err)
// there should only be one event, as we are echoing only one line that matches the include-filter
assert.Equal(t, 1, len(resp.Events))
message := aws.StringValue(resp.Events[0].Message)
jsonBlob := make(map[string]string)
err = json.Unmarshal([]byte(message), &jsonBlob)
require.NoError(t, err)
assert.Equal(t, "stdout", jsonBlob["source"])
assert.Equal(t, "include", jsonBlob["log"])
assert.Contains(t, jsonBlob, "container_id")
assert.Contains(t, jsonBlob["container_name"], "logsender")
assert.Equal(t, testCluster, jsonBlob["ecs_cluster"])
assert.Equal(t, testTask.Arn, jsonBlob["ecs_task_arn"])
testTask.SetSentStatus(apitaskstatus.TaskStopped)
time.Sleep(3 * cfg.TaskCleanupWaitDuration)
for i := 0; i < 60; i++ {
_, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(testTask.Arn)
if !ok {
break
}
time.Sleep(1 * time.Second)
}
// Make sure all the resources are cleaned up
_, err = ioutil.ReadDir(filepath.Join(testDataDir, "firelens", testTask.Arn))
assert.Error(t, err)
}
func createFirelensTask(t *testing.T) *apitask.Task {
testTask := createTestTask(validTaskArnPrefix + uuid.New())
rawHostConfigInputForLogSender := dockercontainer.HostConfig{
LogConfig: dockercontainer.LogConfig{
Type: logDriverTypeFirelens,
Config: map[string]string{
"Name": "cloudwatch",
"exclude-pattern": "exclude",
"include-pattern": "include",
"log_group_name": testLogGroupName,
"log_stream_prefix": testLogGroupPrefix,
"region": testECSRegion,
"auto_create_group": "true",
},
},
}
rawHostConfigForLogSender, err := json.Marshal(&rawHostConfigInputForLogSender)
require.NoError(t, err)
testTask.Containers = []*apicontainer.Container{
{
Name: "logsender",
Image: testLogSenderImage,
Essential: true,
// TODO: the firelens router occasionally fails to send logs when it is shut down very quickly after starting.
// Letting the task run for a while with a sleep helps avoid that failure, but the root cause still needs to be
// figured out.
Command: []string{"sh", "-c", "echo exclude; echo include; sleep 10;"},
DockerConfig: apicontainer.DockerConfig{
HostConfig: func() *string {
s := string(rawHostConfigForLogSender)
return &s
}(),
},
DependsOnUnsafe: []apicontainer.DependsOn{
{
ContainerName: "firelens",
Condition: "START",
},
},
},
{
Name: "firelens",
Image: testFluentbitImage,
Essential: true,
FirelensConfig: &apicontainer.FirelensConfig{
Type: firelens.FirelensConfigTypeFluentbit,
Options: map[string]string{
"enable-ecs-log-metadata": "true",
},
},
Environment: map[string]string{
"AWS_EXECUTION_ENV": "AWS_ECS_EC2",
"FLB_LOG_LEVEL": "debug",
},
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
}
testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
return testTask
}
func waitCloudwatchLogs(client *cloudwatchlogs.CloudWatchLogs, params *cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error) {
// The test could fail due to timing issues, so retry for 30 seconds to make this test more stable
for i := 0; i < 30; i++ {
resp, err := client.GetLogEvents(params)
if err != nil {
awsError, ok := err.(awserr.Error)
if !ok || awsError.Code() != "ResourceNotFoundException" {
return nil, err
}
} else if len(resp.Events) > 0 {
return resp, nil
}
time.Sleep(time.Second)
}
return nil, fmt.Errorf("timeout waiting for the logs to be sent to cloud watch logs")
}
// TestExecCommandAgent validates ExecCommandAgent start and monitor processes. The algorithm to test is as follows:
// 1. Pre-setup: the make file in ../../misc/exec-command-agent-test will create a special docker sleeper image
// based on a scratch image. This image simulates a customer image and contains pre-baked /sleep and /kill binaries.
// /sleep is the main process used to launch the test container; /kill is an application that kills a process running in
// the container given a PID.
// The make file will also create a fake amazon-ssm-agent, which is a Go program that simply sleeps for a specified amount of time.
//
// 2. Setup: Create a new docker task engine with a modified path pointing to our fake amazon-ssm-agent binary
// 3. Create and start our test task using our test image
// 4. Wait for the task to start and verify that the expected ExecCommandAgent bind mounts are present in the containers
// 5. Verify that our fake amazon-ssm-agent was started inside the container using docker top, and retrieve its PID
// 6. Kill the fake amazon-ssm-agent using the PID retrieved in previous step
// 7. Verify that the engine restarted our fake amazon-ssm-agent by running docker top one more time (a new PID should show up)
func TestExecCommandAgent(t *testing.T) {
const (
testTaskId = "exec-command-agent-test-task"
testContainerName = "exec-command-agent-test-container"
sleepFor = time.Minute * 2
)
client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String()))
require.NoError(t, err, "Creating go docker client failed")
testExecCmdHostBinDir, err := filepath.Abs("../../misc/exec-command-agent-test")
require.NoError(t, err)
taskEngine, done, _ := setupEngineForExecCommandAgent(t, testExecCmdHostBinDir)
stateChangeEvents := taskEngine.StateChangeEvents()
defer done()
testTask := createTestExecCommandAgentTask(testTaskId, testContainerName, sleepFor)
execAgentLogPath := filepath.Join("/log/exec", testTaskId)
err = os.MkdirAll(execAgentLogPath, 0644)
require.NoError(t, err, "error creating execAgent log file")
_, err = os.Stat(execAgentLogPath)
require.NoError(t, err, "execAgent log dir doesn't exist")
go taskEngine.AddTask(testTask)
verifyContainerRunningStateChange(t, taskEngine)
verifyTaskRunningStateChange(t, taskEngine)
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn)
cid := containerMap[testTask.Containers[0].Name].DockerID
verifyExecCmdAgentExpectedMounts(t, ctx, client, testTaskId, cid, testContainerName, testExecCmdHostBinDir)
pidA := verifyMockExecCommandAgentIsRunning(t, client, cid)
seelog.Infof("Verified mock ExecCommandAgent is running (pidA=%s)", pidA)
killMockExecCommandAgent(t, client, cid, pidA)
seelog.Infof("kill signal sent to ExecCommandAgent (pidA=%s)", pidA)
verifyMockExecCommandAgentIsStopped(t, client, cid, pidA)
seelog.Infof("Verified mock ExecCommandAgent was killed (pidA=%s)", pidA)
pidB := verifyMockExecCommandAgentIsRunning(t, client, cid)
seelog.Infof("Verified mock ExecCommandAgent was restarted (pidB=%s)", pidB)
require.NotEqual(t, pidA, pidB, "ExecCommandAgent PID did not change after restart")
taskUpdate := createTestExecCommandAgentTask(testTaskId, testContainerName, sleepFor)
taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped)
go taskEngine.AddTask(taskUpdate)
ctx, cancel = context.WithTimeout(context.Background(), time.Second*20)
go func() {
verifyTaskIsStopped(stateChangeEvents, testTask)
cancel()
}()
<-ctx.Done()
require.NotEqual(t, context.DeadlineExceeded, ctx.Err(), "Timed out waiting for task (%s) to stop", testTaskId)
assert.NotNil(t, testTask.Containers[0].GetKnownExitCode(), "No exit code found")
taskEngine.(*DockerTaskEngine).deleteTask(testTask)
_, err = os.Stat(execAgentLogPath)
assert.True(t, os.IsNotExist(err), "execAgent log cleanup failed")
}
func createTestExecCommandAgentTask(taskId, containerName string, sleepFor time.Duration) *apitask.Task {
testTask := createTestTask("arn:aws:ecs:us-west-2:1234567890:task/" + taskId)
testTask.ExecCommandAgentEnabledUnsafe = true
testTask.PIDMode = ecs.PidModeHost
testTask.Containers[0].Name = containerName
testTask.Containers[0].Image = testExecCommandAgentImage
testTask.Containers[0].Command = []string{testExecCommandAgentSleepBin, "-time=" + sleepFor.String()}
return testTask
}
// setupEngineForExecCommandAgent creates a new TaskEngine with a custom execcmd.Manager that will attempt to read the
// host binaries from the directory passed as parameter (as opposed to the default directory).
// Additionally, it overrides the engine's monitorExecAgentsInterval to one second.
func setupEngineForExecCommandAgent(t *testing.T, hostBinDir string) (TaskEngine, func(), credentials.Manager) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
skipIntegTestIfApplicable(t)
cfg := defaultTestConfigIntegTest()
sdkClientFactory := sdkclientfactory.NewFactory(ctx, dockerEndpoint)
dockerClient, err := dockerapi.NewDockerGoClient(sdkClientFactory, cfg, context.Background())
if err != nil {
t.Fatalf("Error creating Docker client: %v", err)
}
credentialsManager := credentials.NewManager()
state := dockerstate.NewTaskEngineState()
imageManager := NewImageManager(cfg, dockerClient, state)
imageManager.SetDataClient(data.NewNoopClient())
metadataManager := containermetadata.NewManager(dockerClient, cfg)
execCmdMgr := execcmd.NewManagerWithBinDir(hostBinDir)
taskEngine := NewDockerTaskEngine(cfg, dockerClient, credentialsManager,
eventstream.NewEventStream("ENGINEINTEGTEST", context.Background()), imageManager, state, metadataManager,
nil, execCmdMgr)
taskEngine.monitorExecAgentsInterval = time.Second
taskEngine.MustInit(context.TODO())
return taskEngine, func() {
taskEngine.Shutdown()
}, credentialsManager
}
func verifyExecCmdAgentExpectedMounts(t *testing.T, ctx context.Context, client *sdkClient.Client, testTaskId, containerId, containerName, testExecCmdHostBinDir string) {
inspectState, _ := client.ContainerInspect(ctx, containerId)
expectedMounts := []struct {
source string
dest string
readOnly bool
}{
{
source: filepath.Join(testExecCmdHostBinDir, execcmd.BinName),
dest: filepath.Join(execcmd.ContainerBinDir, execcmd.BinName),
readOnly: true,
},
{
source: filepath.Join(testExecCmdHostBinDir, execcmd.SessionWorkerBinName),
dest: filepath.Join(execcmd.ContainerBinDir, execcmd.SessionWorkerBinName),
readOnly: true,
},
{
source: execcmd.HostCertFile,
dest: execcmd.ContainerCertFile,
readOnly: true,
},
{
source: filepath.Join(testExecCmdHostBinDir, execcmd.ConfigFileName),
dest: execcmd.ContainerConfigFile,
readOnly: true,
},
{
source: filepath.Join(execcmd.HostLogDir, testTaskId, containerName),
dest: execcmd.ContainerLogDir,
readOnly: false,
},
}
for _, em := range expectedMounts {
var found *types.MountPoint
for _, m := range inspectState.Mounts {
if m.Source == em.source {
found = &m
break
}
}
require.NotNil(t, found, "Expected mount point not found (%s)", em.source)
require.Equal(t, em.dest, found.Destination, "Destination for mount point (%s) is invalid expected: %s, actual: %s", em.source, em.dest, found.Destination)
if em.readOnly {
require.Equal(t, "ro", found.Mode, "Destination for mount point (%s) should be read only", em.source)
} else {
require.True(t, found.RW, "Destination for mount point (%s) should be writable", em.source)
}
require.Equal(t, "bind", string(found.Type), "Destination for mount point (%s) is not of type bind", em.source)
}
require.Equal(t, len(expectedMounts), len(inspectState.Mounts), "Wrong number of bind mounts detected in container (%s)", containerName)
}
func verifyMockExecCommandAgentIsRunning(t *testing.T, client *sdkClient.Client, containerId string) string {
return verifyMockExecCommandAgentStatus(t, client, containerId, "", true)
}
func verifyMockExecCommandAgentIsStopped(t *testing.T, client *sdkClient.Client, containerId, pid string) {
verifyMockExecCommandAgentStatus(t, client, containerId, pid, false)
}
func verifyMockExecCommandAgentStatus(t *testing.T, client *sdkClient.Client, containerId, expectedPid string, checkIsRunning bool) string {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
res := make(chan string, 1)
go func() {
for {
top, err := client.ContainerTop(ctx, containerId, nil)
if err != nil {
continue
}
cmdPos := -1
pidPos := -1
for i, t := range top.Titles {
if strings.ToUpper(t) == "CMD" {
cmdPos = i
}
if strings.ToUpper(t) == "PID" {
pidPos = i
}
}
require.NotEqual(t, -1, cmdPos, "CMD title not found in the container top response")
require.NotEqual(t, -1, pidPos, "PID title not found in the container top response")
for _, proc := range top.Processes {
if proc[cmdPos] == filepath.Join(execcmd.ContainerBinDir, execcmd.BinName) {
res <- proc[pidPos]
return
}
}
select {
case <-ctx.Done():
return
case <-time.After(retry.AddJitter(time.Second, time.Second*5)):
}
}
}()
var (
isRunning bool
pid string
)
select {
case <-ctx.Done():
case r := <-res:
if r != "" {
pid = r
isRunning = true
if expectedPid != "" && pid != expectedPid {
isRunning = false
}
}
}
require.Equal(t, checkIsRunning, isRunning, "ExecCmdAgent was not in the desired running-status")
return pid
}
func killMockExecCommandAgent(t *testing.T, client *sdkClient.Client, containerId, pid string) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
create, err := client.ContainerExecCreate(ctx, containerId, types.ExecConfig{
Detach: true,
Cmd: []string{testExecCommandAgentKillBin, "-pid=" + pid},
})
require.NoError(t, err)
err = client.ContainerExecStart(ctx, create.ID, types.ExecStartCheck{
Detach: true,
})
require.NoError(t, err)
}
func verifyTaskRunningStateChange(t *testing.T, taskEngine TaskEngine) {
stateChangeEvents := taskEngine.StateChangeEvents()
event := <-stateChangeEvents
assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning,
"Expected task to be RUNNING")
}
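// The review comment on this change asks about the stop state change. A stopped-task
// counterpart to verifyTaskRunningStateChange above could look roughly like the sketch
// below; the helper name and how it would be wired into the tests are assumptions, not
// something defined in this file.
func verifyTaskStoppedStateChange(t *testing.T, taskEngine TaskEngine) {
	stateChangeEvents := taskEngine.StateChangeEvents()
	// Drain the next task-level event and assert that the task reached STOPPED.
	event := <-stateChangeEvents
	assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped,
		"Expected task to be STOPPED")
}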
 | 1 | 25,476 | Are we adding the stop state change in a different PR? | aws-amazon-ecs-agent | go
@@ -23,9 +23,9 @@ class PlansController < ApplicationController
# Get all of the available funders and non-funder orgs
@funders = Org.funder
.joins(:templates)
- .where(templates: { published: true }).uniq.sort(&:name)
+ .where(templates: { published: true }).uniq.sort_by(&:name)
@orgs = (Org.organisation + Org.institution + Org.managing_orgs).flatten
- .uniq.sort(&:name)
+ .uniq.sort_by(&:name)
# Get the current user's org
@default_org = current_user.org if @orgs.include?(current_user.org) | 1 | # frozen_string_literal: true
class PlansController < ApplicationController
include ConditionalUserMailer
helper PaginableHelper
helper SettingsTemplateHelper
after_action :verify_authorized, except: [:overview]
def index
authorize Plan
@plans = Plan.active(current_user).page(1)
@organisationally_or_publicly_visible =
Plan.organisationally_or_publicly_visible(current_user).page(1)
end
# GET /plans/new
def new
@plan = Plan.new
authorize @plan
# Get all of the available funders and non-funder orgs
@funders = Org.funder
.joins(:templates)
.where(templates: { published: true }).uniq.sort(&:name)
@orgs = (Org.organisation + Org.institution + Org.managing_orgs).flatten
.uniq.sort(&:name)
# Get the current user's org
@default_org = current_user.org if @orgs.include?(current_user.org)
if params.key?(:test)
flash[:notice] = "#{_('This is a')} <strong>#{_('test plan')}</strong>"
end
@is_test = params[:test] ||= false
respond_to :html
end
# POST /plans
def create
@plan = Plan.new
authorize @plan
# We set these ids to -1 on the page to trick ariatiseForm into allowing the
# autocomplete to be blank if the no org/funder checkboxes are checked off
org_id = (plan_params[:org_id] == "-1" ? "" : plan_params[:org_id])
funder_id = (plan_params[:funder_id] == "-1" ? "" : plan_params[:funder_id])
# If the template_id is blank then we need to look up the available templates and
# return JSON
if plan_params[:template_id].blank?
# Something went wrong; there should always be a template id
respond_to do |format|
flash[:alert] = _("Unable to identify a suitable template for your plan.")
format.html { redirect_to new_plan_path }
end
else
# Otherwise create the plan
@plan.principal_investigator = if current_user.surname.blank?
nil
else
"#{current_user.firstname} #{current_user.surname}"
end
@plan.principal_investigator_email = current_user.email
orcid = current_user.identifier_for(IdentifierScheme.find_by(name: "orcid"))
@plan.principal_investigator_identifier = orcid.identifier unless orcid.nil?
@plan.funder_name = plan_params[:funder_name]
@plan.visibility = if plan_params["visibility"].blank?
Rails.application.config.default_plan_visibility
else
plan_params[:visibility]
end
@plan.template = Template.find(plan_params[:template_id])
if plan_params[:title].blank?
@plan.title = if current_user.firstname.blank?
_("My Plan") + "(" + @plan.template.title + ")"
else
current_user.firstname + "'s" + _(" Plan")
end
else
@plan.title = plan_params[:title]
end
if @plan.save
@plan.assign_creator(current_user)
# pre-select org's guidance and the default org's guidance
ids = (Org.managing_orgs << org_id).flatten.uniq
ggs = GuidanceGroup.where(org_id: ids, optional_subset: false, published: true)
if !ggs.blank? then @plan.guidance_groups << ggs end
default = Template.default
msg = "#{success_message(_('plan'), _('created'))}<br />"
if !default.nil? && default == @plan.template
# We used the generic/default template
msg += " #{_('This plan is based on the default template.')}"
elsif !@plan.template.customization_of.nil?
# rubocop:disable Metrics/LineLength
# We used a customized version of the funder template
# rubocop:disable Metrics/LineLength
msg += " #{_('This plan is based on the')} #{plan_params[:funder_name]}: '#{@plan.template.title}' #{_('template with customisations by the')} #{plan_params[:org_name]}"
# rubocop:enable Metrics/LineLength
else
# rubocop:disable Metrics/LineLength
# We used the specified org's or funder's template
# rubocop:disable Metrics/LineLength
msg += " #{_('This plan is based on the')} #{@plan.template.org.name}: '#{@plan.template.title}' template."
# rubocop:enable Metrics/LineLength
end
respond_to do |format|
flash[:notice] = msg
format.html { redirect_to plan_path(@plan) }
end
else
# Something went wrong so report the issue to the user
respond_to do |format|
flash[:alert] = failed_create_error(@plan, "Plan")
format.html { redirect_to new_plan_path }
end
end
end
end
# GET /plans/show
def show
@plan = Plan.includes(
template: { phases: { sections: { questions: :answers } } },
plans_guidance_groups: { guidance_group: :guidances }
).find(params[:id])
authorize @plan
@visibility = if @plan.visibility.present?
@plan.visibility.to_s
else
Rails.application.config.default_plan_visibility
end
@editing = (!params[:editing].nil? && @plan.administerable_by?(current_user.id))
# Get all Guidance Groups applicable for the plan and group them by org
@all_guidance_groups = @plan.guidance_group_options
@all_ggs_grouped_by_org = @all_guidance_groups.sort.group_by(&:org)
@selected_guidance_groups = @plan.guidance_groups
# Important ones come first on the page - we grab the user's org's GGs and
# "Organisation" org type GGs
@important_ggs = []
if @all_ggs_grouped_by_org.include?(current_user.org)
@important_ggs << [current_user.org, @all_ggs_grouped_by_org[current_user.org]]
end
@all_ggs_grouped_by_org.each do |org, ggs|
if org.organisation?
@important_ggs << [org, ggs]
end
# If this is one of the already selected guidance groups its important!
if !(ggs & @selected_guidance_groups).empty?
@important_ggs << [org, ggs] unless @important_ggs.include?([org, ggs])
end
end
# Sort the rest by org name for the accordion
@important_ggs = @important_ggs.sort_by { |org, gg| (org.nil? ? "" : org.name) }
@all_ggs_grouped_by_org = @all_ggs_grouped_by_org.sort_by do |org, gg|
(org.nil? ? "" : org.name)
end
@selected_guidance_groups = @selected_guidance_groups.ids
@based_on = if @plan.template.customization_of.nil?
@plan.template
else
Template.where(family_id: @plan.template.customization_of).first
end
respond_to :html
end
# GET /plans/:plan_id/phases/:id/edit
def edit
plan = Plan.find(params[:id])
authorize plan
plan, phase = Plan.load_for_phase(params[:id], params[:phase_id])
guidance_groups = GuidanceGroup.where(published: true, id: plan.guidance_group_ids)
render_phases_edit(plan, phase, guidance_groups)
end
# PUT /plans/1
# PUT /plans/1.json
def update
@plan = Plan.find(params[:id])
authorize @plan
attrs = plan_params
# rubocop:disable Metrics/BlockLength
respond_to do |format|
begin
# Save the guidance group selections
guidance_group_ids = if params[:guidance_group_ids].blank?
[]
else
params[:guidance_group_ids].map(&:to_i).uniq
end
@plan.guidance_groups = GuidanceGroup.where(id: guidance_group_ids)
@plan.save
if @plan.update_attributes(attrs)
format.html do
redirect_to overview_plan_path(@plan),
notice: success_message(_("plan"), _("saved"))
end
format.json do
render json: { code: 1, msg: success_message(_("plan"), _("saved")) }
end
else
flash[:alert] = failed_update_error(@plan, _("plan"))
format.html do
render_phases_edit(@plan, @plan.phases.first, @plan.guidance_groups)
end
format.json do
render json: { code: 0, msg: flash[:alert] }
end
end
rescue Exception
flash[:alert] = failed_update_error(@plan, _("plan"))
format.html do
render_phases_edit(@plan, @plan.phases.first, @plan.guidance_groups)
end
format.json do
render json: { code: 0, msg: flash[:alert] }
end
end
end
# rubocop:enable Metrics/BlockLength
end
def share
@plan = Plan.find(params[:id])
if @plan.present?
authorize @plan
# Get the roles where the user is not a reviewer
@plan_roles = @plan.roles.select { |r| !r.reviewer? }
else
redirect_to(plans_path)
end
end
def destroy
@plan = Plan.find(params[:id])
authorize @plan
if @plan.destroy
respond_to do |format|
format.html do
redirect_to plans_url,
notice: success_message(_("plan"), _("deleted"))
end
end
else
respond_to do |format|
flash[:alert] = failed_create_error(@plan, _("plan"))
format.html { render action: "edit" }
end
end
end
def answer
@plan = Plan.find(params[:id])
authorize @plan
if !params[:q_id].nil?
respond_to do |format|
format.json do
render json: @plan.answer(params[:q_id], false).to_json(include: :options)
end
end
else
respond_to do |format|
format.json { render json: {} }
end
end
end
def download
@plan = Plan.find(params[:id])
authorize @plan
@phase_options = @plan.phases.order(:number).pluck(:title, :id)
@export_settings = @plan.settings(:export)
render "download"
end
def export
@plan = Plan.includes(:answers).find(params[:id])
authorize @plan
@selected_phase = @plan.phases.find(params[:phase_id])
@show_coversheet = params[:export][:project_details].present?
@show_sections_questions = params[:export][:question_headings].present?
@show_unanswered = params[:export][:unanswered_questions].present?
@show_custom_sections = params[:export][:custom_sections].present?
@public_plan = false
@hash = @plan.as_pdf(@show_coversheet)
@formatting = params[:export][:formatting] || @plan.settings(:export).formatting
file_name = @plan.title.gsub(/ /, "_")
# rubocop:disable Metrics/BlockLength
respond_to do |format|
format.html { render layout: false }
format.csv { send_data @plan.as_csv(@show_sections_questions, @show_unanswered, @selected_phase, @show_custom_sections, @show_coversheet), filename: "#{file_name}.csv" }
format.text { send_data render_to_string(partial: 'shared/export/plan_txt'), filename: "#{file_name}.txt" }
format.docx { render docx: "#{file_name}.docx", content: render_to_string(partial: 'shared/export/plan') }
format.pdf do
# rubocop:disable Metrics/LineLength
render pdf: file_name,
margin: @formatting[:margin],
footer: {
center: _("Created using the %{application_name}. Last modified %{date}") % {
application_name: Rails.configuration.branding[:application][:name],
date: l(@plan.updated_at.to_date, formats: :short)
},
font_size: 8,
spacing: (Integer(@formatting[:margin][:bottom]) / 2) - 4,
right: "[page] of [topage]"
}
# rubocop:enable Metrics/LineLength
end
end
# rubocop:enable Metrics/BlockLength
end
def duplicate
plan = Plan.find(params[:id])
authorize plan
@plan = Plan.deep_copy(plan)
respond_to do |format|
if @plan.save
@plan.assign_creator(current_user)
format.html { redirect_to @plan, notice: success_message(_("plan"), _("copied")) }
else
format.html { redirect_to plans_path, alert: failed_create_error(@plan, "Plan") }
end
end
end
# POST /plans/:id/visibility
def visibility
plan = Plan.find(params[:id])
if plan.present?
authorize plan
if plan.visibility_allowed?
plan.visibility = plan_params[:visibility]
if plan.save
deliver_if(recipients: plan.owner_and_coowners,
key: "owners_and_coowners.visibility_changed") do |r|
UserMailer.plan_visibility(r, plan).deliver_now()
end
render status: :ok,
json: { msg: success_message(_("plan's visibility"), _("changed")) }
else
# rubocop:disable Metrics/LineLength
render status: :internal_server_error,
json: {
msg: _("Error raised while saving the visibility for plan id %{plan_id}") % { plan_id: params[:id] }
}
# rubocop:enable Metrics/LineLength
end
else
# rubocop:disable Metrics/LineLength
render status: :forbidden, json: {
msg: _("Unable to change the plan's status since it is needed at least %{percentage} percentage responded") % {
percentage: Rails.application.config.default_plan_percentage_answered
}
}
# rubocop:enable Metrics/LineLength
end
else
render status: :not_found,
json: { msg: _("Unable to find plan id %{plan_id}") % {
plan_id: params[:id] }
}
end
end
def set_test
plan = Plan.find(params[:id])
authorize plan
plan.visibility = (params[:is_test] === "1" ? :is_test : :privately_visible)
# rubocop:disable Metrics/LineLength
if plan.save
render json: {
code: 1,
msg: (plan.is_test? ? _("Your project is now a test.") : _("Your project is no longer a test."))
}
else
render status: :bad_request, json: {
code: 0, msg: _("Unable to change the plan's test status")
}
end
# rubocop:enable Metrics/LineLength
end
def overview
begin
plan = Plan.includes(:phases, :sections, :questions, template: [ :org ])
.find(params[:id])
authorize plan
render(:overview, locals: { plan: plan })
rescue ActiveRecord::RecordNotFound
flash[:alert] = _("There is no plan associated with id %{id}") % {
id: params[:id]
}
redirect_to(action: :index)
end
end
private
def plan_params
params.require(:plan)
.permit(:org_id, :org_name, :funder_id, :funder_name, :template_id,
:title, :visibility, :grant_number, :description, :identifier,
:principal_investigator_phone, :principal_investigator,
:principal_investigator_email, :data_contact,
:principal_investigator_identifier, :data_contact_email,
:data_contact_phone, :guidance_group_ids)
end
# different versions of the same template have the same family_id
# but different version numbers so for each set of templates with the
# same family_id choose the highest version number.
def get_most_recent(templates)
groups = Hash.new
templates.each do |t|
k = t.family_id
if !groups.has_key?(k)
groups[k] = t
else
other = groups[k]
if other.version < t.version
groups[k] = t
end
end
end
groups.values
end
# find all object under src_plan_key
# merge them into the items under obj_plan_key using
# super_id = id
# so we have answers which each have a question_id
# rollup(plan, "answers", "question_id", "questions")
# will put the answers into the right questions.
def rollup(plan, src_plan_key, super_id, obj_plan_key)
id_to_obj = Hash.new()
plan[src_plan_key].each do |o|
id = o[super_id]
if !id_to_obj.has_key?(id)
id_to_obj[id] = Array.new
end
id_to_obj[id] << o
end
plan[obj_plan_key].each do |o|
id = o["id"]
if id_to_obj.has_key?(id)
o[src_plan_key] = id_to_obj[ id ]
end
end
plan.delete(src_plan_key)
end
private
# ============================
# = Private instance methods =
# ============================
def render_phases_edit(plan, phase, guidance_groups)
readonly = !plan.editable_by?(current_user.id)
# Since the answers have been pre-fetched through plan (see Plan.load_for_phase)
# we create a hash whose keys are question ids and whose values are the associated answers
answers = plan.answers.reduce({}) { |m, a| m[a.question_id] = a; m }
render("/phases/edit", locals: {
base_template_org: phase.template.base_org,
plan: plan,
phase: phase,
readonly: readonly,
guidance_groups: guidance_groups,
answers: answers,
guidance_presenter: GuidancePresenter.new(plan)
})
end
end
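# The review comment on this change asks how much difference there is between sort and
# sort_by, which is exactly what the patch above is fixing: Enumerable#sort yields *two*
# elements and expects a comparator block, while sort_by yields one element and expects a
# sort key. With Symbol#to_proc, sort(&:name) therefore calls x.name(y) and raises, while
# sort_by(&:name) orders the records by name. A minimal, self-contained sketch (Item is a
# stand-in used purely for illustration, not part of this codebase):
Item = Struct.new(:name)
items = [Item.new("beta"), Item.new("alpha")]
items.sort_by(&:name) # => items ordered alphabetically by name
begin
  items.sort(&:name)  # the block expands to { |x, y| x.name(y) }
rescue ArgumentError => e
  e                   # => ArgumentError (name takes no argument)
end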
 | 1 | 18,061 | How much difference is there between sort and sort_by? | DMPRoadmap-roadmap | rb
@@ -231,6 +231,7 @@ STATIC int opae_plugin_mgr_initialize_all(void)
int res;
opae_api_adapter_table *aptr;
int errors = 0;
+ int count = 0;
for (aptr = adapter_list; aptr; aptr = aptr->next) {
| 1 | // Copyright(c) 2018, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <dlfcn.h>
#include <sys/types.h>
#include <dirent.h>
#include <linux/limits.h>
#include <pthread.h>
#include <pwd.h>
#include <unistd.h>
#include <json-c/json.h>
#include "safe_string/safe_string.h"
#include "pluginmgr.h"
#include "opae_int.h"
#define OPAE_PLUGIN_CONFIGURE "opae_plugin_configure"
typedef int (*opae_plugin_configure_t)(opae_api_adapter_table *, const char *);
typedef struct _platform_data {
uint16_t vendor_id;
uint16_t device_id;
const char *native_plugin;
uint32_t flags;
#define OPAE_PLATFORM_DATA_DETECTED 0x00000001
#define OPAE_PLATFORM_DATA_LOADED 0x00000002
} platform_data;
static platform_data platform_data_table[] = {
{ 0x8086, 0xbcbd, "libxfpga.so", 0 },
{ 0x8086, 0xbcc0, "libxfpga.so", 0 },
{ 0x8086, 0xbcc1, "libxfpga.so", 0 },
{ 0x8086, 0x09c4, "libxfpga.so", 0 },
{ 0x8086, 0x09c5, "libxfpga.so", 0 },
{ 0x8086, 0x0b2b, "libxfpga.so", 0 },
{ 0x8086, 0x0b2c, "libxfpga.so", 0 },
{ 0x8086, 0x0b30, "libxfpga.so", 0 },
{ 0x8086, 0x0b31, "libxfpga.so", 0 },
{ 0, 0, NULL, 0 },
};
static int initialized;
STATIC opae_api_adapter_table *adapter_list = (void *)0;
static pthread_mutex_t adapter_list_lock =
PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
#define MAX_PLUGINS PLUGIN_SUPPORTED_DEVICES_MAX
STATIC plugin_cfg *opae_plugin_mgr_config_list;
STATIC int opae_plugin_mgr_plugin_count;
#define HOME_CFG_PATHS 3
STATIC const char *_opae_home_cfg_files[HOME_CFG_PATHS] = {
"/.local/opae.cfg",
"/.local/opae/opae.cfg",
"/.config/opae/opae.cfg",
};
#define SYS_CFG_PATHS 2
STATIC const char *_opae_sys_cfg_files[SYS_CFG_PATHS] = {
"/usr/local/etc/opae/opae.cfg",
"/etc/opae/opae.cfg",
};
// Find the canonicalized configuration file. If null, the file was not found.
// Otherwise, it's the first configuration file found from a list of possible
// paths. Note: The char * returned is allocated here, caller must free.
STATIC char *find_cfg()
{
int i = 0;
char *file_name = NULL;
char home_cfg[PATH_MAX] = {0};
char *home_cfg_ptr = &home_cfg[0];
// get the user's home directory
struct passwd *user_passwd = getpwuid(getuid());
// first look in possible paths in the users home directory
for (i = 0; i < HOME_CFG_PATHS; ++i) {
if (strcpy_s(home_cfg, PATH_MAX, user_passwd->pw_dir)) {
OPAE_ERR("error copying pw_dir string");
return NULL;
}
home_cfg_ptr = home_cfg + strlen(home_cfg);
if (strcpy_s(home_cfg_ptr, PATH_MAX, _opae_home_cfg_files[i])) {
OPAE_ERR("error copying opae cfg dir string: %s",
_opae_home_cfg_files[i]);
return NULL;
}
file_name = canonicalize_file_name(home_cfg);
if (file_name) {
return file_name;
} else {
home_cfg[0] = '\0';
}
}
// now look in possible system paths
for (i = 0; i < SYS_CFG_PATHS; ++i) {
strcpy_s(home_cfg, PATH_MAX, _opae_sys_cfg_files[i]);
file_name = canonicalize_file_name(home_cfg);
if (file_name) {
return file_name;
}
}
return NULL;
}
STATIC opae_api_adapter_table *opae_plugin_mgr_alloc_adapter(const char *lib_path)
{
void *dl_handle;
opae_api_adapter_table *adapter;
dl_handle = dlopen(lib_path, RTLD_LAZY | RTLD_LOCAL);
if (!dl_handle) {
char *err = dlerror();
OPAE_ERR("failed to load \"%s\" %s", lib_path, err ? err : "");
return NULL;
}
adapter = (opae_api_adapter_table *)calloc(
1, sizeof(opae_api_adapter_table));
if (!adapter) {
dlclose(dl_handle);
OPAE_ERR("out of memory");
return NULL;
}
adapter->plugin.path = (char *)lib_path;
adapter->plugin.dl_handle = dl_handle;
return adapter;
}
STATIC int opae_plugin_mgr_free_adapter(opae_api_adapter_table *adapter)
{
int res;
char *err;
res = dlclose(adapter->plugin.dl_handle);
if (res) {
err = dlerror();
OPAE_ERR("dlclose failed with %d %s", res, err ? err : "");
}
free(adapter);
return res;
}
STATIC int opae_plugin_mgr_configure_plugin(opae_api_adapter_table *adapter,
const char *config)
{
opae_plugin_configure_t cfg;
cfg = (opae_plugin_configure_t)dlsym(adapter->plugin.dl_handle,
OPAE_PLUGIN_CONFIGURE);
if (!cfg) {
OPAE_ERR("failed to find %s in \"%s\"", OPAE_PLUGIN_CONFIGURE,
adapter->plugin.path);
return 1;
}
return cfg(adapter, config);
}
STATIC void opae_plugin_mgr_reset_cfg(void)
{
plugin_cfg *ptr = opae_plugin_mgr_config_list;
plugin_cfg *tmp = NULL;
while (ptr) {
tmp = ptr;
ptr = ptr->next;
free(tmp->cfg);
free(tmp);
}
opae_plugin_mgr_config_list = NULL;
opae_plugin_mgr_plugin_count = 0;
}
STATIC void opae_plugin_mgr_add_plugin(plugin_cfg *cfg)
{
plugin_cfg *ptr = opae_plugin_mgr_config_list;
cfg->next = NULL;
if (!ptr) {
opae_plugin_mgr_config_list = cfg;
} else {
while (ptr->next) {
ptr = ptr->next;
}
ptr->next = cfg;
}
opae_plugin_mgr_plugin_count++;
}
STATIC int opae_plugin_mgr_initialize_all(void)
{
int res;
opae_api_adapter_table *aptr;
int errors = 0;
for (aptr = adapter_list; aptr; aptr = aptr->next) {
if (aptr->initialize) {
res = aptr->initialize();
if (res) {
OPAE_MSG("\"%s\" initialize() routine failed",
aptr->plugin.path);
++errors;
}
}
}
return errors;
}
int opae_plugin_mgr_finalize_all(void)
{
int res;
opae_api_adapter_table *aptr;
int errors = 0;
int i = 0;
opae_mutex_lock(res, &adapter_list_lock);
for (aptr = adapter_list; aptr;) {
opae_api_adapter_table *trash;
if (aptr->finalize) {
res = aptr->finalize();
if (res) {
OPAE_MSG("\"%s\" finalize() routine failed",
aptr->plugin.path);
++errors;
}
}
trash = aptr;
aptr = aptr->next;
if (opae_plugin_mgr_free_adapter(trash))
++errors;
}
adapter_list = NULL;
// reset platforms detected to 0
for (i = 0 ; platform_data_table[i].native_plugin ; ++i) {
platform_data_table[i].flags = 0;
}
initialized = 0;
opae_plugin_mgr_reset_cfg();
opae_mutex_unlock(res, &adapter_list_lock);
return errors;
}
#define JSON_GET(_jobj, _key, _jvar) \
do { \
if (!json_object_object_get_ex(_jobj, _key, _jvar)) { \
OPAE_ERR("Error getting object: %s", _key); \
return 1; \
} \
} while (0)
#define MAX_PLUGIN_CFG_SIZE 1024
STATIC int process_plugin(const char *name, json_object *j_config)
{
json_object *j_plugin = NULL;
json_object *j_plugin_cfg = NULL;
json_object *j_enabled = NULL;
const char *stringified = NULL;
plugin_cfg *cfg = malloc(sizeof(plugin_cfg));
if (!cfg) {
OPAE_ERR("Could not allocate memory for plugin cfg");
return 1;
}
JSON_GET(j_config, "plugin", &j_plugin);
JSON_GET(j_config, "configuration", &j_plugin_cfg);
JSON_GET(j_config, "enabled", &j_enabled);
if (json_object_get_string_len(j_plugin) > PLUGIN_NAME_MAX) {
OPAE_ERR("plugin name too long");
return 1;
}
stringified = json_object_to_json_string_ext(j_plugin_cfg, JSON_C_TO_STRING_PLAIN);
if (!stringified) {
OPAE_ERR("error getting plugin configuration");
return 1;
}
cfg->cfg_size = strlen(stringified);
cfg->cfg = malloc(cfg->cfg_size);
if (!cfg->cfg) {
OPAE_ERR("error allocating memory for plugin configuration");
cfg->cfg_size = 0;
return 1;
}
if (strncpy_s(cfg->cfg, MAX_PLUGIN_CFG_SIZE, stringified, cfg->cfg_size)) {
OPAE_ERR("error copying plugin configuration");
goto out_err;
}
if (strcpy_s(cfg->name, PLUGIN_NAME_MAX, name)) {
OPAE_ERR("error copying plugin name");
goto out_err;
}
if (strcpy_s(cfg->plugin, PLUGIN_NAME_MAX, json_object_get_string(j_plugin))) {
OPAE_ERR("error copying plugin file name");
goto out_err;
}
cfg->enabled = json_object_get_boolean(j_enabled);
opae_plugin_mgr_add_plugin(cfg);
return 0;
out_err:
if (cfg->cfg) {
free(cfg->cfg);
cfg->cfg = NULL;
}
free(cfg);
return 1;
}
STATIC int process_cfg_buffer(const char *buffer, const char *filename)
{
int num_plugins = 0;
int num_errors = 0;
int i = 0;
int res = 1;
json_object *root = NULL;
json_object *j_plugins = NULL;
json_object *j_configs = NULL;
json_object *j_plugin = NULL;
json_object *j_config = NULL;
const char *plugin_name = NULL;
enum json_tokener_error j_err = json_tokener_success;
root = json_tokener_parse_verbose(buffer, &j_err);
if (!root) {
OPAE_ERR("Error parsing config file: '%s' - %s", filename,
json_tokener_error_desc(j_err));
goto out_free;
}
if (!json_object_object_get_ex(root, "plugins", &j_plugins)) {
OPAE_ERR("Error parsing config file: '%s' - missing 'plugins'", filename);
goto out_free;
}
if (!json_object_object_get_ex(root, "configurations", &j_configs)) {
OPAE_ERR("Error parsing config file: '%s' - missing 'configs'", filename);
goto out_free;
}
if (!json_object_is_type(j_plugins, json_type_array)) {
OPAE_ERR("'plugins' JSON object not array type");
goto out_free;
}
num_plugins = json_object_array_length(j_plugins);
num_errors = 0;
for (i = 0; i < num_plugins; ++i) {
j_plugin = json_object_array_get_idx(j_plugins, i);
plugin_name = json_object_get_string(j_plugin);
if (json_object_object_get_ex(j_configs, plugin_name, &j_config)) {
num_errors += process_plugin(plugin_name, j_config);
} else {
OPAE_ERR("Could not find plugin configuration for '%s'", plugin_name);
num_errors += 1;
}
}
res = num_errors;
out_free:
json_object_put(root);
return res;
}
#define MAX_CFG_SIZE 4096
STATIC int opae_plugin_mgr_parse_config(const char *filename)
{
char buffer[MAX_CFG_SIZE] = { 0 };
char *ptr = &buffer[0];
size_t bytes_read = 0, total_read = 0;
FILE *fp = NULL;
if (filename) {
fp = fopen(filename, "r");
} else {
OPAE_MSG("config file is NULL");
return 1;
}
if (!fp) {
OPAE_ERR("Error opening config file: %s", filename);
return 1;
}
while ((bytes_read = fread(ptr + total_read, 1, 1, fp))
&& total_read < MAX_CFG_SIZE) {
total_read += bytes_read;
}
if (ferror(fp)) {
OPAE_ERR("Error reading config file: %s - %s", filename, strerror(errno));
goto out_err;
}
if (!feof(fp)) {
OPAE_ERR("Unknown error reading config file: %s", filename);
goto out_err;
}
fclose(fp);
fp = NULL;
return process_cfg_buffer(buffer, filename);
out_err:
fclose(fp);
fp = NULL;
return 1;
}
STATIC int opae_plugin_mgr_register_adapter(opae_api_adapter_table *adapter)
{
opae_api_adapter_table *aptr;
adapter->next = NULL;
if (!adapter_list) {
adapter_list = adapter;
return 0;
}
// new entries go to the end of the list.
for (aptr = adapter_list; aptr->next; aptr = aptr->next)
/* find the last entry */;
aptr->next = adapter;
return 0;
}
STATIC void opae_plugin_mgr_detect_platform(uint16_t vendor, uint16_t device)
{
int i;
for (i = 0 ; platform_data_table[i].native_plugin ; ++i) {
if (platform_data_table[i].vendor_id == vendor &&
platform_data_table[i].device_id == device) {
OPAE_DBG("platform detected: vid=0x%04x did=0x%04x -> %s",
vendor, device,
platform_data_table[i].native_plugin);
platform_data_table[i].flags |= OPAE_PLATFORM_DATA_DETECTED;
}
}
}
STATIC int opae_plugin_mgr_detect_platforms(void)
{
DIR *dir;
char base_dir[PATH_MAX];
char file_path[PATH_MAX];
struct dirent *dirent;
int res;
int errors = 0;
// Iterate over the directories in /sys/bus/pci/devices.
// This directory contains symbolic links to device directories
// where 'vendor' and 'device' files exist.
strncpy_s(base_dir, sizeof(base_dir),
"/sys/bus/pci/devices", 21);
dir = opendir(base_dir);
if (!dir) {
OPAE_ERR("Failed to open %s. Aborting platform detection.", base_dir);
return 1;
}
while ((dirent = readdir(dir)) != NULL) {
FILE *fp;
unsigned vendor = 0;
unsigned device = 0;
if (EOK != strcmp_s(dirent->d_name, sizeof(dirent->d_name),
".", &res)) {
OPAE_ERR("strcmp_s failed");
++errors;
goto out_close;
}
if (0 == res) // don't process .
continue;
if (EOK != strcmp_s(dirent->d_name, sizeof(dirent->d_name),
"..", &res)) {
OPAE_ERR("strcmp_s failed");
++errors;
goto out_close;
}
if (0 == res) // don't process ..
continue;
// Read the 'vendor' file.
snprintf_s_ss(file_path, sizeof(file_path),
"%s/%s/vendor", base_dir, dirent->d_name);
fp = fopen(file_path, "r");
if (!fp) {
OPAE_ERR("Failed to open %s. Aborting platform detection.", file_path);
++errors;
goto out_close;
}
if (EOF == fscanf(fp, "%x", &vendor)) {
OPAE_ERR("Failed to read %s. Aborting platform detection.", file_path);
fclose(fp);
++errors;
goto out_close;
}
fclose(fp);
// Read the 'device' file.
snprintf_s_ss(file_path, sizeof(file_path),
"%s/%s/device", base_dir, dirent->d_name);
fp = fopen(file_path, "r");
if (!fp) {
OPAE_ERR("Failed to open %s. Aborting platform detection.", file_path);
++errors;
goto out_close;
}
if (EOF == fscanf(fp, "%x", &device)) {
OPAE_ERR("Failed to read %s. Aborting platform detection.", file_path);
fclose(fp);
++errors;
goto out_close;
}
fclose(fp);
// Detect platform for this (vendor, device).
opae_plugin_mgr_detect_platform((uint16_t) vendor, (uint16_t) device);
}
out_close:
closedir(dir);
return errors;
}
STATIC int opae_plugin_mgr_load_cfg_plugin(plugin_cfg *cfg)
{
int res = 0;
opae_api_adapter_table *adapter = NULL;
if (cfg->enabled && cfg->cfg && cfg->cfg_size) {
adapter = opae_plugin_mgr_alloc_adapter(cfg->plugin);
if (!adapter) {
OPAE_ERR("malloc failed");
return 1;
}
res = opae_plugin_mgr_configure_plugin(adapter, cfg->cfg);
if (res) {
opae_plugin_mgr_free_adapter(adapter);
OPAE_ERR("failed to configure plugin \"%s\"",
cfg->name);
return 1;
}
res = opae_plugin_mgr_register_adapter(adapter);
if (res) {
opae_plugin_mgr_free_adapter(adapter);
OPAE_ERR("Failed to register \"%s\"", cfg->name);
return 1;
}
}
return 0;
}
STATIC int opae_plugin_mgr_load_cfg_plugins(void)
{
plugin_cfg *ptr = opae_plugin_mgr_config_list;
int errors = 0;
while (ptr) {
errors += opae_plugin_mgr_load_cfg_plugin(ptr);
ptr = ptr->next;
}
return errors;
}
STATIC int opae_plugin_mgr_load_dflt_plugins(int *platforms_detected)
{
int i = 0, j = 0;
int res = 0;
opae_api_adapter_table *adapter = NULL;
int errors = opae_plugin_mgr_detect_platforms();
if (errors)
return errors;
// Load each of the native plugins that were detected.
*platforms_detected = 0;
for (i = 0 ; platform_data_table[i].native_plugin ; ++i) {
const char *native_plugin;
int already_loaded;
if (!(platform_data_table[i].flags & OPAE_PLATFORM_DATA_DETECTED))
continue; // This platform was not detected.
native_plugin = platform_data_table[i].native_plugin;
(*platforms_detected)++;
// Iterate over the table again to prevent multiple loads
// of the same native plugin.
already_loaded = 0;
for (j = 0 ; platform_data_table[j].native_plugin ; ++j) {
if (EOK != strcmp_s(native_plugin, strnlen_s(native_plugin, 256),
platform_data_table[j].native_plugin, &res)) {
OPAE_ERR("strcmp_s failed");
return ++errors;
}
if (!res &&
(platform_data_table[j].flags & OPAE_PLATFORM_DATA_LOADED)) {
already_loaded = 1;
break;
}
}
if (already_loaded)
continue;
adapter = opae_plugin_mgr_alloc_adapter(native_plugin);
if (!adapter) {
OPAE_ERR("malloc failed");
return ++errors;
}
// TODO: pass serialized json for native plugin
res = opae_plugin_mgr_configure_plugin(adapter, "");
if (res) {
opae_plugin_mgr_free_adapter(adapter);
OPAE_ERR("failed to configure plugin \"%s\"",
native_plugin);
++errors;
continue; // Keep going.
}
res = opae_plugin_mgr_register_adapter(adapter);
if (res) {
opae_plugin_mgr_free_adapter(adapter);
OPAE_ERR("Failed to register \"%s\"", native_plugin);
++errors;
continue; // Keep going.
}
platform_data_table[i].flags |= OPAE_PLATFORM_DATA_LOADED;
}
return errors;
}
int opae_plugin_mgr_initialize(const char *cfg_file)
{
int res;
int errors = 0;
int platforms_detected = 0;
opae_plugin_mgr_plugin_count = 0;
char *found_cfg = NULL;
const char *use_cfg = NULL;
opae_mutex_lock(res, &adapter_list_lock);
if (initialized) { // prevent multiple init.
opae_mutex_unlock(res, &adapter_list_lock);
return 0;
}
found_cfg = find_cfg();
use_cfg = cfg_file ? cfg_file : found_cfg;
if (use_cfg) {
opae_plugin_mgr_parse_config(use_cfg);
if (found_cfg) {
free(found_cfg);
}
}
if (opae_plugin_mgr_plugin_count) {
errors = opae_plugin_mgr_load_cfg_plugins();
} else {
// fail-safe, try to detect plugins based on supported devices
errors = opae_plugin_mgr_load_dflt_plugins(&platforms_detected);
}
if (errors)
goto out_unlock;
// Call each plugin's initialization routine.
errors += opae_plugin_mgr_initialize_all();
if (!errors && (opae_plugin_mgr_plugin_count || platforms_detected))
initialized = 1;
out_unlock:
opae_mutex_unlock(res, &adapter_list_lock);
return errors;
}
int opae_plugin_mgr_for_each_adapter
(int (*callback)(const opae_api_adapter_table *, void *), void *context)
{
int res;
int cb_res = OPAE_ENUM_CONTINUE;
opae_api_adapter_table *aptr;
if (!callback) {
OPAE_ERR("NULL callback passed to %s()", __func__);
return OPAE_ENUM_STOP;
}
opae_mutex_lock(res, &adapter_list_lock);
for (aptr = adapter_list; aptr; aptr = aptr->next) {
cb_res = callback(aptr, context);
if (cb_res)
break;
}
opae_mutex_unlock(res, &adapter_list_lock);
return cb_res;
}
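/*
 * The review comment on this patch points out that the newly added 'count' in
 * opae_plugin_mgr_initialize_all() is never read. If a count of initialized adapters is
 * actually wanted, it has to be consumed somewhere -- a rough, illustrative sketch (the
 * function name and log message below are made up, not code from this file):
 */
STATIC int opae_plugin_mgr_initialize_all_counted(void)
{
	opae_api_adapter_table *aptr;
	int errors = 0;
	int count = 0;

	for (aptr = adapter_list; aptr; aptr = aptr->next) {
		if (aptr->initialize) {
			++count;
			if (aptr->initialize())
				++errors;
		}
	}

	OPAE_DBG("ran initialize() on %d adapter(s), %d error(s)", count, errors);
	return errors;
}
/* Otherwise the unused variable should simply be removed, as the reviewer suggests,
 * since a warning such as -Wunused-variable will typically flag it. */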
| 1 | 18,522 | count doesn't seem to be used. Let's remove it. | OPAE-opae-sdk | c |
@@ -18,7 +18,11 @@ module Mongoid
#
# @since 4.0.0
def cache_table
- Thread.current["[mongoid]:query_cache"] ||= {}
+ if defined?(Mongo::QueryCache)
+ Mongo::QueryCache.cache_table
+ else
+ Thread.current["[mongoid]:query_cache"] ||= {}
+ end
end
# Clear the query cache. | 1 | # frozen_string_literal: true
# encoding: utf-8
module Mongoid
# A cache of database queries on a per-request basis.
#
# @since 4.0.0
module QueryCache
class << self
# Get the cached queries.
#
# @example Get the cached queries from the current thread.
# QueryCache.cache_table
#
# @return [ Hash ] The hash of cached queries.
#
# @since 4.0.0
def cache_table
Thread.current["[mongoid]:query_cache"] ||= {}
end
# Clear the query cache.
#
# @example Clear the cache.
# QueryCache.clear_cache
#
# @return [ nil ] Always nil.
#
# @since 4.0.0
def clear_cache
Thread.current["[mongoid]:query_cache"] = nil
end
# Set whether the cache is enabled.
#
# @example Set if the cache is enabled.
# QueryCache.enabled = true
#
# @param [ true, false ] value The enabled value.
#
# @since 4.0.0
def enabled=(value)
Thread.current["[mongoid]:query_cache:enabled"] = value
end
# Is the query cache enabled on the current thread?
#
# @example Is the query cache enabled?
# QueryCache.enabled?
#
# @return [ true, false ] If the cache is enabled.
#
# @since 4.0.0
def enabled?
!!Thread.current["[mongoid]:query_cache:enabled"]
end
# Execute the block while using the query cache.
#
# @example Execute with the cache.
# QueryCache.cache { collection.find }
#
# @return [ Object ] The result of the block.
#
# @since 4.0.0
def cache
enabled = QueryCache.enabled?
QueryCache.enabled = true
yield
ensure
QueryCache.enabled = enabled
end
# Execute the block with the query cache disabled.
#
# @example Execute without the cache.
# QueryCache.uncached { collection.find }
#
# @return [ Object ] The result of the block.
def uncached
enabled = QueryCache.enabled?
QueryCache.enabled = false
yield
ensure
QueryCache.enabled = enabled
end
end
# The middleware to be added to a rack application in order to activate the
# query cache.
#
# @since 4.0.0
class Middleware
# Instantiate the middleware.
#
# @example Create the new middleware.
# Middleware.new(app)
#
# @param [ Object ] app The rack application stack.
#
# @since 4.0.0
def initialize(app)
@app = app
end
# Execute the request, wrapping in a query cache.
#
# @example Execute the request.
# middleware.call(env)
#
# @param [ Object ] env The environment.
#
# @return [ Object ] The result of the call.
#
# @since 4.0.0
def call(env)
QueryCache.cache { @app.call(env) }
ensure
QueryCache.clear_cache
end
end
# A Cursor that attempts to load documents from memory first before hitting
# the database if the same query has already been executed.
#
# @since 5.0.0
class CachedCursor < Mongo::Cursor
# We iterate over the cached documents if they exist already in the
# cursor otherwise proceed as normal.
#
# @example Iterate over the documents.
# cursor.each do |doc|
# # ...
# end
#
# @since 5.0.0
def each
if @cached_documents
@cached_documents.each do |doc|
yield doc
end
else
super
end
end
# Get a human-readable string representation of +Cursor+.
#
# @example Inspect the cursor.
# cursor.inspect
#
# @return [ String ] A string representation of a +Cursor+ instance.
#
# @since 2.0.0
def inspect
"#<Mongoid::QueryCache::CachedCursor:0x#{object_id} @view=#{@view.inspect}>"
end
private
def process(result)
@remaining -= result.returned_count if limited?
@cursor_id = result.cursor_id
@coll_name ||= result.namespace.sub("#{database.name}.", '') if result.namespace
documents = result.documents
if @cursor_id.zero? && !@after_first_batch
@cached_documents ||= []
@cached_documents.concat(documents)
end
@after_first_batch = true
documents
end
end
# Included to add behavior for clearing out the query cache on certain
# operations.
#
# @since 4.0.0
module Base
def alias_query_cache_clear(*method_names)
method_names.each do |method_name|
define_method("#{method_name}_with_clear_cache") do |*args|
QueryCache.clear_cache
send("#{method_name}_without_clear_cache", *args)
end
alias_method "#{method_name}_without_clear_cache", method_name
alias_method method_name, "#{method_name}_with_clear_cache"
end
end
end
# Contains enhancements to the Mongo::Collection::View in order to get a
# cached cursor or a regular cursor on iteration.
#
# @since 5.0.0
module View
extend ActiveSupport::Concern
included do
extend QueryCache::Base
alias_query_cache_clear :delete_one,
:delete_many,
:update_one,
:update_many,
:replace_one,
:find_one_and_delete,
:find_one_and_replace,
:find_one_and_update
end
# Override the default enumeration to handle if the cursor can be cached
# or not.
#
# @example Iterate over the view.
# view.each do |doc|
# # ...
# end
#
# @since 5.0.0
def each
if system_collection? || !QueryCache.enabled?
super
else
unless cursor = cached_cursor
read_with_retry do
server = server_selector.select_server(cluster)
cursor = CachedCursor.new(view, send_initial_query(server), server)
QueryCache.cache_table[cache_key] = cursor
end
end
cursor.each do |doc|
yield doc
end if block_given?
cursor
end
end
private
def cached_cursor
if limit
key = [ collection.namespace, selector, nil, skip, sort, projection, collation ]
cursor = QueryCache.cache_table[key]
if cursor
cursor.to_a[0...limit.abs]
end
end
cursor || QueryCache.cache_table[cache_key]
end
def cache_key
[ collection.namespace, selector, limit, skip, sort, projection, collation ]
end
def system_collection?
collection.namespace =~ /\Asystem./
end
end
# Adds behavior to the query cache for collections.
#
# @since 5.0.0
module Collection
extend ActiveSupport::Concern
included do
extend QueryCache::Base
alias_query_cache_clear :insert_one, :insert_many
end
end
# Bypass the query cache when reloading a document.
module Document
def reload
QueryCache.uncached { super }
end
end
end
end
Mongo::Collection.__send__(:include, Mongoid::QueryCache::Collection)
Mongo::Collection::View.__send__(:include, Mongoid::QueryCache::View)
Mongoid::Document.__send__(:include, Mongoid::QueryCache::Document)
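# The review comment on this patch argues that the mongo driver is a hard dependency of
# Mongoid, so the defined?(Mongo::QueryCache) guard in the diff above is only needed while
# older driver versions must be supported. Once the gemspec requires a driver version that
# ships Mongo::QueryCache, the delegation could be unconditional -- a minimal sketch (the
# version constraint is an assumption):
module Mongoid
  module QueryCache
    class << self
      # Delegate straight to the driver's cache table; no defined? check is required when
      # the minimum mongo version is known to provide Mongo::QueryCache.
      def cache_table
        Mongo::QueryCache.cache_table
      end
    end
  end
end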
 | 1 | 12,756 | Mongo is a hard dependency for Mongoid. Why do we need an if-statement to check if it's defined? We should always use Mongo::QueryCache | mongodb-mongoid | rb
@@ -0,0 +1,6 @@
+class AddFunderAndOrgToPlans < ActiveRecord::Migration
+ def change
+ add_reference :plans, :org, foreign_key: true
+ add_column :plans, :funder_id, :integer, index: true
+ end
+end | 1 | 1 | 18,842 | Does this point out at a ROR funder id? If so, could we get rid of the `funder_name` field on the plan and just use the `name` of the funder with id `funder_id`? | DMPRoadmap-roadmap | rb |
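# The review comment on this migration asks whether funder_id is meant to reference an Org
# record. If it is, the column could be declared as a reference with a real foreign key
# instead of a bare integer -- a sketch only, assuming a Rails version where add_reference
# accepts foreign_key: { to_table: ... }:
#
#   def change
#     add_reference :plans, :org, foreign_key: true
#     add_reference :plans, :funder, foreign_key: { to_table: :orgs }, index: true
#   end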
|
@@ -3,8 +3,12 @@ var assert = require('assert');
var fs = require('fs');
var flatbuffers = require('../js/flatbuffers').flatbuffers;
+global.flatbuffers = flatbuffers;
+
var MyGame = require(process.argv[2]).MyGame;
+var isTsTest = process.env.FB_TS_TEST ? true : false;
+
function main() {
// First, let's test reading a FlatBuffer generated by C++ code: | 1 | // Run this using JavaScriptTest.sh
var assert = require('assert');
var fs = require('fs');
var flatbuffers = require('../js/flatbuffers').flatbuffers;
var MyGame = require(process.argv[2]).MyGame;
function main() {
// First, let's test reading a FlatBuffer generated by C++ code:
// This file was generated from monsterdata_test.json
var data = new Uint8Array(fs.readFileSync('monsterdata_test.mon'));
// Now test it:
var bb = new flatbuffers.ByteBuffer(data);
testBuffer(bb);
// Second, let's create a FlatBuffer from scratch in JavaScript, and test it also.
// We use an initial size of 1 to exercise the reallocation algorithm,
// normally a size larger than the typical FlatBuffer you generate would be
// better for performance.
var fbb = new flatbuffers.Builder(1);
createMonster(fbb);
serializeAndTest(fbb);
// clear the builder, repeat tests
var clearIterations = 100;
var startingCapacity = fbb.bb.capacity();
for (var i = 0; i < clearIterations; i++) {
fbb.clear();
createMonster(fbb);
serializeAndTest(fbb);
}
// the capacity of our buffer shouldn't increase with the same size payload
assert.strictEqual(fbb.bb.capacity(), startingCapacity);
test64bit();
testUnicode();
fuzzTest1();
console.log('FlatBuffers test: completed successfully');
}
function createMonster(fbb) {
// We set up the same values as monsterdata.json:
var str = fbb.createString('MyMonster');
var inv = MyGame.Example.Monster.createInventoryVector(fbb, [0, 1, 2, 3, 4]);
var fred = fbb.createString('Fred');
MyGame.Example.Monster.startMonster(fbb);
MyGame.Example.Monster.addName(fbb, fred);
var mon2 = MyGame.Example.Monster.endMonster(fbb);
MyGame.Example.Monster.startTest4Vector(fbb, 2);
MyGame.Example.Test.createTest(fbb, 10, 20);
MyGame.Example.Test.createTest(fbb, 30, 40);
var test4 = fbb.endVector();
var testArrayOfString = MyGame.Example.Monster.createTestarrayofstringVector(fbb, [
fbb.createString('test1'),
fbb.createString('test2')
]);
MyGame.Example.Monster.startMonster(fbb);
MyGame.Example.Monster.addPos(fbb, MyGame.Example.Vec3.createVec3(fbb, 1, 2, 3, 3, MyGame.Example.Color.Green, 5, 6));
MyGame.Example.Monster.addHp(fbb, 80);
MyGame.Example.Monster.addName(fbb, str);
MyGame.Example.Monster.addInventory(fbb, inv);
MyGame.Example.Monster.addTestType(fbb, MyGame.Example.Any.Monster);
MyGame.Example.Monster.addTest(fbb, mon2);
MyGame.Example.Monster.addTest4(fbb, test4);
MyGame.Example.Monster.addTestarrayofstring(fbb, testArrayOfString);
MyGame.Example.Monster.addTestbool(fbb, true);
var mon = MyGame.Example.Monster.endMonster(fbb);
MyGame.Example.Monster.finishMonsterBuffer(fbb, mon);
}
function serializeAndTest(fbb) {
// Write the result to a file for debugging purposes:
// Note that the binaries are not necessarily identical, since the JSON
// parser may serialize in a slightly different order than the above
// JavaScript code. They are functionally equivalent though.
fs.writeFileSync('monsterdata_javascript_wire.mon', new Buffer(fbb.asUint8Array()));
// Tests mutation first. This will verify that we did not trample any other
// part of the byte buffer.
testMutation(fbb.dataBuffer());
testBuffer(fbb.dataBuffer());
}
function testMutation(bb) {
var monster = MyGame.Example.Monster.getRootAsMonster(bb);
monster.mutate_hp(120);
assert.strictEqual(monster.hp(), 120);
monster.mutate_hp(80);
assert.strictEqual(monster.hp(), 80);
var manaRes = monster.mutate_mana(10);
assert.strictEqual(manaRes, false); // Field was NOT present, because default value.
  // TODO: Mutating structs or vectors is not yet available.
}
function testBuffer(bb) {
assert.ok(MyGame.Example.Monster.bufferHasIdentifier(bb));
var monster = MyGame.Example.Monster.getRootAsMonster(bb);
assert.strictEqual(monster.hp(), 80);
assert.strictEqual(monster.mana(), 150); // default
assert.strictEqual(monster.name(), 'MyMonster');
var pos = monster.pos();
assert.strictEqual(pos.x(), 1);
assert.strictEqual(pos.y(), 2);
assert.strictEqual(pos.z(), 3);
assert.strictEqual(pos.test1(), 3);
assert.strictEqual(pos.test2(), MyGame.Example.Color.Green);
var t = pos.test3();
assert.strictEqual(t.a(), 5);
assert.strictEqual(t.b(), 6);
assert.strictEqual(monster.testType(), MyGame.Example.Any.Monster);
var monster2 = new MyGame.Example.Monster();
assert.strictEqual(monster.test(monster2) != null, true);
assert.strictEqual(monster2.name(), 'Fred');
assert.strictEqual(monster.inventoryLength(), 5);
var invsum = 0;
for (var i = 0; i < monster.inventoryLength(); i++) {
invsum += monster.inventory(i);
}
assert.strictEqual(invsum, 10);
var invsum2 = 0;
var invArr = monster.inventoryArray();
for (var i = 0; i < invArr.length; i++) {
invsum2 += invArr[i];
}
assert.strictEqual(invsum2, 10);
var test_0 = monster.test4(0);
var test_1 = monster.test4(1);
assert.strictEqual(monster.test4Length(), 2);
assert.strictEqual(test_0.a() + test_0.b() + test_1.a() + test_1.b(), 100);
assert.strictEqual(monster.testarrayofstringLength(), 2);
assert.strictEqual(monster.testarrayofstring(0), 'test1');
assert.strictEqual(monster.testarrayofstring(1), 'test2');
assert.strictEqual(monster.testbool(), true);
}
function test64bit() {
var fbb = new flatbuffers.Builder();
var required = fbb.createString('required');
MyGame.Example.Stat.startStat(fbb);
var stat2 = MyGame.Example.Stat.endStat(fbb);
MyGame.Example.Monster.startMonster(fbb);
MyGame.Example.Monster.addName(fbb, required);
MyGame.Example.Monster.addTestempty(fbb, stat2);
var mon2 = MyGame.Example.Monster.endMonster(fbb);
MyGame.Example.Stat.startStat(fbb);
// 2541551405100253985 = 0x87654321(low part) + 0x23456789 * 0x100000000(high part);
MyGame.Example.Stat.addVal(fbb, new flatbuffers.Long(0x87654321, 0x23456789)); // the low part is Uint32
var stat = MyGame.Example.Stat.endStat(fbb);
MyGame.Example.Monster.startMonster(fbb);
MyGame.Example.Monster.addName(fbb, required);
MyGame.Example.Monster.addEnemy(fbb, mon2);
MyGame.Example.Monster.addTestempty(fbb, stat);
var mon = MyGame.Example.Monster.endMonster(fbb);
MyGame.Example.Monster.finishMonsterBuffer(fbb, mon);
var bytes = fbb.asUint8Array();
////////////////////////////////////////////////////////////////
var bb = new flatbuffers.ByteBuffer(bytes);
assert.ok(MyGame.Example.Monster.bufferHasIdentifier(bb));
var mon = MyGame.Example.Monster.getRootAsMonster(bb);
var stat = mon.testempty();
assert.strictEqual(stat != null, true);
assert.strictEqual(stat.val() != null, true);
assert.strictEqual(stat.val().toFloat64(), 2541551405100253985);
var mon2 = mon.enemy();
assert.strictEqual(mon2 != null, true);
stat = mon2.testempty();
assert.strictEqual(stat != null, true);
assert.strictEqual(stat.val() != null, true);
assert.strictEqual(stat.val().low, 0); // default value
assert.strictEqual(stat.val().high, 0);
}
function testUnicode() {
var correct = fs.readFileSync('unicode_test.mon');
var json = JSON.parse(fs.readFileSync('unicode_test.json', 'utf8'));
// Test reading
function testReadingUnicode(bb) {
var monster = MyGame.Example.Monster.getRootAsMonster(bb);
assert.strictEqual(monster.name(), json.name);
assert.deepEqual(new Buffer(monster.name(flatbuffers.Encoding.UTF8_BYTES)), new Buffer(json.name));
assert.strictEqual(monster.testarrayoftablesLength(), json.testarrayoftables.length);
json.testarrayoftables.forEach(function(table, i) {
var value = monster.testarrayoftables(i);
assert.strictEqual(value.name(), table.name);
assert.deepEqual(new Buffer(value.name(flatbuffers.Encoding.UTF8_BYTES)), new Buffer(table.name));
});
assert.strictEqual(monster.testarrayofstringLength(), json.testarrayofstring.length);
json.testarrayofstring.forEach(function(string, i) {
assert.strictEqual(monster.testarrayofstring(i), string);
assert.deepEqual(new Buffer(monster.testarrayofstring(i, flatbuffers.Encoding.UTF8_BYTES)), new Buffer(string));
});
}
testReadingUnicode(new flatbuffers.ByteBuffer(new Uint8Array(correct)));
// Test writing
var fbb = new flatbuffers.Builder();
var name = fbb.createString(json.name);
var testarrayoftablesOffsets = json.testarrayoftables.map(function(table) {
var name = fbb.createString(new Uint8Array(new Buffer(table.name)));
MyGame.Example.Monster.startMonster(fbb);
MyGame.Example.Monster.addName(fbb, name);
return MyGame.Example.Monster.endMonster(fbb);
});
var testarrayoftablesOffset = MyGame.Example.Monster.createTestarrayoftablesVector(fbb,
testarrayoftablesOffsets);
var testarrayofstringOffset = MyGame.Example.Monster.createTestarrayofstringVector(fbb,
json.testarrayofstring.map(function(string) { return fbb.createString(string); }));
MyGame.Example.Monster.startMonster(fbb);
MyGame.Example.Monster.addTestarrayofstring(fbb, testarrayofstringOffset);
MyGame.Example.Monster.addTestarrayoftables(fbb, testarrayoftablesOffset);
MyGame.Example.Monster.addName(fbb, name);
MyGame.Example.Monster.finishSizePrefixedMonsterBuffer(fbb, MyGame.Example.Monster.endMonster(fbb));
var bb = new flatbuffers.ByteBuffer(fbb.asUint8Array())
bb.setPosition(4);
testReadingUnicode(bb);
}
var __imul = Math.imul ? Math.imul : function(a, b) {
var ah = a >> 16 & 65535;
var bh = b >> 16 & 65535;
var al = a & 65535;
var bl = b & 65535;
return al * bl + (ah * bl + al * bh << 16) | 0;
};
// Include simple random number generator to ensure results will be the
// same cross platform.
// http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator
var lcg_seed = 48271;
function lcg_rand() {
return lcg_seed = (__imul(lcg_seed, 279470273) >>> 0) % 4294967291;
}
function lcg_reset() {
lcg_seed = 48271;
}
// Converts a Field ID to a virtual table offset.
function fieldIndexToOffset(field_id) {
// Should correspond to what EndTable() below builds up.
var fixed_fields = 2; // Vtable size and Object Size.
return (field_id + fixed_fields) * 2;
}
// Low level stress/fuzz test: serialize/deserialize a variety of
// different kinds of data in different combinations
function fuzzTest1() {
// Values we're testing against: chosen to ensure no bits get chopped
  // off anywhere, and also be different from each other.
var bool_val = true;
var char_val = -127; // 0x81
var uchar_val = 0xFF;
var short_val = -32222; // 0x8222;
var ushort_val = 0xFEEE;
var int_val = 0x83333333 | 0;
var uint_val = 0xFDDDDDDD;
var long_val = new flatbuffers.Long(0x44444444, 0x84444444);
var ulong_val = new flatbuffers.Long(0xCCCCCCCC, 0xFCCCCCCC);
var float_val = new Float32Array([3.14159])[0];
var double_val = 3.14159265359;
var test_values_max = 11;
var fields_per_object = 4;
var num_fuzz_objects = 10000; // The higher, the more thorough :)
var builder = new flatbuffers.Builder();
lcg_reset(); // Keep it deterministic.
var objects = [];
// Generate num_fuzz_objects random objects each consisting of
// fields_per_object fields, each of a random type.
for (var i = 0; i < num_fuzz_objects; i++) {
builder.startObject(fields_per_object);
for (var f = 0; f < fields_per_object; f++) {
var choice = lcg_rand() % test_values_max;
switch (choice) {
case 0: builder.addFieldInt8(f, bool_val, 0); break;
case 1: builder.addFieldInt8(f, char_val, 0); break;
case 2: builder.addFieldInt8(f, uchar_val, 0); break;
case 3: builder.addFieldInt16(f, short_val, 0); break;
case 4: builder.addFieldInt16(f, ushort_val, 0); break;
case 5: builder.addFieldInt32(f, int_val, 0); break;
case 6: builder.addFieldInt32(f, uint_val, 0); break;
case 7: builder.addFieldInt64(f, long_val, flatbuffers.Long.ZERO); break;
case 8: builder.addFieldInt64(f, ulong_val, flatbuffers.Long.ZERO); break;
case 9: builder.addFieldFloat32(f, float_val, 0); break;
case 10: builder.addFieldFloat64(f, double_val, 0); break;
}
}
objects.push(builder.endObject());
}
builder.prep(8, 0); // Align whole buffer.
lcg_reset(); // Reset.
builder.finish(objects[objects.length - 1]);
var bytes = new Uint8Array(builder.asUint8Array());
var view = new DataView(bytes.buffer);
// Test that all objects we generated are readable and return the
// expected values. We generate random objects in the same order
// so this is deterministic.
for (var i = 0; i < num_fuzz_objects; i++) {
var offset = bytes.length - objects[i];
for (var f = 0; f < fields_per_object; f++) {
var choice = lcg_rand() % test_values_max;
var vtable_offset = fieldIndexToOffset(f);
var vtable = offset - view.getInt32(offset, true);
assert.ok(vtable_offset < view.getInt16(vtable, true));
var field_offset = offset + view.getInt16(vtable + vtable_offset, true);
switch (choice) {
case 0: assert.strictEqual(!!view.getInt8(field_offset), bool_val); break;
case 1: assert.strictEqual(view.getInt8(field_offset), char_val); break;
case 2: assert.strictEqual(view.getUint8(field_offset), uchar_val); break;
case 3: assert.strictEqual(view.getInt16(field_offset, true), short_val); break;
case 4: assert.strictEqual(view.getUint16(field_offset, true), ushort_val); break;
case 5: assert.strictEqual(view.getInt32(field_offset, true), int_val); break;
case 6: assert.strictEqual(view.getUint32(field_offset, true), uint_val); break;
case 7: assert.strictEqual(view.getInt32(field_offset, true), long_val.low); assert.strictEqual(view.getInt32(field_offset + 4, true), long_val.high); break;
case 8: assert.strictEqual(view.getInt32(field_offset, true), ulong_val.low); assert.strictEqual(view.getInt32(field_offset + 4, true), ulong_val.high); break;
case 9: assert.strictEqual(view.getFloat32(field_offset, true), float_val); break;
case 10: assert.strictEqual(view.getFloat64(field_offset, true), double_val); break;
}
}
}
}
main();
| 1 | 17,890 | Use !! to cast to bool, rather than the ternary bool antipattern. | google-flatbuffers | java |
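A short illustration of the review comment above (hypothetical snippet, not part of the test file): the ternary introduced in the patch, process.env.FB_TS_TEST ? true : false, can be replaced by a double negation, which performs the same truthy-to-boolean coercion.

// Hypothetical snippet; FB_TS_TEST is the environment variable from the patch.
var isTsTest = !!process.env.FB_TS_TEST;                 // same result as the ternary
var isTsTestExplicit = Boolean(process.env.FB_TS_TEST);  // equivalent, more explicit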
@@ -145,7 +145,7 @@ func (cmd CmdSnapshots) Execute(args []string) error {
tab.Rows = append(tab.Rows, []interface{}{sn.ID()[:plen/2], sn.Time.Format(TimeFormat), sn.Hostname, sn.Paths[0]})
if len(sn.Paths) > 1 {
- for _, path := range sn.Paths {
+ for _, path := range sn.Paths[1:] {
tab.Rows = append(tab.Rows, []interface{}{"", "", "", path})
}
} | 1 | package main
import (
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"github.com/restic/restic"
"github.com/restic/restic/backend"
)
const (
minute = 60
hour = 60 * minute
day = 24 * hour
week = 7 * day
)
type Table struct {
Header string
Rows [][]interface{}
RowFormat string
}
func NewTable() Table {
return Table{
Rows: [][]interface{}{},
}
}
func (t Table) Write(w io.Writer) error {
_, err := fmt.Fprintln(w, t.Header)
if err != nil {
return err
}
_, err = fmt.Fprintln(w, strings.Repeat("-", 70))
if err != nil {
return err
}
for _, row := range t.Rows {
_, err = fmt.Fprintf(w, t.RowFormat+"\n", row...)
if err != nil {
return err
}
}
return nil
}
const TimeFormat = "2006-01-02 15:04:05"
func reltime(t time.Time) string {
sec := uint64(time.Since(t).Seconds())
switch {
case sec > week:
return t.Format(TimeFormat)
case sec > day:
return fmt.Sprintf("%d days ago", sec/day)
case sec > hour:
return fmt.Sprintf("%d hours ago", sec/hour)
case sec > minute:
return fmt.Sprintf("%d minutes ago", sec/minute)
default:
return fmt.Sprintf("%d seconds ago", sec)
}
}
type CmdSnapshots struct {
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("snapshots",
"show snapshots",
"The snapshots command lists all snapshots stored in a repository",
&CmdSnapshots{global: &globalOpts})
if err != nil {
panic(err)
}
}
func (cmd CmdSnapshots) Usage() string {
return ""
}
func (cmd CmdSnapshots) Execute(args []string) error {
if len(args) != 0 {
return fmt.Errorf("wrong number of arguments, usage: %s", cmd.Usage())
}
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
tab := NewTable()
tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Source", "Directory")
tab.RowFormat = "%-8s %-19s %-10s %s"
done := make(chan struct{})
defer close(done)
list := []*restic.Snapshot{}
for id := range repo.List(backend.Snapshot, done) {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err)
continue
}
pos := sort.Search(len(list), func(i int) bool {
return list[i].Time.After(sn.Time)
})
if pos < len(list) {
list = append(list, nil)
copy(list[pos+1:], list[pos:])
list[pos] = sn
} else {
list = append(list, sn)
}
}
plen, err := repo.PrefixLength(backend.Snapshot)
if err != nil {
return err
}
for _, sn := range list {
if len(sn.Paths) == 0 {
continue
}
tab.Rows = append(tab.Rows, []interface{}{sn.ID()[:plen/2], sn.Time.Format(TimeFormat), sn.Hostname, sn.Paths[0]})
if len(sn.Paths) > 1 {
for _, path := range sn.Paths {
tab.Rows = append(tab.Rows, []interface{}{"", "", "", path})
}
}
}
tab.Write(os.Stdout)
return nil
}
| 1 | 6,635 | why is it in the list twice to begin with? | restic-restic | go |
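Context for the review question above (an editorial note, not part of the dataset row): sn.Paths[0] is already printed on the snapshot's main table row, so ranging over the full slice listed the first path twice; the patch therefore iterates over sn.Paths[1:].

// After the change, only the remaining paths get their own rows:
for _, path := range sn.Paths[1:] {
	tab.Rows = append(tab.Rows, []interface{}{"", "", "", path})
}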
@@ -890,6 +890,10 @@ void Player::sendPing()
return;
}
+ if (pzLocked) {
+ return;
+ }
+
if (!g_creatureEvents->playerLogout(this)) {
return;
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "bed.h"
#include "chat.h"
#include "combat.h"
#include "configmanager.h"
#include "creatureevent.h"
#include "events.h"
#include "game.h"
#include "iologindata.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "weapons.h"
#include <fmt/format.h>
extern ConfigManager g_config;
extern Game g_game;
extern Chat* g_chat;
extern Vocations g_vocations;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
MuteCountMap Player::muteCountMap;
uint32_t Player::playerAutoID = 0x10000000;
Player::Player(ProtocolGame_ptr p) :
Creature(), lastPing(OTSYS_TIME()), lastPong(lastPing), inbox(new Inbox(ITEM_INBOX)), storeInbox(new StoreInbox(ITEM_STORE_INBOX)), client(std::move(p))
{
inbox->incrementReferenceCounter();
storeInbox->setParent(this);
storeInbox->incrementReferenceCounter();
}
Player::~Player()
{
for (Item* item : inventory) {
if (item) {
item->setParent(nullptr);
item->decrementReferenceCounter();
}
}
for (const auto& it : depotLockerMap) {
it.second->removeInbox(inbox);
}
inbox->decrementReferenceCounter();
storeInbox->setParent(nullptr);
storeInbox->decrementReferenceCounter();
setWriteItem(nullptr);
setEditHouse(nullptr);
}
bool Player::setVocation(uint16_t vocId)
{
Vocation* voc = g_vocations.getVocation(vocId);
if (!voc) {
return false;
}
vocation = voc;
updateRegeneration();
return true;
}
bool Player::isPushable() const
{
if (hasFlag(PlayerFlag_CannotBePushed)) {
return false;
}
return Creature::isPushable();
}
std::string Player::getDescription(int32_t lookDistance) const
{
std::ostringstream s;
if (lookDistance == -1) {
s << "yourself.";
if (group->access) {
s << " You are " << group->name << '.';
} else if (vocation->getId() != VOCATION_NONE) {
s << " You are " << vocation->getVocDescription() << '.';
} else {
s << " You have no vocation.";
}
} else {
s << name;
if (!group->access) {
s << " (Level " << level << ')';
}
s << '.';
if (sex == PLAYERSEX_FEMALE) {
s << " She";
} else {
s << " He";
}
if (group->access) {
s << " is " << group->name << '.';
} else if (vocation->getId() != VOCATION_NONE) {
s << " is " << vocation->getVocDescription() << '.';
} else {
s << " has no vocation.";
}
}
if (party) {
if (lookDistance == -1) {
s << " Your party has ";
} else if (sex == PLAYERSEX_FEMALE) {
s << " She is in a party with ";
} else {
s << " He is in a party with ";
}
size_t memberCount = party->getMemberCount() + 1;
if (memberCount == 1) {
s << "1 member and ";
} else {
s << memberCount << " members and ";
}
size_t invitationCount = party->getInvitationCount();
if (invitationCount == 1) {
s << "1 pending invitation.";
} else {
s << invitationCount << " pending invitations.";
}
}
if (!guild || !guildRank) {
return s.str();
}
if (lookDistance == -1) {
s << " You are ";
} else if (sex == PLAYERSEX_FEMALE) {
s << " She is ";
} else {
s << " He is ";
}
s << guildRank->name << " of the " << guild->getName();
if (!guildNick.empty()) {
s << " (" << guildNick << ')';
}
size_t memberCount = guild->getMemberCount();
if (memberCount == 1) {
s << ", which has 1 member, " << guild->getMembersOnline().size() << " of them online.";
} else {
s << ", which has " << memberCount << " members, " << guild->getMembersOnline().size() << " of them online.";
}
return s.str();
}
Item* Player::getInventoryItem(slots_t slot) const
{
if (slot < CONST_SLOT_FIRST || slot > CONST_SLOT_LAST) {
return nullptr;
}
return inventory[slot];
}
void Player::addConditionSuppressions(uint32_t conditions)
{
conditionSuppressions |= conditions;
}
void Player::removeConditionSuppressions(uint32_t conditions)
{
conditionSuppressions &= ~conditions;
}
Item* Player::getWeapon(slots_t slot, bool ignoreAmmo) const
{
Item* item = inventory[slot];
if (!item) {
return nullptr;
}
WeaponType_t weaponType = item->getWeaponType();
if (weaponType == WEAPON_NONE || weaponType == WEAPON_SHIELD || weaponType == WEAPON_AMMO) {
return nullptr;
}
if (!ignoreAmmo && weaponType == WEAPON_DISTANCE) {
const ItemType& it = Item::items[item->getID()];
if (it.ammoType != AMMO_NONE) {
Item* ammoItem = inventory[CONST_SLOT_AMMO];
if (!ammoItem || ammoItem->getAmmoType() != it.ammoType) {
return nullptr;
}
item = ammoItem;
}
}
return item;
}
Item* Player::getWeapon(bool ignoreAmmo/* = false*/) const
{
Item* item = getWeapon(CONST_SLOT_LEFT, ignoreAmmo);
if (item) {
return item;
}
item = getWeapon(CONST_SLOT_RIGHT, ignoreAmmo);
if (item) {
return item;
}
return nullptr;
}
WeaponType_t Player::getWeaponType() const
{
Item* item = getWeapon();
if (!item) {
return WEAPON_NONE;
}
return item->getWeaponType();
}
int32_t Player::getWeaponSkill(const Item* item) const
{
if (!item) {
return getSkillLevel(SKILL_FIST);
}
int32_t attackSkill;
WeaponType_t weaponType = item->getWeaponType();
switch (weaponType) {
case WEAPON_SWORD: {
attackSkill = getSkillLevel(SKILL_SWORD);
break;
}
case WEAPON_CLUB: {
attackSkill = getSkillLevel(SKILL_CLUB);
break;
}
case WEAPON_AXE: {
attackSkill = getSkillLevel(SKILL_AXE);
break;
}
case WEAPON_DISTANCE: {
attackSkill = getSkillLevel(SKILL_DISTANCE);
break;
}
default: {
attackSkill = 0;
break;
}
}
return attackSkill;
}
int32_t Player::getArmor() const
{
int32_t armor = 0;
static const slots_t armorSlots[] = {CONST_SLOT_HEAD, CONST_SLOT_NECKLACE, CONST_SLOT_ARMOR, CONST_SLOT_LEGS, CONST_SLOT_FEET, CONST_SLOT_RING};
for (slots_t slot : armorSlots) {
Item* inventoryItem = inventory[slot];
if (inventoryItem) {
armor += inventoryItem->getArmor();
}
}
return static_cast<int32_t>(armor * vocation->armorMultiplier);
}
void Player::getShieldAndWeapon(const Item*& shield, const Item*& weapon) const
{
shield = nullptr;
weapon = nullptr;
for (uint32_t slot = CONST_SLOT_RIGHT; slot <= CONST_SLOT_LEFT; slot++) {
Item* item = inventory[slot];
if (!item) {
continue;
}
switch (item->getWeaponType()) {
case WEAPON_NONE:
break;
case WEAPON_SHIELD: {
if (!shield || item->getDefense() > shield->getDefense()) {
shield = item;
}
break;
}
default: { // weapons that are not shields
weapon = item;
break;
}
}
}
}
int32_t Player::getDefense() const
{
int32_t defenseSkill = getSkillLevel(SKILL_FIST);
int32_t defenseValue = 7;
const Item* weapon;
const Item* shield;
getShieldAndWeapon(shield, weapon);
if (weapon) {
defenseValue = weapon->getDefense() + weapon->getExtraDefense();
defenseSkill = getWeaponSkill(weapon);
}
if (shield) {
defenseValue = weapon != nullptr ? shield->getDefense() + weapon->getExtraDefense() : shield->getDefense();
defenseSkill = getSkillLevel(SKILL_SHIELD);
}
if (defenseSkill == 0) {
switch (fightMode) {
case FIGHTMODE_ATTACK:
case FIGHTMODE_BALANCED:
return 1;
case FIGHTMODE_DEFENSE:
return 2;
}
}
return (defenseSkill / 4. + 2.23) * defenseValue * 0.15 * getDefenseFactor() * vocation->defenseMultiplier;
}
uint32_t Player::getAttackSpeed() const
{
const Item* weapon = getWeapon(true);
if (!weapon || weapon->getAttackSpeed() == 0) {
return vocation->getAttackSpeed();
}
return weapon->getAttackSpeed();
}
float Player::getAttackFactor() const
{
switch (fightMode) {
case FIGHTMODE_ATTACK: return 1.0f;
case FIGHTMODE_BALANCED: return 1.2f;
case FIGHTMODE_DEFENSE: return 2.0f;
default: return 1.0f;
}
}
float Player::getDefenseFactor() const
{
switch (fightMode) {
case FIGHTMODE_ATTACK: return (OTSYS_TIME() - lastAttack) < getAttackSpeed() ? 0.5f : 1.0f;
case FIGHTMODE_BALANCED: return (OTSYS_TIME() - lastAttack) < getAttackSpeed() ? 0.75f : 1.0f;
case FIGHTMODE_DEFENSE: return 1.0f;
default: return 1.0f;
}
}
uint16_t Player::getClientIcons() const
{
uint16_t icons = 0;
for (Condition* condition : conditions) {
if (!isSuppress(condition->getType())) {
icons |= condition->getIcons();
}
}
if (pzLocked) {
icons |= ICON_REDSWORDS;
}
if (tile && tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
icons |= ICON_PIGEON;
// Don't show ICON_SWORDS if player is in protection zone.
icons &= ~ICON_SWORDS;
}
// Game client debugs with 10 or more icons
// so let's prevent that from happening.
std::bitset<20> icon_bitset(static_cast<uint64_t>(icons));
for (size_t pos = 0, bits_set = icon_bitset.count(); bits_set >= 10; ++pos) {
if (icon_bitset[pos]) {
icon_bitset.reset(pos);
--bits_set;
}
}
return icon_bitset.to_ulong();
}
void Player::updateInventoryWeight()
{
if (hasFlag(PlayerFlag_HasInfiniteCapacity)) {
return;
}
inventoryWeight = 0;
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
const Item* item = inventory[i];
if (item) {
inventoryWeight += item->getWeight();
}
}
if (StoreInbox* storeInbox = getStoreInbox()) {
inventoryWeight += storeInbox->getWeight();
}
}
void Player::addSkillAdvance(skills_t skill, uint64_t count)
{
uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level);
uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
//player has reached max skill
return;
}
g_events->eventPlayerOnGainSkillTries(this, skill, count);
if (count == 0) {
return;
}
bool sendUpdateSkills = false;
while ((skills[skill].tries + count) >= nextReqTries) {
count -= nextReqTries - skills[skill].tries;
skills[skill].level++;
skills[skill].tries = 0;
skills[skill].percent = 0;
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You advanced to {:s} level {:d}.", getSkillName(skill), skills[skill].level));
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level);
sendUpdateSkills = true;
currReqTries = nextReqTries;
nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
count = 0;
break;
}
}
skills[skill].tries += count;
uint32_t newPercent;
if (nextReqTries > currReqTries) {
newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries);
} else {
newPercent = 0;
}
if (skills[skill].percent != newPercent) {
skills[skill].percent = newPercent;
sendUpdateSkills = true;
}
if (sendUpdateSkills) {
sendSkills();
}
}
void Player::removeSkillTries(skills_t skill, uint64_t count, bool notify/* = false*/)
{
uint16_t oldLevel = skills[skill].level;
uint8_t oldPercent = skills[skill].percent;
while (count > skills[skill].tries) {
count -= skills[skill].tries;
if (skills[skill].level <= MINIMUM_SKILL_LEVEL) {
skills[skill].level = MINIMUM_SKILL_LEVEL;
skills[skill].tries = 0;
count = 0;
break;
}
skills[skill].tries = vocation->getReqSkillTries(skill, skills[skill].level);
skills[skill].level--;
}
skills[skill].tries = std::max<int32_t>(0, skills[skill].tries - count);
skills[skill].percent = Player::getPercentLevel(skills[skill].tries, vocation->getReqSkillTries(skill, skills[skill].level));
if (notify) {
bool sendUpdateSkills = false;
if (oldLevel != skills[skill].level) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You were downgraded to {:s} level {:d}.", getSkillName(skill), skills[skill].level));
sendUpdateSkills = true;
}
if (sendUpdateSkills || oldPercent != skills[skill].percent) {
sendSkills();
}
}
}
void Player::setVarStats(stats_t stat, int32_t modifier)
{
varStats[stat] += modifier;
switch (stat) {
case STAT_MAXHITPOINTS: {
if (getHealth() > getMaxHealth()) {
Creature::changeHealth(getMaxHealth() - getHealth());
} else {
g_game.addCreatureHealth(this);
}
break;
}
case STAT_MAXMANAPOINTS: {
if (getMana() > getMaxMana()) {
changeMana(getMaxMana() - getMana());
}
break;
}
default: {
break;
}
}
}
int32_t Player::getDefaultStats(stats_t stat) const
{
switch (stat) {
case STAT_MAXHITPOINTS: return healthMax;
case STAT_MAXMANAPOINTS: return manaMax;
case STAT_MAGICPOINTS: return getBaseMagicLevel();
default: return 0;
}
}
void Player::addContainer(uint8_t cid, Container* container)
{
if (cid > 0xF) {
return;
}
if (container->getID() == ITEM_BROWSEFIELD) {
container->incrementReferenceCounter();
}
auto it = openContainers.find(cid);
if (it != openContainers.end()) {
OpenContainer& openContainer = it->second;
Container* oldContainer = openContainer.container;
if (oldContainer->getID() == ITEM_BROWSEFIELD) {
oldContainer->decrementReferenceCounter();
}
openContainer.container = container;
openContainer.index = 0;
} else {
OpenContainer openContainer;
openContainer.container = container;
openContainer.index = 0;
openContainers[cid] = openContainer;
}
}
void Player::closeContainer(uint8_t cid)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return;
}
OpenContainer openContainer = it->second;
Container* container = openContainer.container;
openContainers.erase(it);
if (container && container->getID() == ITEM_BROWSEFIELD) {
container->decrementReferenceCounter();
}
}
void Player::setContainerIndex(uint8_t cid, uint16_t index)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return;
}
it->second.index = index;
}
Container* Player::getContainerByID(uint8_t cid)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return nullptr;
}
return it->second.container;
}
int8_t Player::getContainerID(const Container* container) const
{
for (const auto& it : openContainers) {
if (it.second.container == container) {
return it.first;
}
}
return -1;
}
uint16_t Player::getContainerIndex(uint8_t cid) const
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return 0;
}
return it->second.index;
}
bool Player::canOpenCorpse(uint32_t ownerId) const
{
return getID() == ownerId || (party && party->canOpenCorpse(ownerId));
}
uint16_t Player::getLookCorpse() const
{
if (sex == PLAYERSEX_FEMALE) {
return ITEM_FEMALE_CORPSE;
}
return ITEM_MALE_CORPSE;
}
void Player::addStorageValue(const uint32_t key, const int32_t value, const bool isLogin/* = false*/)
{
if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
if (IS_IN_KEYRANGE(key, OUTFITS_RANGE)) {
outfits.emplace_back(
value >> 16,
value & 0xFF
);
return;
} else if (IS_IN_KEYRANGE(key, MOUNTS_RANGE)) {
// do nothing
} else {
std::cout << "Warning: unknown reserved key: " << key << " player: " << getName() << std::endl;
return;
}
}
if (value != -1) {
int32_t oldValue;
getStorageValue(key, oldValue);
storageMap[key] = value;
if (!isLogin) {
auto currentFrameTime = g_dispatcher.getDispatcherCycle();
if (lastQuestlogUpdate != currentFrameTime && g_game.quests.isQuestStorage(key, value, oldValue)) {
lastQuestlogUpdate = currentFrameTime;
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Your questlog has been updated.");
}
}
} else {
storageMap.erase(key);
}
}
bool Player::getStorageValue(const uint32_t key, int32_t& value) const
{
auto it = storageMap.find(key);
if (it == storageMap.end()) {
value = -1;
return false;
}
value = it->second;
return true;
}
bool Player::canSee(const Position& pos) const
{
if (!client) {
return false;
}
return client->canSee(pos);
}
bool Player::canSeeCreature(const Creature* creature) const
{
if (creature == this) {
return true;
}
if (creature->isInGhostMode() && !canSeeGhostMode(creature)) {
return false;
}
if (!creature->getPlayer() && !canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
bool Player::canSeeGhostMode(const Creature*) const
{
return group->access;
}
bool Player::canWalkthrough(const Creature* creature) const
{
if (group->access || creature->isInGhostMode()) {
return true;
}
const Player* player = creature->getPlayer();
if (!player || !g_config.getBoolean(ConfigManager::ALLOW_WALKTHROUGH)) {
return false;
}
const Tile* playerTile = player->getTile();
if (!playerTile || (!playerTile->hasFlag(TILESTATE_PROTECTIONZONE) && player->getLevel() > static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL)))) {
return false;
}
const Item* playerTileGround = playerTile->getGround();
if (!playerTileGround || !playerTileGround->hasWalkStack()) {
return false;
}
Player* thisPlayer = const_cast<Player*>(this);
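	// Walking through requires a second attempt within 2 seconds at the same
	// position; the first attempt only records the timestamp below.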
if ((OTSYS_TIME() - lastWalkthroughAttempt) > 2000) {
thisPlayer->setLastWalkthroughAttempt(OTSYS_TIME());
return false;
}
if (creature->getPosition() != lastWalkthroughPosition) {
thisPlayer->setLastWalkthroughPosition(creature->getPosition());
return false;
}
thisPlayer->setLastWalkthroughPosition(creature->getPosition());
return true;
}
bool Player::canWalkthroughEx(const Creature* creature) const
{
if (group->access) {
return true;
}
const Player* player = creature->getPlayer();
if (!player || !g_config.getBoolean(ConfigManager::ALLOW_WALKTHROUGH)) {
return false;
}
const Tile* playerTile = player->getTile();
return playerTile && (playerTile->hasFlag(TILESTATE_PROTECTIONZONE) || player->getLevel() <= static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL)));
}
void Player::onReceiveMail() const
{
if (isNearDepotBox()) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, "New mail has arrived.");
}
}
bool Player::isNearDepotBox() const
{
const Position& pos = getPosition();
for (int32_t cx = -1; cx <= 1; ++cx) {
for (int32_t cy = -1; cy <= 1; ++cy) {
Tile* tile = g_game.map.getTile(pos.x + cx, pos.y + cy, pos.z);
if (!tile) {
continue;
}
if (tile->hasFlag(TILESTATE_DEPOT)) {
return true;
}
}
}
return false;
}
DepotChest* Player::getDepotChest(uint32_t depotId, bool autoCreate)
{
auto it = depotChests.find(depotId);
if (it != depotChests.end()) {
return it->second;
}
if (!autoCreate) {
return nullptr;
}
it = depotChests.emplace(depotId, new DepotChest(ITEM_DEPOT)).first;
it->second->setMaxDepotItems(getMaxDepotItems());
return it->second;
}
DepotLocker* Player::getDepotLocker(uint32_t depotId)
{
auto it = depotLockerMap.find(depotId);
if (it != depotLockerMap.end()) {
inbox->setParent(it->second.get());
return it->second.get();
}
it = depotLockerMap.emplace(depotId, new DepotLocker(ITEM_LOCKER1)).first;
it->second->setDepotId(depotId);
it->second->internalAddThing(Item::CreateItem(ITEM_MARKET));
it->second->internalAddThing(inbox);
it->second->internalAddThing(getDepotChest(depotId, true));
return it->second.get();
}
void Player::sendCancelMessage(ReturnValue message) const
{
sendCancelMessage(getReturnMessage(message));
}
void Player::sendStats()
{
if (client) {
client->sendStats();
lastStatsTrainingTime = getOfflineTrainingTime() / 60 / 1000;
}
}
void Player::sendPing()
{
int64_t timeNow = OTSYS_TIME();
bool hasLostConnection = false;
if ((timeNow - lastPing) >= 5000) {
lastPing = timeNow;
if (client) {
client->sendPing();
} else {
hasLostConnection = true;
}
}
int64_t noPongTime = timeNow - lastPong;
if ((hasLostConnection || noPongTime >= 7000) && attackedCreature && attackedCreature->getPlayer()) {
setAttackedCreature(nullptr);
}
int32_t noPongKickTime = vocation->getNoPongKickTime();
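	// A pz-locked player always gets at least 60 seconds before the no-pong kick.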
if (pzLocked && noPongKickTime < 60000) {
noPongKickTime = 60000;
}
if (noPongTime >= noPongKickTime) {
if (isConnecting || getTile()->hasFlag(TILESTATE_NOLOGOUT)) {
return;
}
if (!g_creatureEvents->playerLogout(this)) {
return;
}
if (client) {
client->logout(true, true);
} else {
g_game.removeCreature(this, true);
}
}
}
Item* Player::getWriteItem(uint32_t& windowTextId, uint16_t& maxWriteLen)
{
windowTextId = this->windowTextId;
maxWriteLen = this->maxWriteLen;
return writeItem;
}
void Player::setWriteItem(Item* item, uint16_t maxWriteLen /*= 0*/)
{
windowTextId++;
if (writeItem) {
writeItem->decrementReferenceCounter();
}
if (item) {
writeItem = item;
this->maxWriteLen = maxWriteLen;
writeItem->incrementReferenceCounter();
} else {
writeItem = nullptr;
this->maxWriteLen = 0;
}
}
House* Player::getEditHouse(uint32_t& windowTextId, uint32_t& listId)
{
windowTextId = this->windowTextId;
listId = this->editListId;
return editHouse;
}
void Player::setEditHouse(House* house, uint32_t listId /*= 0*/)
{
windowTextId++;
editHouse = house;
editListId = listId;
}
void Player::sendHouseWindow(House* house, uint32_t listId) const
{
if (!client) {
return;
}
std::string text;
if (house->getAccessList(listId, text)) {
client->sendHouseWindow(windowTextId, text);
}
}
//container
void Player::sendAddContainerItem(const Container* container, const Item* item)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
uint16_t slot = openContainer.index;
if (container->getID() == ITEM_BROWSEFIELD) {
uint16_t containerSize = container->size() - 1;
uint16_t pageEnd = openContainer.index + container->capacity() - 1;
if (containerSize > pageEnd) {
slot = pageEnd;
item = container->getItemByIndex(pageEnd);
} else {
slot = containerSize;
}
} else if (openContainer.index >= container->capacity()) {
item = container->getItemByIndex(openContainer.index);
}
if (item) {
client->sendAddContainerItem(it.first, slot, item);
}
}
}
void Player::sendUpdateContainerItem(const Container* container, uint16_t slot, const Item* newItem)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
if (slot < openContainer.index) {
continue;
}
uint16_t pageEnd = openContainer.index + container->capacity();
if (slot >= pageEnd) {
continue;
}
client->sendUpdateContainerItem(it.first, slot, newItem);
}
}
void Player::sendRemoveContainerItem(const Container* container, uint16_t slot)
{
if (!client) {
return;
}
for (auto& it : openContainers) {
OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
uint16_t& firstIndex = openContainer.index;
if (firstIndex > 0 && firstIndex >= container->size() - 1) {
firstIndex -= container->capacity();
sendContainer(it.first, container, false, firstIndex);
}
client->sendRemoveContainerItem(it.first, std::max<uint16_t>(slot, firstIndex), container->getItemByIndex(container->capacity() + firstIndex));
}
}
void Player::onUpdateTileItem(const Tile* tile, const Position& pos, const Item* oldItem,
const ItemType& oldType, const Item* newItem, const ItemType& newType)
{
Creature::onUpdateTileItem(tile, pos, oldItem, oldType, newItem, newType);
if (oldItem != newItem) {
onRemoveTileItem(tile, pos, oldType, oldItem);
}
if (tradeState != TRADE_TRANSFER) {
if (tradeItem && oldItem == tradeItem) {
g_game.internalCloseTrade(this);
}
}
}
void Player::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType,
const Item* item)
{
Creature::onRemoveTileItem(tile, pos, iType, item);
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
const Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::onCreatureAppear(Creature* creature, bool isLogin)
{
Creature::onCreatureAppear(creature, isLogin);
if (isLogin && creature == this) {
sendItems();
for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) {
Item* item = inventory[slot];
if (item) {
item->startDecaying();
g_moveEvents->onPlayerEquip(this, item, static_cast<slots_t>(slot), false);
}
}
for (Condition* condition : storedConditionList) {
addCondition(condition);
}
storedConditionList.clear();
updateRegeneration();
BedItem* bed = g_game.getBedBySleeper(guid);
if (bed) {
bed->wakeUp(this);
}
Account account = IOLoginData::loadAccount(accountNumber);
if (g_config.getBoolean(ConfigManager::PLAYER_CONSOLE_LOGS)) {
std::cout << name << " has logged in." << std::endl;
}
if (guild) {
guild->addMember(this);
}
int32_t offlineTime;
if (getLastLogout() != 0) {
// Not counting more than 21 days to prevent overflow when multiplying with 1000 (for milliseconds).
offlineTime = std::min<int32_t>(time(nullptr) - getLastLogout(), 86400 * 21);
} else {
offlineTime = 0;
}
for (Condition* condition : getMuteConditions()) {
condition->setTicks(condition->getTicks() - (offlineTime * 1000));
if (condition->getTicks() <= 0) {
removeCondition(condition);
}
}
g_game.checkPlayersRecord();
IOLoginData::updateOnlineStatus(guid, true);
}
}
void Player::onAttackedCreatureDisappear(bool isLogout)
{
sendCancelTarget();
if (!isLogout) {
sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
}
}
void Player::onFollowCreatureDisappear(bool isLogout)
{
sendCancelTarget();
if (!isLogout) {
sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
}
}
void Player::onChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
if (attackedCreature && !hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
if (!group->access && isMounted()) {
dismount();
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
wasMounted = true;
}
} else {
if (wasMounted) {
toggleMount(true);
wasMounted = false;
}
}
g_game.updateCreatureWalkthrough(this);
sendIcons();
}
void Player::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
} else if (zone == ZONE_NOPVP) {
if (attackedCreature->getPlayer()) {
if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
}
} else if (zone == ZONE_NORMAL) {
//attackedCreature can leave a pvp zone if not pzlocked
if (g_game.getWorldType() == WORLD_TYPE_NO_PVP) {
if (attackedCreature->getPlayer()) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
}
}
}
void Player::onRemoveCreature(Creature* creature, bool isLogout)
{
Creature::onRemoveCreature(creature, isLogout);
if (creature == this) {
if (isLogout) {
loginPosition = getPosition();
}
lastLogout = time(nullptr);
if (eventWalk != 0) {
setFollowCreature(nullptr);
}
if (tradePartner) {
g_game.internalCloseTrade(this);
}
closeShopWindow();
clearPartyInvitations();
if (party) {
party->leaveParty(this);
}
g_chat->removeUserFromAllChannels(*this);
if (g_config.getBoolean(ConfigManager::PLAYER_CONSOLE_LOGS)) {
std::cout << getName() << " has logged out." << std::endl;
}
if (guild) {
guild->removeMember(this);
}
IOLoginData::updateOnlineStatus(guid, false);
bool saved = false;
for (uint32_t tries = 0; tries < 3; ++tries) {
if (IOLoginData::savePlayer(this)) {
saved = true;
break;
}
}
if (!saved) {
std::cout << "Error while saving player: " << getName() << std::endl;
}
}
}
void Player::openShopWindow(Npc* npc, const std::list<ShopInfo>& shop)
{
shopItemList = shop;
sendShop(npc);
sendSaleItemList();
}
bool Player::closeShopWindow(bool sendCloseShopWindow /*= true*/)
{
//unreference callbacks
int32_t onBuy;
int32_t onSell;
Npc* npc = getShopOwner(onBuy, onSell);
if (!npc) {
shopItemList.clear();
return false;
}
setShopOwner(nullptr, -1, -1);
npc->onPlayerEndTrade(this, onBuy, onSell);
if (sendCloseShopWindow) {
sendCloseShop();
}
shopItemList.clear();
return true;
}
void Player::onWalk(Direction& dir)
{
Creature::onWalk(dir);
setNextActionTask(nullptr);
setNextAction(OTSYS_TIME() + getStepDuration(dir));
}
void Player::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
Creature::onCreatureMove(creature, newTile, newPos, oldTile, oldPos, teleport);
if (hasFollowPath && (creature == followCreature || (creature == this && followCreature))) {
isUpdatingPath = false;
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, &g_game, getID())));
}
if (creature != this) {
return;
}
if (tradeState != TRADE_TRANSFER) {
//check if we should close trade
if (tradeItem && !Position::areInRange<1, 1, 0>(tradeItem->getPosition(), getPosition())) {
g_game.internalCloseTrade(this);
}
if (tradePartner && !Position::areInRange<2, 2, 0>(tradePartner->getPosition(), getPosition())) {
g_game.internalCloseTrade(this);
}
}
// close modal windows
if (!modalWindows.empty()) {
// TODO: This shouldn't be hard-coded
for (uint32_t modalWindowId : modalWindows) {
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
break;
}
}
modalWindows.clear();
}
// leave market
if (inMarket) {
inMarket = false;
}
if (party) {
party->updateSharedExperience();
}
if (teleport || oldPos.z != newPos.z) {
int32_t ticks = g_config.getNumber(ConfigManager::STAIRHOP_DELAY);
if (ticks > 0) {
if (Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_PACIFIED, ticks, 0)) {
addCondition(condition);
}
}
}
}
//container
void Player::onAddContainerItem(const Item* item)
{
checkTradeState(item);
}
void Player::onUpdateContainerItem(const Container* container, const Item* oldItem, const Item* newItem)
{
if (oldItem != newItem) {
onRemoveContainerItem(container, oldItem);
}
if (tradeState != TRADE_TRANSFER) {
checkTradeState(oldItem);
}
}
void Player::onRemoveContainerItem(const Container* container, const Item* item)
{
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
if (tradeItem->getParent() != container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::onCloseContainer(const Container* container)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
if (it.second.container == container) {
client->sendCloseContainer(it.first);
}
}
}
void Player::onSendContainer(const Container* container)
{
if (!client) {
return;
}
bool hasParent = container->hasParent();
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container == container) {
client->sendContainer(it.first, container, hasParent, openContainer.index);
}
}
}
//inventory
void Player::onUpdateInventoryItem(Item* oldItem, Item* newItem)
{
if (oldItem != newItem) {
onRemoveInventoryItem(oldItem);
}
if (tradeState != TRADE_TRANSFER) {
checkTradeState(oldItem);
}
}
void Player::onRemoveInventoryItem(Item* item)
{
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
const Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::checkTradeState(const Item* item)
{
if (!tradeItem || tradeState == TRADE_TRANSFER) {
return;
}
if (tradeItem == item) {
g_game.internalCloseTrade(this);
} else {
const Container* container = dynamic_cast<const Container*>(item->getParent());
while (container) {
if (container == tradeItem) {
g_game.internalCloseTrade(this);
break;
}
container = dynamic_cast<const Container*>(container->getParent());
}
}
}
void Player::setNextWalkActionTask(SchedulerTask* task)
{
if (walkTaskEvent != 0) {
g_scheduler.stopEvent(walkTaskEvent);
walkTaskEvent = 0;
}
delete walkTask;
walkTask = task;
}
void Player::setNextWalkTask(SchedulerTask* task)
{
if (nextStepEvent != 0) {
g_scheduler.stopEvent(nextStepEvent);
nextStepEvent = 0;
}
if (task) {
nextStepEvent = g_scheduler.addEvent(task);
resetIdleTime();
}
}
void Player::setNextActionTask(SchedulerTask* task, bool resetIdleTime /*= true */)
{
if (actionTaskEvent != 0) {
g_scheduler.stopEvent(actionTaskEvent);
actionTaskEvent = 0;
}
if (task) {
actionTaskEvent = g_scheduler.addEvent(task);
if (resetIdleTime) {
this->resetIdleTime();
}
}
}
uint32_t Player::getNextActionTime() const
{
return std::max<int64_t>(SCHEDULER_MINTICKS, nextAction - OTSYS_TIME());
}
void Player::onThink(uint32_t interval)
{
Creature::onThink(interval);
sendPing();
MessageBufferTicks += interval;
if (MessageBufferTicks >= 1500) {
MessageBufferTicks = 0;
addMessageBuffer();
}
if (!getTile()->hasFlag(TILESTATE_NOLOGOUT) && !isAccessPlayer()) {
idleTime += interval;
const int32_t kickAfterMinutes = g_config.getNumber(ConfigManager::KICK_AFTER_MINUTES);
if (idleTime > (kickAfterMinutes * 60000) + 60000) {
kickPlayer(true);
} else if (client && idleTime == 60000 * kickAfterMinutes) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_WARNING, fmt::format("There was no variation in your behaviour for {:d} minutes. You will be disconnected in one minute if there is no change in your actions until then.", kickAfterMinutes)));
}
}
if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) {
checkSkullTicks(interval / 1000);
}
addOfflineTrainingTime(interval);
if (lastStatsTrainingTime != getOfflineTrainingTime() / 60 / 1000) {
sendStats();
}
}
uint32_t Player::isMuted() const
{
if (hasFlag(PlayerFlag_CannotBeMuted)) {
return 0;
}
int32_t muteTicks = 0;
for (Condition* condition : conditions) {
if (condition->getType() == CONDITION_MUTED && condition->getTicks() > muteTicks) {
muteTicks = condition->getTicks();
}
}
return static_cast<uint32_t>(muteTicks) / 1000;
}
void Player::addMessageBuffer()
{
if (MessageBufferCount > 0 && g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER) != 0 && !hasFlag(PlayerFlag_CannotBeMuted)) {
--MessageBufferCount;
}
}
void Player::removeMessageBuffer()
{
if (hasFlag(PlayerFlag_CannotBeMuted)) {
return;
}
const int32_t maxMessageBuffer = g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER);
if (maxMessageBuffer != 0 && MessageBufferCount <= maxMessageBuffer + 1) {
if (++MessageBufferCount > maxMessageBuffer) {
uint32_t muteCount = 1;
auto it = muteCountMap.find(guid);
if (it != muteCountMap.end()) {
muteCount = it->second;
}
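			// Mute duration grows quadratically with repeated offences: 5, 20, 45, ... seconds.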
uint32_t muteTime = 5 * muteCount * muteCount;
muteCountMap[guid] = muteCount + 1;
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_MUTED, muteTime * 1000, 0);
addCondition(condition);
sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You are muted for {:d} seconds.", muteTime));
}
}
}
void Player::drainHealth(Creature* attacker, int32_t damage)
{
Creature::drainHealth(attacker, damage);
sendStats();
}
void Player::drainMana(Creature* attacker, int32_t manaLoss)
{
onAttacked();
changeMana(-manaLoss);
if (attacker) {
addDamagePoints(attacker, manaLoss);
}
sendStats();
}
void Player::addManaSpent(uint64_t amount)
{
if (hasFlag(PlayerFlag_NotGainMana)) {
return;
}
uint64_t currReqMana = vocation->getReqMana(magLevel);
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
//player has reached max magic level
return;
}
g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, amount);
if (amount == 0) {
return;
}
bool sendUpdateStats = false;
while ((manaSpent + amount) >= nextReqMana) {
amount -= nextReqMana - manaSpent;
magLevel++;
manaSpent = 0;
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You advanced to magic level {:d}.", magLevel));
g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel);
sendUpdateStats = true;
currReqMana = nextReqMana;
nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
return;
}
}
manaSpent += amount;
uint8_t oldPercent = magLevelPercent;
if (nextReqMana > currReqMana) {
magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana);
} else {
magLevelPercent = 0;
}
if (oldPercent != magLevelPercent) {
sendUpdateStats = true;
}
if (sendUpdateStats) {
sendStats();
}
}
void Player::removeManaSpent(uint64_t amount, bool notify/* = false*/)
{
if (amount == 0) {
return;
}
uint32_t oldLevel = magLevel;
uint8_t oldPercent = magLevelPercent;
while (amount > manaSpent && magLevel > 0) {
amount -= manaSpent;
manaSpent = vocation->getReqMana(magLevel);
magLevel--;
}
manaSpent -= amount;
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (nextReqMana > vocation->getReqMana(magLevel)) {
magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana);
} else {
magLevelPercent = 0;
}
if (notify) {
bool sendUpdateStats = false;
if (oldLevel != magLevel) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You were downgraded to magic level {:d}.", magLevel));
sendUpdateStats = true;
}
if (sendUpdateStats || oldPercent != magLevelPercent) {
sendStats();
}
}
}
void Player::addExperience(Creature* source, uint64_t exp, bool sendText/* = false*/)
{
uint64_t currLevelExp = Player::getExpForLevel(level);
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
uint64_t rawExp = exp;
if (currLevelExp >= nextLevelExp) {
//player has reached max level
levelPercent = 0;
sendStats();
return;
}
g_events->eventPlayerOnGainExperience(this, source, exp, rawExp);
if (exp == 0) {
return;
}
experience += exp;
if (sendText) {
std::string expString = std::to_string(exp) + (exp != 1 ? " experience points." : " experience point.");
TextMessage message(MESSAGE_EXPERIENCE, "You gained " + expString);
message.position = position;
message.primary.value = exp;
message.primary.color = TEXTCOLOR_WHITE_EXP;
sendTextMessage(message);
SpectatorVec spectators;
g_game.map.getSpectators(spectators, position, false, true);
spectators.erase(this);
if (!spectators.empty()) {
message.type = MESSAGE_EXPERIENCE_OTHERS;
message.text = getName() + " gained " + expString;
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendTextMessage(message);
}
}
}
uint32_t prevLevel = level;
while (experience >= nextLevelExp) {
++level;
healthMax += vocation->getHPGain();
health += vocation->getHPGain();
manaMax += vocation->getManaGain();
mana += vocation->getManaGain();
capacity += vocation->getCapGain();
currLevelExp = nextLevelExp;
nextLevelExp = Player::getExpForLevel(level + 1);
if (currLevelExp >= nextLevelExp) {
//player has reached max level
break;
}
}
if (prevLevel != level) {
health = getMaxHealth();
mana = getMaxMana();
updateBaseSpeed();
setBaseSpeed(getBaseSpeed());
g_game.changeSpeed(this, 0);
g_game.addCreatureHealth(this);
const uint32_t protectionLevel = static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL));
if (prevLevel < protectionLevel && level >= protectionLevel) {
g_game.updateCreatureWalkthrough(this);
}
if (party) {
party->updateSharedExperience();
}
g_creatureEvents->playerAdvance(this, SKILL_LEVEL, prevLevel, level);
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You advanced from Level {:d} to Level {:d}.", prevLevel, level));
}
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
sendStats();
}
void Player::removeExperience(uint64_t exp, bool sendText/* = false*/)
{
if (experience == 0 || exp == 0) {
return;
}
g_events->eventPlayerOnLoseExperience(this, exp);
if (exp == 0) {
return;
}
uint64_t lostExp = experience;
experience = std::max<int64_t>(0, experience - exp);
if (sendText) {
lostExp -= experience;
std::string expString = std::to_string(lostExp) + (lostExp != 1 ? " experience points." : " experience point.");
TextMessage message(MESSAGE_EXPERIENCE, "You lost " + expString);
message.position = position;
message.primary.value = lostExp;
message.primary.color = TEXTCOLOR_RED;
sendTextMessage(message);
SpectatorVec spectators;
g_game.map.getSpectators(spectators, position, false, true);
spectators.erase(this);
if (!spectators.empty()) {
message.type = MESSAGE_EXPERIENCE_OTHERS;
message.text = getName() + " lost " + expString;
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendTextMessage(message);
}
}
}
uint32_t oldLevel = level;
uint64_t currLevelExp = Player::getExpForLevel(level);
while (level > 1 && experience < currLevelExp) {
--level;
healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain());
manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain());
capacity = std::max<int32_t>(0, capacity - vocation->getCapGain());
currLevelExp = Player::getExpForLevel(level);
}
if (oldLevel != level) {
health = getMaxHealth();
mana = getMaxMana();
updateBaseSpeed();
setBaseSpeed(getBaseSpeed());
g_game.changeSpeed(this, 0);
g_game.addCreatureHealth(this);
const uint32_t protectionLevel = static_cast<uint32_t>(g_config.getNumber(ConfigManager::PROTECTION_LEVEL));
if (oldLevel >= protectionLevel && level < protectionLevel) {
g_game.updateCreatureWalkthrough(this);
}
if (party) {
party->updateSharedExperience();
}
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You were downgraded from Level {:d} to Level {:d}.", oldLevel, level));
}
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
sendStats();
}
uint8_t Player::getPercentLevel(uint64_t count, uint64_t nextLevelCount)
{
if (nextLevelCount == 0) {
return 0;
}
uint8_t result = (count * 100) / nextLevelCount;
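	// A count above nextLevelCount would yield more than 100 percent; treat it as no progress.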
if (result > 100) {
return 0;
}
return result;
}
void Player::onBlockHit()
{
if (shieldBlockCount > 0) {
--shieldBlockCount;
if (hasShield()) {
addSkillAdvance(SKILL_SHIELD, 1);
}
}
}
void Player::onAttackedCreatureBlockHit(BlockType_t blockType)
{
lastAttackBlockType = blockType;
switch (blockType) {
case BLOCK_NONE: {
addAttackSkillPoint = true;
bloodHitCount = 30;
shieldBlockCount = 30;
break;
}
case BLOCK_DEFENSE:
case BLOCK_ARMOR: {
//need to draw blood every 30 hits
if (bloodHitCount > 0) {
addAttackSkillPoint = true;
--bloodHitCount;
} else {
addAttackSkillPoint = false;
}
break;
}
default: {
addAttackSkillPoint = false;
break;
}
}
}
bool Player::hasShield() const
{
Item* item = inventory[CONST_SLOT_LEFT];
if (item && item->getWeaponType() == WEAPON_SHIELD) {
return true;
}
item = inventory[CONST_SLOT_RIGHT];
if (item && item->getWeaponType() == WEAPON_SHIELD) {
return true;
}
return false;
}
BlockType_t Player::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false*/, bool checkArmor /* = false*/, bool field /* = false*/, bool ignoreResistances /* = false*/)
{
BlockType_t blockType = Creature::blockHit(attacker, combatType, damage, checkDefense, checkArmor, field, ignoreResistances);
if (attacker) {
sendCreatureSquare(attacker, SQ_COLOR_BLACK);
}
if (blockType != BLOCK_NONE) {
return blockType;
}
if (damage <= 0) {
damage = 0;
return BLOCK_ARMOR;
}
if (!ignoreResistances) {
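//reduce the damage by each equipped item's absorb percentage, consuming a charge from items that have charges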
for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_AMMO; ++slot) {
if (!isItemAbilityEnabled(static_cast<slots_t>(slot))) {
continue;
}
Item* item = inventory[slot];
if (!item) {
continue;
}
const ItemType& it = Item::items[item->getID()];
if (!it.abilities) {
if (damage <= 0) {
damage = 0;
return BLOCK_ARMOR;
}
continue;
}
const int16_t& absorbPercent = it.abilities->absorbPercent[combatTypeToIndex(combatType)];
if (absorbPercent != 0) {
damage -= std::round(damage * (absorbPercent / 100.));
uint16_t charges = item->getCharges();
if (charges != 0) {
g_game.transformItem(item, item->getID(), charges - 1);
}
}
if (field) {
const int16_t& fieldAbsorbPercent = it.abilities->fieldAbsorbPercent[combatTypeToIndex(combatType)];
if (fieldAbsorbPercent != 0) {
damage -= std::round(damage * (fieldAbsorbPercent / 100.));
uint16_t charges = item->getCharges();
if (charges != 0) {
g_game.transformItem(item, item->getID(), charges - 1);
}
}
}
}
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
return blockType;
}
uint32_t Player::getIP() const
{
if (client) {
return client->getIP();
}
return 0;
}
void Player::death(Creature* lastHitCreature)
{
loginPosition = town->getTemplePosition();
if (skillLoss) {
uint8_t unfairFightReduction = 100;
bool lastHitPlayer = Player::lastHitIsPlayer(lastHitCreature);
if (lastHitPlayer) {
uint32_t sumLevels = 0;
uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
for (const auto& it : damageMap) {
CountBlock_t cb = it.second;
if ((OTSYS_TIME() - cb.ticks) <= inFightTicks) {
Player* damageDealer = g_game.getPlayerByID(it.first);
if (damageDealer) {
sumLevels += damageDealer->getLevel();
}
}
}
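//unfair fight reduction: when the attackers' combined level exceeds the victim's, scale the loss down proportionally, but never below 20% of the normal loss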
if (sumLevels > level) {
double reduce = level / static_cast<double>(sumLevels);
unfairFightReduction = std::max<uint8_t>(20, std::floor((reduce * 100) + 0.5));
}
}
//Magic level loss
uint64_t sumMana = 0;
for (uint32_t i = 1; i <= magLevel; ++i) {
sumMana += vocation->getReqMana(i);
}
double deathLossPercent = getLostPercent() * (unfairFightReduction / 100.);
removeManaSpent(static_cast<uint64_t>((sumMana + manaSpent) * deathLossPercent), false);
//Skill loss
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) { //for each skill
uint64_t sumSkillTries = 0;
for (uint16_t c = MINIMUM_SKILL_LEVEL + 1; c <= skills[i].level; ++c) { //sum up all required tries for all skill levels
sumSkillTries += vocation->getReqSkillTries(i, c);
}
sumSkillTries += skills[i].tries;
removeSkillTries(static_cast<skills_t>(i), sumSkillTries * deathLossPercent, false);
}
//Level loss
uint64_t expLoss = static_cast<uint64_t>(experience * deathLossPercent);
g_events->eventPlayerOnLoseExperience(this, expLoss);
if (expLoss != 0) {
uint32_t oldLevel = level;
if (vocation->getId() == VOCATION_NONE || level > 7) {
experience -= expLoss;
}
while (level > 1 && experience < Player::getExpForLevel(level)) {
--level;
healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain());
manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain());
capacity = std::max<int32_t>(0, capacity - vocation->getCapGain());
}
if (oldLevel != level) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You were downgraded from Level {:d} to Level {:d}.", oldLevel, level));
}
uint64_t currLevelExp = Player::getExpForLevel(level);
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
}
if (blessings.test(5)) {
if (lastHitPlayer) {
blessings.reset(5);
} else {
blessings.reset();
blessings.set(5);
}
} else {
blessings.reset();
}
sendStats();
sendSkills();
sendReLoginWindow(unfairFightReduction);
if (getSkull() == SKULL_BLACK) {
health = 40;
mana = 0;
} else {
health = healthMax;
mana = manaMax;
}
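//persistent conditions do not survive death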
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->isPersistent()) {
it = conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
} else {
++it;
}
}
} else {
setSkillLoss(true);
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->isPersistent()) {
it = conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
} else {
++it;
}
}
health = healthMax;
g_game.internalTeleport(this, getTemplePosition(), true);
g_game.addCreatureHealth(this);
onThink(EVENT_CREATURE_THINK_INTERVAL);
onIdleStatus();
sendStats();
}
}
bool Player::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (getZone() != ZONE_PVP || !Player::lastHitIsPlayer(lastHitCreature)) {
return Creature::dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
setDropLoot(true);
return false;
}
Item* Player::getCorpse(Creature* lastHitCreature, Creature* mostDamageCreature)
{
Item* corpse = Creature::getCorpse(lastHitCreature, mostDamageCreature);
if (corpse && corpse->getContainer()) {
std::unordered_map<std::string, uint16_t> names;
for (const auto& killer : getKillers()) {
++names[killer->getName()];
}
if (lastHitCreature) {
if (!mostDamageCreature) {
corpse->setSpecialDescription(fmt::format("You recognize {:s}. {:s} was killed by {:s}{:s}", getNameDescription(), getSex() == PLAYERSEX_FEMALE ? "She" : "He", lastHitCreature->getNameDescription(), names.size() > 1 ? " and others." : "."));
} else if (lastHitCreature != mostDamageCreature && names[lastHitCreature->getName()] == 1) {
corpse->setSpecialDescription(fmt::format("You recognize {:s}. {:s} was killed by {:s}, {:s}{:s}", getNameDescription(), getSex() == PLAYERSEX_FEMALE ? "She" : "He", mostDamageCreature->getNameDescription(), lastHitCreature->getNameDescription(), names.size() > 2 ? " and others." : "."));
} else {
corpse->setSpecialDescription(fmt::format("You recognize {:s}. {:s} was killed by {:s} and others.", getNameDescription(), getSex() == PLAYERSEX_FEMALE ? "She" : "He", mostDamageCreature->getNameDescription()));
}
} else if (mostDamageCreature) {
if (names.size() > 1) {
corpse->setSpecialDescription(fmt::format("You recognize {:s}. {:s} was killed by something evil, {:s}, and others.", getNameDescription(), getSex() == PLAYERSEX_FEMALE ? "She" : "He", mostDamageCreature->getNameDescription()));
} else {
corpse->setSpecialDescription(fmt::format("You recognize {:s}. {:s} was killed by something evil and {:s}.", getNameDescription(), getSex() == PLAYERSEX_FEMALE ? "She" : "He", mostDamageCreature->getNameDescription()));
}
} else {
corpse->setSpecialDescription(fmt::format("You recognize {:s}. {:s} was killed by something evil{:s}", getNameDescription(), getSex() == PLAYERSEX_FEMALE ? "She" : "He", names.size() ? " and others." : "."));
}
}
return corpse;
}
void Player::addInFightTicks(bool pzlock /*= false*/)
{
if (hasFlag(PlayerFlag_NotGainInFight)) {
return;
}
if (pzlock) {
pzLocked = true;
}
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::PZ_LOCKED), 0);
addCondition(condition);
}
void Player::removeList()
{
g_game.removePlayer(this);
for (const auto& it : g_game.getPlayers()) {
it.second->notifyStatusChange(this, VIPSTATUS_OFFLINE);
}
}
void Player::addList()
{
for (const auto& it : g_game.getPlayers()) {
it.second->notifyStatusChange(this, VIPSTATUS_ONLINE);
}
g_game.addPlayer(this);
}
void Player::kickPlayer(bool displayEffect)
{
g_creatureEvents->playerLogout(this);
if (client) {
client->logout(displayEffect, true);
} else {
g_game.removeCreature(this);
}
}
void Player::notifyStatusChange(Player* loginPlayer, VipStatus_t status)
{
if (!client) {
return;
}
auto it = VIPList.find(loginPlayer->guid);
if (it == VIPList.end()) {
return;
}
client->sendUpdatedVIPStatus(loginPlayer->guid, status);
if (status == VIPSTATUS_ONLINE) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged in."));
} else if (status == VIPSTATUS_OFFLINE) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged out."));
}
}
bool Player::removeVIP(uint32_t vipGuid)
{
if (VIPList.erase(vipGuid) == 0) {
return false;
}
IOLoginData::removeVIPEntry(accountNumber, vipGuid);
return true;
}
bool Player::addVIP(uint32_t vipGuid, const std::string& vipName, VipStatus_t status)
{
if (VIPList.size() >= getMaxVIPEntries()) {
sendTextMessage(MESSAGE_STATUS_SMALL, "You cannot add more buddies.");
return false;
}
auto result = VIPList.insert(vipGuid);
if (!result.second) {
sendTextMessage(MESSAGE_STATUS_SMALL, "This player is already in your list.");
return false;
}
IOLoginData::addVIPEntry(accountNumber, vipGuid, "", 0, false);
if (client) {
client->sendVIP(vipGuid, vipName, "", 0, false, status);
}
return true;
}
bool Player::addVIPInternal(uint32_t vipGuid)
{
if (VIPList.size() >= getMaxVIPEntries()) {
return false;
}
return VIPList.insert(vipGuid).second;
}
bool Player::editVIP(uint32_t vipGuid, const std::string& description, uint32_t icon, bool notify)
{
auto it = VIPList.find(vipGuid);
if (it == VIPList.end()) {
return false; // player is not in VIP
}
IOLoginData::editVIPEntry(accountNumber, vipGuid, description, icon, notify);
return true;
}
//close container and its child containers
void Player::autoCloseContainers(const Container* container)
{
std::vector<uint32_t> closeList;
for (const auto& it : openContainers) {
Container* tmpContainer = it.second.container;
while (tmpContainer) {
if (tmpContainer->isRemoved() || tmpContainer == container) {
closeList.push_back(it.first);
break;
}
tmpContainer = dynamic_cast<Container*>(tmpContainer->getParent());
}
}
for (uint32_t containerId : closeList) {
closeContainer(containerId);
if (client) {
client->sendCloseContainer(containerId);
}
}
}
bool Player::hasCapacity(const Item* item, uint32_t count) const
{
if (hasFlag(PlayerFlag_CannotPickupItem)) {
return false;
}
if (hasFlag(PlayerFlag_HasInfiniteCapacity) || item->getTopParent() == this) {
return true;
}
uint32_t itemWeight = item->getContainer() != nullptr ? item->getWeight() : item->getBaseWeight();
if (item->isStackable()) {
itemWeight *= count;
}
return itemWeight <= getFreeCapacity();
}
ReturnValue Player::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature*) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags);
if (childIsOwner) {
//a child container is querying the player, just check if enough capacity
bool skipLimit = hasBitSet(FLAG_NOLIMIT, flags);
if (skipLimit || hasCapacity(item, count)) {
return RETURNVALUE_NOERROR;
}
return RETURNVALUE_NOTENOUGHCAPACITY;
}
if (!item->isPickupable()) {
return RETURNVALUE_CANNOTPICKUP;
}
if (item->isStoreItem()) {
return RETURNVALUE_ITEMCANNOTBEMOVEDTHERE;
}
ReturnValue ret = RETURNVALUE_NOERROR;
const int32_t& slotPosition = item->getSlotPosition();
if ((slotPosition & SLOTP_HEAD) || (slotPosition & SLOTP_NECKLACE) ||
(slotPosition & SLOTP_BACKPACK) || (slotPosition & SLOTP_ARMOR) ||
(slotPosition & SLOTP_LEGS) || (slotPosition & SLOTP_FEET) ||
(slotPosition & SLOTP_RING)) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else if (slotPosition & SLOTP_TWO_HAND) {
ret = RETURNVALUE_PUTTHISOBJECTINBOTHHANDS;
} else if ((slotPosition & SLOTP_RIGHT) || (slotPosition & SLOTP_LEFT)) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else {
ret = RETURNVALUE_PUTTHISOBJECTINYOURHAND;
}
}
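//refine the result for the specific slot being queried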
switch (index) {
case CONST_SLOT_HEAD: {
if (slotPosition & SLOTP_HEAD) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_NECKLACE: {
if (slotPosition & SLOTP_NECKLACE) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_BACKPACK: {
if (slotPosition & SLOTP_BACKPACK) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_ARMOR: {
if (slotPosition & SLOTP_ARMOR) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_RIGHT: {
if (slotPosition & SLOTP_RIGHT) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
if (item->getWeaponType() != WEAPON_SHIELD) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else {
const Item* leftItem = inventory[CONST_SLOT_LEFT];
if (leftItem) {
if ((leftItem->getSlotPosition() | slotPosition) & SLOTP_TWO_HAND) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
} else if (slotPosition & SLOTP_TWO_HAND) {
if (inventory[CONST_SLOT_LEFT] && inventory[CONST_SLOT_LEFT] != item) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (inventory[CONST_SLOT_LEFT]) {
const Item* leftItem = inventory[CONST_SLOT_LEFT];
WeaponType_t type = item->getWeaponType(), leftType = leftItem->getWeaponType();
if (leftItem->getSlotPosition() & SLOTP_TWO_HAND) {
ret = RETURNVALUE_DROPTWOHANDEDITEM;
} else if (item == leftItem && count == item->getItemCount()) {
ret = RETURNVALUE_NOERROR;
} else if (leftType == WEAPON_SHIELD && type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANONLYUSEONESHIELD;
} else if (leftType == WEAPON_NONE || type == WEAPON_NONE ||
leftType == WEAPON_SHIELD || leftType == WEAPON_AMMO
|| type == WEAPON_SHIELD || type == WEAPON_AMMO) {
ret = RETURNVALUE_NOERROR;
} else {
ret = RETURNVALUE_CANONLYUSEONEWEAPON;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
break;
}
case CONST_SLOT_LEFT: {
if (slotPosition & SLOTP_LEFT) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
WeaponType_t type = item->getWeaponType();
if (type == WEAPON_NONE || type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else if (inventory[CONST_SLOT_RIGHT] && (slotPosition & SLOTP_TWO_HAND)) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (slotPosition & SLOTP_TWO_HAND) {
if (inventory[CONST_SLOT_RIGHT] && inventory[CONST_SLOT_RIGHT] != item) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (inventory[CONST_SLOT_RIGHT]) {
const Item* rightItem = inventory[CONST_SLOT_RIGHT];
WeaponType_t type = item->getWeaponType(), rightType = rightItem->getWeaponType();
if (rightItem->getSlotPosition() & SLOTP_TWO_HAND) {
ret = RETURNVALUE_DROPTWOHANDEDITEM;
} else if (item == rightItem && count == item->getItemCount()) {
ret = RETURNVALUE_NOERROR;
} else if (rightType == WEAPON_SHIELD && type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANONLYUSEONESHIELD;
} else if (rightType == WEAPON_NONE || type == WEAPON_NONE ||
rightType == WEAPON_SHIELD || rightType == WEAPON_AMMO
|| type == WEAPON_SHIELD || type == WEAPON_AMMO) {
ret = RETURNVALUE_NOERROR;
} else {
ret = RETURNVALUE_CANONLYUSEONEWEAPON;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
break;
}
case CONST_SLOT_LEGS: {
if (slotPosition & SLOTP_LEGS) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_FEET: {
if (slotPosition & SLOTP_FEET) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_RING: {
if (slotPosition & SLOTP_RING) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_AMMO: {
if ((slotPosition & SLOTP_AMMO) || g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_WHEREEVER:
case -1:
ret = RETURNVALUE_NOTENOUGHROOM;
break;
default:
ret = RETURNVALUE_NOTPOSSIBLE;
break;
}
if (ret != RETURNVALUE_NOERROR && ret != RETURNVALUE_NOTENOUGHROOM) {
return ret;
}
//check if enough capacity
if (!hasCapacity(item, count)) {
return RETURNVALUE_NOTENOUGHCAPACITY;
}
ret = g_moveEvents->onPlayerEquip(const_cast<Player*>(this), const_cast<Item*>(item), static_cast<slots_t>(index), true);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
//need an exchange with source? (destination item is swapped with currently moved item)
const Item* inventoryItem = getInventoryItem(static_cast<slots_t>(index));
if (inventoryItem && (!inventoryItem->isStackable() || inventoryItem->getID() != item->getID())) {
const Cylinder* cylinder = item->getTopParent();
if (cylinder && (dynamic_cast<const DepotChest*>(cylinder) || dynamic_cast<const Player*>(cylinder))) {
return RETURNVALUE_NEEDEXCHANGE;
}
return RETURNVALUE_NOTENOUGHROOM;
}
return ret;
}
ReturnValue Player::queryMaxCount(int32_t index, const Thing& thing, uint32_t count, uint32_t& maxQueryCount,
uint32_t flags) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
maxQueryCount = 0;
return RETURNVALUE_NOTPOSSIBLE;
}
if (index == INDEX_WHEREEVER) {
uint32_t n = 0;
for (int32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) {
Item* inventoryItem = inventory[slotIndex];
if (inventoryItem) {
if (Container* subContainer = inventoryItem->getContainer()) {
uint32_t queryCount = 0;
subContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags);
n += queryCount;
//iterate through all items, including sub-containers (deep search)
for (ContainerIterator it = subContainer->iterator(); it.hasNext(); it.advance()) {
if (Container* tmpContainer = (*it)->getContainer()) {
queryCount = 0;
tmpContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags);
n += queryCount;
}
}
} else if (inventoryItem->isStackable() && item->equals(inventoryItem) && inventoryItem->getItemCount() < 100) {
uint32_t remainder = (100 - inventoryItem->getItemCount());
if (queryAdd(slotIndex, *item, remainder, flags) == RETURNVALUE_NOERROR) {
n += remainder;
}
}
} else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot
if (item->isStackable()) {
n += 100;
} else {
++n;
}
}
}
maxQueryCount = n;
} else {
const Item* destItem = nullptr;
const Thing* destThing = getThing(index);
if (destThing) {
destItem = destThing->getItem();
}
if (destItem) {
if (destItem->isStackable() && item->equals(destItem) && destItem->getItemCount() < 100) {
maxQueryCount = 100 - destItem->getItemCount();
} else {
maxQueryCount = 0;
}
} else if (queryAdd(index, *item, count, flags) == RETURNVALUE_NOERROR) { //empty slot
if (item->isStackable()) {
maxQueryCount = 100;
} else {
maxQueryCount = 1;
}
return RETURNVALUE_NOERROR;
}
}
if (maxQueryCount < count) {
return RETURNVALUE_NOTENOUGHROOM;
}
return RETURNVALUE_NOERROR;
}
ReturnValue Player::queryRemove(const Thing& thing, uint32_t count, uint32_t flags, Creature* /*= nullptr*/) const
{
int32_t index = getThingIndex(&thing);
if (index == -1) {
return RETURNVALUE_NOTPOSSIBLE;
}
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (count == 0 || (item->isStackable() && count > item->getItemCount())) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) {
return RETURNVALUE_NOTMOVEABLE;
}
return RETURNVALUE_NOERROR;
}
Cylinder* Player::queryDestination(int32_t& index, const Thing& thing, Item** destItem,
uint32_t& flags)
{
if (index == 0 /*drop to capacity window*/ || index == INDEX_WHEREEVER) {
*destItem = nullptr;
const Item* item = thing.getItem();
if (item == nullptr) {
return this;
}
bool autoStack = !hasBitSet(FLAG_IGNOREAUTOSTACK, flags);
bool isStackable = item->isStackable();
std::vector<Container*> containers;
for (uint32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) {
Item* inventoryItem = inventory[slotIndex];
if (inventoryItem) {
if (inventoryItem == tradeItem) {
continue;
}
if (inventoryItem == item) {
continue;
}
if (autoStack && isStackable) {
//try to find an already existing item to stack with
if (queryAdd(slotIndex, *item, item->getItemCount(), 0) == RETURNVALUE_NOERROR) {
if (inventoryItem->equals(item) && inventoryItem->getItemCount() < 100) {
index = slotIndex;
*destItem = inventoryItem;
return this;
}
}
if (Container* subContainer = inventoryItem->getContainer()) {
containers.push_back(subContainer);
}
} else if (Container* subContainer = inventoryItem->getContainer()) {
containers.push_back(subContainer);
}
} else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot
index = slotIndex;
*destItem = nullptr;
return this;
}
}
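//no suitable inventory slot found, search the carried containers breadth-first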
size_t i = 0;
while (i < containers.size()) {
Container* tmpContainer = containers[i++];
if (!autoStack || !isStackable) {
//for non-stackable items, find the first free container slot as fast as possible
uint32_t n = tmpContainer->capacity() - std::min(tmpContainer->capacity(), static_cast<uint32_t>(tmpContainer->size()));
while (n) {
if (tmpContainer->queryAdd(tmpContainer->capacity() - n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) {
index = tmpContainer->capacity() - n;
*destItem = nullptr;
return tmpContainer;
}
--n;
}
for (Item* tmpContainerItem : tmpContainer->getItemList()) {
if (Container* subContainer = tmpContainerItem->getContainer()) {
containers.push_back(subContainer);
}
}
continue;
}
uint32_t n = 0;
for (Item* tmpItem : tmpContainer->getItemList()) {
if (tmpItem == tradeItem) {
continue;
}
if (tmpItem == item) {
continue;
}
//try to find an already existing item to stack with
if (tmpItem->equals(item) && tmpItem->getItemCount() < 100) {
index = n;
*destItem = tmpItem;
return tmpContainer;
}
if (Container* subContainer = tmpItem->getContainer()) {
containers.push_back(subContainer);
}
n++;
}
if (n < tmpContainer->capacity() && tmpContainer->queryAdd(n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) {
index = n;
*destItem = nullptr;
return tmpContainer;
}
}
return this;
}
Thing* destThing = getThing(index);
if (destThing) {
*destItem = destThing->getItem();
}
Cylinder* subCylinder = dynamic_cast<Cylinder*>(destThing);
if (subCylinder) {
index = INDEX_WHEREEVER;
*destItem = nullptr;
return subCylinder;
}
return this;
}
void Player::addThing(int32_t index, Thing* thing)
{
if (index < CONST_SLOT_FIRST || index > CONST_SLOT_LAST) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setParent(this);
inventory[index] = item;
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
}
void Player::updateThing(Thing* thing, uint16_t itemId, uint32_t count)
{
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setID(itemId);
item->setSubType(count);
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(item, item);
}
void Player::replaceThing(uint32_t index, Thing* thing)
{
if (index > CONST_SLOT_LAST) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* oldItem = getInventoryItem(static_cast<slots_t>(index));
if (!oldItem) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(oldItem, item);
item->setParent(this);
inventory[index] = item;
}
void Player::removeThing(Thing* thing, uint32_t count)
{
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
if (item->isStackable()) {
if (count == item->getItemCount()) {
//send change to client
sendInventoryItem(static_cast<slots_t>(index), nullptr);
//event methods
onRemoveInventoryItem(item);
item->setParent(nullptr);
inventory[index] = nullptr;
} else {
uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count));
item->setItemCount(newCount);
//send change to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(item, item);
}
} else {
//send change to client
sendInventoryItem(static_cast<slots_t>(index), nullptr);
//event methods
onRemoveInventoryItem(item);
item->setParent(nullptr);
inventory[index] = nullptr;
}
}
int32_t Player::getThingIndex(const Thing* thing) const
{
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
if (inventory[i] == thing) {
return i;
}
}
return -1;
}
size_t Player::getFirstIndex() const
{
return CONST_SLOT_FIRST;
}
size_t Player::getLastIndex() const
{
return CONST_SLOT_LAST + 1;
}
uint32_t Player::getItemTypeCount(uint16_t itemId, int32_t subType /*= -1*/) const
{
uint32_t count = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
if (item->getID() == itemId) {
count += Item::countByType(item, subType);
}
if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
count += Item::countByType(*it, subType);
}
}
}
}
return count;
}
bool Player::removeItemOfType(uint16_t itemId, uint32_t amount, int32_t subType, bool ignoreEquipped/* = false*/) const
{
if (amount == 0) {
return true;
}
std::vector<Item*> itemList;
uint32_t count = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
if (!ignoreEquipped && item->getID() == itemId) {
uint32_t itemCount = Item::countByType(item, subType);
if (itemCount == 0) {
continue;
}
itemList.push_back(item);
count += itemCount;
if (count >= amount) {
g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable);
return true;
}
} else if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
Item* containerItem = *it;
if (containerItem->getID() == itemId) {
uint32_t itemCount = Item::countByType(containerItem, subType);
if (itemCount == 0) {
continue;
}
itemList.push_back(containerItem);
count += itemCount;
if (count >= amount) {
g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable);
return true;
}
}
}
}
}
return false;
}
std::map<uint32_t, uint32_t>& Player::getAllItemTypeCount(std::map<uint32_t, uint32_t>& countMap) const
{
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
countMap[item->getID()] += Item::countByType(item, -1);
if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
countMap[(*it)->getID()] += Item::countByType(*it, -1);
}
}
}
return countMap;
}
Thing* Player::getThing(size_t index) const
{
if (index >= CONST_SLOT_FIRST && index <= CONST_SLOT_LAST) {
return inventory[index];
}
return nullptr;
}
void Player::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
if (link == LINK_OWNER) {
//calling movement scripts
g_moveEvents->onPlayerEquip(this, thing->getItem(), static_cast<slots_t>(index), false);
}
bool requireListUpdate = false;
if (link == LINK_OWNER || link == LINK_TOPPARENT) {
const Item* i = (oldParent ? oldParent->getItem() : nullptr);
// Check if we owned the old container too, so we don't need to do anything,
// as the list was updated in postRemoveNotification
assert(i ? i->getContainer() != nullptr : true);
if (i) {
requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
} else {
requireListUpdate = oldParent != this;
}
updateInventoryWeight();
updateItemsLight();
sendStats();
}
if (const Item* item = thing->getItem()) {
if (const Container* container = item->getContainer()) {
onSendContainer(container);
}
if (shopOwner && requireListUpdate) {
updateSaleShopList(item);
}
} else if (const Creature* creature = thing->getCreature()) {
if (creature == this) {
//check containers
std::vector<Container*> containers;
for (const auto& it : openContainers) {
Container* container = it.second.container;
if (!Position::areInRange<1, 1, 0>(container->getPosition(), getPosition())) {
containers.push_back(container);
}
}
for (const Container* container : containers) {
autoCloseContainers(container);
}
}
}
}
void Player::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
if (link == LINK_OWNER) {
//calling movement scripts
g_moveEvents->onPlayerDeEquip(this, thing->getItem(), static_cast<slots_t>(index));
}
bool requireListUpdate = false;
if (link == LINK_OWNER || link == LINK_TOPPARENT) {
const Item* i = (newParent ? newParent->getItem() : nullptr);
// Check if the item was moved into another container we own too, so we don't need to do anything,
// as the item never left the player
assert(i ? i->getContainer() != nullptr : true);
if (i) {
requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
} else {
requireListUpdate = newParent != this;
}
updateInventoryWeight();
updateItemsLight();
sendStats();
}
if (const Item* item = thing->getItem()) {
if (const Container* container = item->getContainer()) {
if (container->isRemoved() || !Position::areInRange<1, 1, 0>(getPosition(), container->getPosition())) {
autoCloseContainers(container);
} else if (container->getTopParent() == this) {
onSendContainer(container);
} else if (const Container* topContainer = dynamic_cast<const Container*>(container->getTopParent())) {
if (const DepotChest* depotChest = dynamic_cast<const DepotChest*>(topContainer)) {
bool isOwner = false;
for (const auto& it : depotChests) {
if (it.second == depotChest) {
isOwner = true;
onSendContainer(container);
}
}
if (!isOwner) {
autoCloseContainers(container);
}
} else {
onSendContainer(container);
}
} else {
autoCloseContainers(container);
}
}
if (shopOwner && requireListUpdate) {
updateSaleShopList(item);
}
}
}
bool Player::updateSaleShopList(const Item* item)
{
uint16_t itemId = item->getID();
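//money always triggers a refresh; other items only if they (or their contents) are sellable in the open shop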
if (itemId != ITEM_GOLD_COIN && itemId != ITEM_PLATINUM_COIN && itemId != ITEM_CRYSTAL_COIN) {
auto it = std::find_if(shopItemList.begin(), shopItemList.end(), [itemId](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.sellPrice != 0; });
if (it == shopItemList.end()) {
const Container* container = item->getContainer();
if (!container) {
return false;
}
const auto& items = container->getItemList();
return std::any_of(items.begin(), items.end(), [this](const Item* containerItem) {
return updateSaleShopList(containerItem);
});
}
}
if (client) {
client->sendSaleItemList(shopItemList);
}
return true;
}
bool Player::hasShopItemForSale(uint32_t itemId, uint8_t subType) const
{
const ItemType& itemType = Item::items[itemId];
return std::any_of(shopItemList.begin(), shopItemList.end(), [&](const ShopInfo& shopInfo) {
return shopInfo.itemId == itemId && shopInfo.buyPrice != 0 && (!itemType.isFluidContainer() || shopInfo.subType == subType);
});
}
void Player::internalAddThing(Thing* thing)
{
internalAddThing(0, thing);
}
void Player::internalAddThing(uint32_t index, Thing* thing)
{
Item* item = thing->getItem();
if (!item) {
return;
}
//index == 0 means we should equip this item at the most appropriate slot (no action required here)
if (index > CONST_SLOT_WHEREEVER && index <= CONST_SLOT_LAST) {
if (inventory[index]) {
return;
}
inventory[index] = item;
item->setParent(this);
}
}
bool Player::setFollowCreature(Creature* creature)
{
if (!Creature::setFollowCreature(creature)) {
setFollowCreature(nullptr);
setAttackedCreature(nullptr);
sendCancelMessage(RETURNVALUE_THEREISNOWAY);
sendCancelTarget();
stopWalk();
return false;
}
return true;
}
bool Player::setAttackedCreature(Creature* creature)
{
if (!Creature::setAttackedCreature(creature)) {
sendCancelTarget();
return false;
}
if (chaseMode && creature) {
if (followCreature != creature) {
//chase opponent
setFollowCreature(creature);
}
} else if (followCreature) {
setFollowCreature(nullptr);
}
if (creature) {
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
return true;
}
void Player::goToFollowCreature()
{
if (!walkTask) {
if ((OTSYS_TIME() - lastFailedFollow) < 2000) {
return;
}
Creature::goToFollowCreature();
if (followCreature && !hasFollowPath) {
lastFailedFollow = OTSYS_TIME();
}
}
}
void Player::getPathSearchParams(const Creature* creature, FindPathParams& fpp) const
{
Creature::getPathSearchParams(creature, fpp);
fpp.fullPathSearch = true;
}
void Player::doAttacking(uint32_t)
{
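//first attack: backdate lastAttack so the first swing happens immediately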
if (lastAttack == 0) {
lastAttack = OTSYS_TIME() - getAttackSpeed() - 1;
}
if (hasCondition(CONDITION_PACIFIED)) {
return;
}
if ((OTSYS_TIME() - lastAttack) >= getAttackSpeed()) {
bool result = false;
Item* tool = getWeapon();
const Weapon* weapon = g_weapons->getWeapon(tool);
uint32_t delay = getAttackSpeed();
bool classicSpeed = g_config.getBoolean(ConfigManager::CLASSIC_ATTACK_SPEED);
if (weapon) {
if (!weapon->interruptSwing()) {
result = weapon->useWeapon(this, tool, attackedCreature);
} else if (!classicSpeed && !canDoAction()) {
delay = getNextActionTime();
} else {
result = weapon->useWeapon(this, tool, attackedCreature);
}
} else {
result = Weapon::useFist(this, attackedCreature);
}
SchedulerTask* task = createSchedulerTask(std::max<uint32_t>(SCHEDULER_MINTICKS, delay), std::bind(&Game::checkCreatureAttack, &g_game, getID()));
if (!classicSpeed) {
setNextActionTask(task, false);
} else {
g_scheduler.addEvent(task);
}
if (result) {
lastAttack = OTSYS_TIME();
}
}
}
uint64_t Player::getGainedExperience(Creature* attacker) const
{
if (g_config.getBoolean(ConfigManager::EXPERIENCE_FROM_PLAYERS)) {
Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer != this && skillLoss && std::abs(static_cast<int32_t>(attackerPlayer->getLevel() - level)) <= g_config.getNumber(ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)) {
return std::max<uint64_t>(0, std::floor(getLostExperience() * getDamageRatio(attacker) * 0.75));
}
}
return 0;
}
void Player::onFollowCreature(const Creature* creature)
{
if (!creature) {
stopWalk();
}
}
void Player::setChaseMode(bool mode)
{
bool prevChaseMode = chaseMode;
chaseMode = mode;
if (prevChaseMode != chaseMode) {
if (chaseMode) {
if (!followCreature && attackedCreature) {
//chase opponent
setFollowCreature(attackedCreature);
}
} else if (attackedCreature) {
setFollowCreature(nullptr);
cancelNextWalk = true;
}
}
}
void Player::onWalkAborted()
{
setNextWalkActionTask(nullptr);
sendCancelWalk();
}
void Player::onWalkComplete()
{
if (walkTask) {
walkTaskEvent = g_scheduler.addEvent(walkTask);
walkTask = nullptr;
}
}
void Player::stopWalk()
{
cancelNextWalk = true;
}
LightInfo Player::getCreatureLight() const
{
if (internalLight.level > itemsLight.level) {
return internalLight;
}
return itemsLight;
}
void Player::updateItemsLight(bool internal /*=false*/)
{
LightInfo maxLight;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
Item* item = inventory[i];
if (item) {
LightInfo curLight = item->getLightInfo();
if (curLight.level > maxLight.level) {
maxLight = std::move(curLight);
}
}
}
if (itemsLight.level != maxLight.level || itemsLight.color != maxLight.color) {
itemsLight = maxLight;
if (!internal) {
g_game.changeLight(this);
}
}
}
void Player::onAddCondition(ConditionType_t type)
{
Creature::onAddCondition(type);
if (type == CONDITION_OUTFIT && isMounted()) {
dismount();
}
sendIcons();
}
void Player::onAddCombatCondition(ConditionType_t type)
{
switch (type) {
case CONDITION_POISON:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are poisoned.");
break;
case CONDITION_DROWN:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drowning.");
break;
case CONDITION_PARALYZE:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are paralyzed.");
break;
case CONDITION_DRUNK:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drunk.");
break;
case CONDITION_CURSED:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are cursed.");
break;
case CONDITION_FREEZING:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are freezing.");
break;
case CONDITION_DAZZLED:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are dazzled.");
break;
case CONDITION_BLEEDING:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are bleeding.");
break;
default:
break;
}
}
void Player::onEndCondition(ConditionType_t type)
{
Creature::onEndCondition(type);
if (type == CONDITION_INFIGHT) {
onIdleStatus();
pzLocked = false;
clearAttacked();
if (getSkull() != SKULL_RED && getSkull() != SKULL_BLACK) {
setSkull(SKULL_NONE);
}
}
sendIcons();
}
void Player::onCombatRemoveCondition(Condition* condition)
{
//Creature::onCombatRemoveCondition(condition);
if (condition->getId() > 0) {
//Means the condition is from an item, id == slot
if (g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
Item* item = getInventoryItem(static_cast<slots_t>(condition->getId()));
if (item) {
//25% chance to destroy the item
if (25 >= uniform_random(1, 100)) {
g_game.internalRemoveItem(item);
}
}
}
} else {
if (!canDoAction()) {
const uint32_t delay = getNextActionTime();
const int32_t ticks = delay - (delay % EVENT_CREATURE_THINK_INTERVAL);
if (ticks < 0) {
removeCondition(condition);
} else {
condition->setTicks(ticks);
}
} else {
removeCondition(condition);
}
}
}
void Player::onAttackedCreature(Creature* target, bool addFightTicks /* = true */)
{
Creature::onAttackedCreature(target);
if (target->getZone() == ZONE_PVP) {
return;
}
if (target == this) {
if (addFightTicks) {
addInFightTicks();
}
return;
}
if (hasFlag(PlayerFlag_NotGainInFight)) {
return;
}
Player* targetPlayer = target->getPlayer();
if (targetPlayer && !isPartner(targetPlayer) && !isGuildMate(targetPlayer)) {
if (!pzLocked && g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
pzLocked = true;
sendIcons();
}
targetPlayer->addInFightTicks();
if (getSkull() == SKULL_NONE && getSkullClient(targetPlayer) == SKULL_YELLOW) {
addAttacked(targetPlayer);
targetPlayer->sendCreatureSkull(this);
} else if (!targetPlayer->hasAttacked(this)) {
if (!pzLocked) {
pzLocked = true;
sendIcons();
}
if (!Combat::isInPvpZone(this, targetPlayer) && !isInWar(targetPlayer)) {
addAttacked(targetPlayer);
if (targetPlayer->getSkull() == SKULL_NONE && getSkull() == SKULL_NONE) {
setSkull(SKULL_WHITE);
}
if (getSkull() == SKULL_NONE) {
targetPlayer->sendCreatureSkull(this);
}
}
}
}
if (addFightTicks) {
addInFightTicks();
}
}
void Player::onAttacked()
{
Creature::onAttacked();
addInFightTicks();
}
void Player::onIdleStatus()
{
Creature::onIdleStatus();
if (party) {
party->clearPlayerPoints(this);
}
}
void Player::onPlacedCreature()
{
//scripting event - onLogin
if (!g_creatureEvents->playerLogin(this)) {
kickPlayer(true);
}
}
void Player::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
Creature::onAttackedCreatureDrainHealth(target, points);
if (target) {
if (party && !Combat::isPlayerCombat(target)) {
Monster* tmpMonster = target->getMonster();
if (tmpMonster && tmpMonster->isHostile()) {
//We have fulfilled a requirement for shared experience
party->updatePlayerTicks(this, points);
}
}
}
}
void Player::onTargetCreatureGainHealth(Creature* target, int32_t points)
{
if (target && party) {
Player* tmpPlayer = nullptr;
if (target->getPlayer()) {
tmpPlayer = target->getPlayer();
} else if (Creature* targetMaster = target->getMaster()) {
if (Player* targetMasterPlayer = targetMaster->getPlayer()) {
tmpPlayer = targetMasterPlayer;
}
}
if (isPartner(tmpPlayer)) {
party->updatePlayerTicks(this, points);
}
}
}
bool Player::onKilledCreature(Creature* target, bool lastHit/* = true*/)
{
bool unjustified = false;
if (hasFlag(PlayerFlag_NotGenerateLoot)) {
target->setDropLoot(false);
}
Creature::onKilledCreature(target, lastHit);
Player* targetPlayer = target->getPlayer();
if (!targetPlayer) {
return false;
}
if (targetPlayer->getZone() == ZONE_PVP) {
targetPlayer->setDropLoot(false);
targetPlayer->setSkillLoss(false);
} else if (!hasFlag(PlayerFlag_NotGainInFight) && !isPartner(targetPlayer)) {
if (!Combat::isInPvpZone(this, targetPlayer) && hasAttacked(targetPlayer) && !targetPlayer->hasAttacked(this) && !isGuildMate(targetPlayer) && targetPlayer != this) {
if (targetPlayer->getSkull() == SKULL_NONE && !isInWar(targetPlayer)) {
unjustified = true;
addUnjustifiedDead(targetPlayer);
}
if (lastHit && hasCondition(CONDITION_INFIGHT)) {
pzLocked = true;
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::WHITE_SKULL_TIME) * 1000, 0);
addCondition(condition);
}
}
}
return unjustified;
}
void Player::gainExperience(uint64_t gainExp, Creature* source)
{
if (hasFlag(PlayerFlag_NotGainExperience) || gainExp == 0 || staminaMinutes == 0) {
return;
}
addExperience(source, gainExp, true);
}
void Player::onGainExperience(uint64_t gainExp, Creature* target)
{
if (hasFlag(PlayerFlag_NotGainExperience)) {
return;
}
if (target && !target->getPlayer() && party && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
party->shareExperience(gainExp, target);
//We will get a share of the experience through the sharing mechanism
return;
}
Creature::onGainExperience(gainExp, target);
gainExperience(gainExp, target);
}
void Player::onGainSharedExperience(uint64_t gainExp, Creature* source)
{
gainExperience(gainExp, source);
}
bool Player::isImmune(CombatType_t type) const
{
if (hasFlag(PlayerFlag_CannotBeAttacked)) {
return true;
}
return Creature::isImmune(type);
}
bool Player::isImmune(ConditionType_t type) const
{
if (hasFlag(PlayerFlag_CannotBeAttacked)) {
return true;
}
return Creature::isImmune(type);
}
bool Player::isAttackable() const
{
return !hasFlag(PlayerFlag_CannotBeAttacked);
}
bool Player::lastHitIsPlayer(Creature* lastHitCreature)
{
if (!lastHitCreature) {
return false;
}
if (lastHitCreature->getPlayer()) {
return true;
}
Creature* lastHitMaster = lastHitCreature->getMaster();
return lastHitMaster && lastHitMaster->getPlayer();
}
void Player::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
Creature::changeHealth(healthChange, sendHealthChange);
sendStats();
}
void Player::changeMana(int32_t manaChange)
{
if (!hasFlag(PlayerFlag_HasInfiniteMana)) {
if (manaChange > 0) {
mana += std::min<int32_t>(manaChange, getMaxMana() - mana);
} else {
mana = std::max<int32_t>(0, mana + manaChange);
}
}
sendStats();
}
void Player::changeSoul(int32_t soulChange)
{
if (soulChange > 0) {
soul += std::min<int32_t>(soulChange, vocation->getSoulMax() - soul);
} else {
soul = std::max<int32_t>(0, soul + soulChange);
}
sendStats();
}
bool Player::canWear(uint32_t lookType, uint8_t addons) const
{
if (group->access) {
return true;
}
const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(sex, lookType);
if (!outfit) {
return false;
}
if (outfit->premium && !isPremium()) {
return false;
}
if (outfit->unlocked && addons == 0) {
return true;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
if (outfitEntry.addons == addons || outfitEntry.addons == 3 || addons == 0) {
return true;
}
return false; //lookType is on the list but the addons don't match
}
}
return false;
}
bool Player::hasOutfit(uint32_t lookType, uint8_t addons)
{
const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(sex, lookType);
if (!outfit) {
return false;
}
if (outfit->unlocked && addons == 0) {
return true;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
if (outfitEntry.addons == addons || outfitEntry.addons == 3 || addons == 0) {
return true;
}
return false; //lookType is on the list but the addons don't match
}
}
return false;
}
void Player::genReservedStorageRange()
{
//generate outfits range
uint32_t base_key = PSTRG_OUTFITS_RANGE_START;
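//each entry packs the look type in the upper 16 bits and the addon flags in the lower 16 bits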
for (const OutfitEntry& entry : outfits) {
storageMap[++base_key] = (entry.lookType << 16) | entry.addons;
}
}
void Player::addOutfit(uint16_t lookType, uint8_t addons)
{
for (OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
outfitEntry.addons |= addons;
return;
}
}
outfits.emplace_back(lookType, addons);
}
bool Player::removeOutfit(uint16_t lookType)
{
for (auto it = outfits.begin(), end = outfits.end(); it != end; ++it) {
OutfitEntry& entry = *it;
if (entry.lookType == lookType) {
outfits.erase(it);
return true;
}
}
return false;
}
bool Player::removeOutfitAddon(uint16_t lookType, uint8_t addons)
{
for (OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
outfitEntry.addons &= ~addons;
return true;
}
}
return false;
}
bool Player::getOutfitAddons(const Outfit& outfit, uint8_t& addons) const
{
if (group->access) {
addons = 3;
return true;
}
if (outfit.premium && !isPremium()) {
return false;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType != outfit.lookType) {
continue;
}
addons = outfitEntry.addons;
return true;
}
if (!outfit.unlocked) {
return false;
}
addons = 0;
return true;
}
void Player::setSex(PlayerSex_t newSex)
{
sex = newSex;
}
Skulls_t Player::getSkull() const
{
if (hasFlag(PlayerFlag_NotGainInFight)) {
return SKULL_NONE;
}
return skull;
}
Skulls_t Player::getSkullClient(const Creature* creature) const
{
if (!creature || g_game.getWorldType() != WORLD_TYPE_PVP) {
return SKULL_NONE;
}
const Player* player = creature->getPlayer();
if (!player || player->getSkull() != SKULL_NONE) {
return Creature::getSkullClient(creature);
}
if (player->hasAttacked(this)) {
return SKULL_YELLOW;
}
if (isPartner(player)) {
return SKULL_GREEN;
}
return Creature::getSkullClient(creature);
}
bool Player::hasAttacked(const Player* attacked) const
{
if (hasFlag(PlayerFlag_NotGainInFight) || !attacked) {
return false;
}
return attackedSet.find(attacked->guid) != attackedSet.end();
}
void Player::addAttacked(const Player* attacked)
{
if (hasFlag(PlayerFlag_NotGainInFight) || !attacked || attacked == this) {
return;
}
attackedSet.insert(attacked->guid);
}
void Player::removeAttacked(const Player* attacked)
{
if (!attacked || attacked == this) {
return;
}
auto it = attackedSet.find(attacked->guid);
if (it != attackedSet.end()) {
attackedSet.erase(it);
}
}
void Player::clearAttacked()
{
attackedSet.clear();
}
void Player::addUnjustifiedDead(const Player* attacked)
{
if (hasFlag(PlayerFlag_NotGainInFight) || attacked == this || g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
return;
}
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Warning! The murder of " + attacked->getName() + " was not justified.");
skullTicks += g_config.getNumber(ConfigManager::FRAG_TIME);
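//every unjustified kill adds FRAG_TIME; crossing the configured kill counts escalates to a red or black skull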
if (getSkull() != SKULL_BLACK) {
if (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) {
setSkull(SKULL_BLACK);
} else if (getSkull() != SKULL_RED && g_config.getNumber(ConfigManager::KILLS_TO_RED) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_RED) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) {
setSkull(SKULL_RED);
}
}
}
void Player::checkSkullTicks(int64_t ticks)
{
int64_t newTicks = skullTicks - ticks;
if (newTicks < 0) {
skullTicks = 0;
} else {
skullTicks = newTicks;
}
if ((skull == SKULL_RED || skull == SKULL_BLACK) && skullTicks < 1 && !hasCondition(CONDITION_INFIGHT)) {
setSkull(SKULL_NONE);
}
}
bool Player::isPromoted() const
{
uint16_t promotedVocation = g_vocations.getPromotedVocation(vocation->getId());
return promotedVocation == VOCATION_NONE && vocation->getId() != promotedVocation;
}
double Player::getLostPercent() const
{
int32_t deathLosePercent = g_config.getNumber(ConfigManager::DEATH_LOSE_PERCENT);
if (deathLosePercent != -1) {
if (isPromoted()) {
deathLosePercent -= 3;
}
deathLosePercent -= blessings.count();
return std::max<int32_t>(0, deathLosePercent) / 100.;
}
double lossPercent;
if (level >= 25) {
double tmpLevel = level + (levelPercent / 100.);
lossPercent = static_cast<double>((tmpLevel + 50) * 50 * ((tmpLevel * tmpLevel) - (5 * tmpLevel) + 8)) / experience;
} else {
lossPercent = 10;
}
double percentReduction = 0;
if (isPromoted()) {
percentReduction += 30;
}
percentReduction += blessings.count() * 8;
return lossPercent * (1 - (percentReduction / 100.)) / 100.;
}
void Player::learnInstantSpell(const std::string& spellName)
{
if (!hasLearnedInstantSpell(spellName)) {
learnedInstantSpellList.push_front(spellName);
}
}
void Player::forgetInstantSpell(const std::string& spellName)
{
learnedInstantSpellList.remove(spellName);
}
bool Player::hasLearnedInstantSpell(const std::string& spellName) const
{
if (hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
for (const auto& learnedSpellName : learnedInstantSpellList) {
if (strcasecmp(learnedSpellName.c_str(), spellName.c_str()) == 0) {
return true;
}
}
return false;
}
bool Player::isInWar(const Player* player) const
{
if (!player || !guild) {
return false;
}
const Guild* playerGuild = player->getGuild();
if (!playerGuild) {
return false;
}
return isInWarList(playerGuild->getId()) && player->isInWarList(guild->getId());
}
bool Player::isInWarList(uint32_t guildId) const
{
return std::find(guildWarVector.begin(), guildWarVector.end(), guildId) != guildWarVector.end();
}
bool Player::isPremium() const
{
if (g_config.getBoolean(ConfigManager::FREE_PREMIUM) || hasFlag(PlayerFlag_IsAlwaysPremium)) {
return true;
}
return premiumEndsAt > time(nullptr);
}
void Player::setPremiumTime(time_t premiumEndsAt)
{
this->premiumEndsAt = premiumEndsAt;
sendBasicData();
}
PartyShields_t Player::getPartyShield(const Player* player) const
{
if (!player) {
return SHIELD_NONE;
}
if (party) {
if (party->getLeader() == player) {
if (party->isSharedExperienceActive()) {
if (party->isSharedExperienceEnabled()) {
return SHIELD_YELLOW_SHAREDEXP;
}
if (party->canUseSharedExperience(player)) {
return SHIELD_YELLOW_NOSHAREDEXP;
}
return SHIELD_YELLOW_NOSHAREDEXP_BLINK;
}
return SHIELD_YELLOW;
}
if (player->party == party) {
if (party->isSharedExperienceActive()) {
if (party->isSharedExperienceEnabled()) {
return SHIELD_BLUE_SHAREDEXP;
}
if (party->canUseSharedExperience(player)) {
return SHIELD_BLUE_NOSHAREDEXP;
}
return SHIELD_BLUE_NOSHAREDEXP_BLINK;
}
return SHIELD_BLUE;
}
if (isInviting(player)) {
return SHIELD_WHITEBLUE;
}
}
if (player->isInviting(this)) {
return SHIELD_WHITEYELLOW;
}
if (player->party) {
return SHIELD_GRAY;
}
return SHIELD_NONE;
}
bool Player::isInviting(const Player* player) const
{
if (!player || !party || party->getLeader() != this) {
return false;
}
return party->isPlayerInvited(player);
}
bool Player::isPartner(const Player* player) const
{
if (!player || !party || player == this) {
return false;
}
return party == player->party;
}
bool Player::isGuildMate(const Player* player) const
{
if (!player || !guild) {
return false;
}
return guild == player->guild;
}
void Player::sendPlayerPartyIcons(Player* player)
{
sendCreatureShield(player);
sendCreatureSkull(player);
}
bool Player::addPartyInvitation(Party* party)
{
auto it = std::find(invitePartyList.begin(), invitePartyList.end(), party);
if (it != invitePartyList.end()) {
return false;
}
invitePartyList.push_front(party);
return true;
}
void Player::removePartyInvitation(Party* party)
{
invitePartyList.remove(party);
}
void Player::clearPartyInvitations()
{
for (Party* invitingParty : invitePartyList) {
invitingParty->removeInvite(*this, false);
}
invitePartyList.clear();
}
GuildEmblems_t Player::getGuildEmblem(const Player* player) const
{
if (!player) {
return GUILDEMBLEM_NONE;
}
const Guild* playerGuild = player->getGuild();
if (!playerGuild) {
return GUILDEMBLEM_NONE;
}
if (player->getGuildWarVector().empty()) {
if (guild == playerGuild) {
return GUILDEMBLEM_MEMBER;
} else {
return GUILDEMBLEM_OTHER;
}
} else if (guild == playerGuild) {
return GUILDEMBLEM_ALLY;
} else if (isInWar(player)) {
return GUILDEMBLEM_ENEMY;
}
return GUILDEMBLEM_NEUTRAL;
}
uint8_t Player::getCurrentMount() const
{
int32_t value;
if (getStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, value)) {
return value;
}
return 0;
}
void Player::setCurrentMount(uint8_t mountId)
{
addStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, mountId);
}
bool Player::toggleMount(bool mount)
{
if ((OTSYS_TIME() - lastToggleMount) < 3000 && !wasMounted) {
sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
if (mount) {
if (isMounted()) {
return false;
}
if (!group->access && tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE);
return false;
}
const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(getSex(), defaultOutfit.lookType);
if (!playerOutfit) {
return false;
}
uint8_t currentMountId = getCurrentMount();
if (currentMountId == 0) {
sendOutfitWindow();
return false;
}
Mount* currentMount = g_game.mounts.getMountByID(currentMountId);
if (!currentMount) {
return false;
}
if (!hasMount(currentMount)) {
setCurrentMount(0);
sendOutfitWindow();
return false;
}
if (currentMount->premium && !isPremium()) {
sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT);
return false;
}
if (hasCondition(CONDITION_OUTFIT)) {
sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return false;
}
defaultOutfit.lookMount = currentMount->clientId;
if (currentMount->speed != 0) {
g_game.changeSpeed(this, currentMount->speed);
}
} else {
if (!isMounted()) {
return false;
}
dismount();
}
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
lastToggleMount = OTSYS_TIME();
return true;
}
bool Player::tameMount(uint8_t mountId)
{
if (!g_game.mounts.getMountByID(mountId)) {
return false;
}
const uint8_t tmpMountId = mountId - 1;
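//tamed mounts are stored as bit flags, 31 mounts per storage value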
const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31);
int32_t value;
if (getStorageValue(key, value)) {
value |= (1 << (tmpMountId % 31));
} else {
value = (1 << (tmpMountId % 31));
}
addStorageValue(key, value);
return true;
}
bool Player::untameMount(uint8_t mountId)
{
if (!g_game.mounts.getMountByID(mountId)) {
return false;
}
const uint8_t tmpMountId = mountId - 1;
const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31);
int32_t value;
if (!getStorageValue(key, value)) {
return true;
}
value &= ~(1 << (tmpMountId % 31));
addStorageValue(key, value);
if (getCurrentMount() == mountId) {
if (isMounted()) {
dismount();
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
}
setCurrentMount(0);
}
return true;
}
bool Player::hasMount(const Mount* mount) const
{
if (isAccessPlayer()) {
return true;
}
if (mount->premium && !isPremium()) {
return false;
}
const uint8_t tmpMountId = mount->id - 1;
int32_t value;
if (!getStorageValue(PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31), value)) {
return false;
}
return ((1 << (tmpMountId % 31)) & value) != 0;
}
void Player::dismount()
{
Mount* mount = g_game.mounts.getMountByID(getCurrentMount());
if (mount && mount->speed > 0) {
g_game.changeSpeed(this, -mount->speed);
}
defaultOutfit.lookMount = 0;
}
bool Player::addOfflineTrainingTries(skills_t skill, uint64_t tries)
{
if (tries == 0 || skill == SKILL_LEVEL) {
return false;
}
bool sendUpdate = false;
uint32_t oldSkillValue, newSkillValue;
long double oldPercentToNextLevel, newPercentToNextLevel;
if (skill == SKILL_MAGLEVEL) {
uint64_t currReqMana = vocation->getReqMana(magLevel);
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
return false;
}
oldSkillValue = magLevel;
oldPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana;
g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, tries);
uint32_t currMagLevel = magLevel;
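//advance magic levels while the added tries cover the remaining mana requirement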
while ((manaSpent + tries) >= nextReqMana) {
tries -= nextReqMana - manaSpent;
magLevel++;
manaSpent = 0;
g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel);
sendUpdate = true;
currReqMana = nextReqMana;
nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
tries = 0;
break;
}
}
manaSpent += tries;
if (magLevel != currMagLevel) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You advanced to magic level {:d}.", magLevel));
}
uint8_t newPercent;
if (nextReqMana > currReqMana) {
newPercent = Player::getPercentLevel(manaSpent, nextReqMana);
newPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana;
} else {
newPercent = 0;
newPercentToNextLevel = 0;
}
if (newPercent != magLevelPercent) {
magLevelPercent = newPercent;
sendUpdate = true;
}
newSkillValue = magLevel;
} else {
uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level);
uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
return false;
}
oldSkillValue = skills[skill].level;
oldPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries;
g_events->eventPlayerOnGainSkillTries(this, skill, tries);
uint32_t currSkillLevel = skills[skill].level;
while ((skills[skill].tries + tries) >= nextReqTries) {
tries -= nextReqTries - skills[skill].tries;
skills[skill].level++;
skills[skill].tries = 0;
skills[skill].percent = 0;
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level);
sendUpdate = true;
currReqTries = nextReqTries;
nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
tries = 0;
break;
}
}
skills[skill].tries += tries;
if (currSkillLevel != skills[skill].level) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("You advanced to {:s} level {:d}.", getSkillName(skill), skills[skill].level));
}
uint8_t newPercent;
if (nextReqTries > currReqTries) {
newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries);
newPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries;
} else {
newPercent = 0;
newPercentToNextLevel = 0;
}
if (skills[skill].percent != newPercent) {
skills[skill].percent = newPercent;
sendUpdate = true;
}
newSkillValue = skills[skill].level;
}
if (sendUpdate) {
sendSkills();
}
sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("Your {:s} skill changed from level {:d} (with {:.2f}% progress towards level {:d}) to level {:d} (with {:.2f}% progress towards level {:d})", ucwords(getSkillName(skill)), oldSkillValue, oldPercentToNextLevel, (oldSkillValue + 1), newSkillValue, newPercentToNextLevel, (newSkillValue + 1)));
return sendUpdate;
}
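// Editor's note -- illustrative only, not part of the upstream file: a worked example of
// the magic-level branch above, using made-up requirement values. Suppose magLevel is 20
// with manaSpent = 400, the vocation requires 1000 mana for level 21 and 1600 for level
// 22, and tries = 1700 offline mana is granted:
// - 400 + 1700 >= 1000, so tries becomes 1700 - (1000 - 400) = 1100, magLevel becomes
//   21 and manaSpent resets to 0;
// - 0 + 1100 < 1600, so the loop ends and manaSpent becomes 1100;
// - the new progress is 1100 / 1600, roughly 68%, which the summary message at the end
//   reports alongside the old 40% (400 / 1000).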
bool Player::hasModalWindowOpen(uint32_t modalWindowId) const
{
return find(modalWindows.begin(), modalWindows.end(), modalWindowId) != modalWindows.end();
}
void Player::onModalWindowHandled(uint32_t modalWindowId)
{
modalWindows.remove(modalWindowId);
}
void Player::sendModalWindow(const ModalWindow& modalWindow)
{
if (!client) {
return;
}
modalWindows.push_front(modalWindow.id);
client->sendModalWindow(modalWindow);
}
void Player::clearModalWindows()
{
modalWindows.clear();
}
uint16_t Player::getHelpers() const
{
uint16_t helpers;
if (guild && party) {
std::unordered_set<Player*> helperSet;
const auto& guildMembers = guild->getMembersOnline();
helperSet.insert(guildMembers.begin(), guildMembers.end());
const auto& partyMembers = party->getMembers();
helperSet.insert(partyMembers.begin(), partyMembers.end());
const auto& partyInvitees = party->getInvitees();
helperSet.insert(partyInvitees.begin(), partyInvitees.end());
helperSet.insert(party->getLeader());
helpers = helperSet.size();
} else if (guild) {
helpers = guild->getMembersOnline().size();
} else if (party) {
helpers = party->getMemberCount() + party->getInvitationCount() + 1;
} else {
helpers = 0;
}
return helpers;
}
void Player::sendClosePrivate(uint16_t channelId)
{
if (channelId == CHANNEL_GUILD || channelId == CHANNEL_PARTY) {
g_chat->removeUserFromChannel(*this, channelId);
}
if (client) {
client->sendClosePrivate(channelId);
}
}
uint64_t Player::getMoney() const
{
std::vector<const Container*> containers;
uint64_t moneyCount = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
Item* item = inventory[i];
if (!item) {
continue;
}
const Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
moneyCount += item->getWorth();
}
}
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (const Item* item : container->getItemList()) {
const Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
moneyCount += item->getWorth();
}
}
}
return moneyCount;
}
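// Editor's note -- illustrative sketch, not part of the upstream file: getMoney() walks
// nested containers iteratively, using the `containers` vector as a work queue instead
// of recursing. The same pattern in isolation (names are placeholders):
//
//     std::vector<const Container*> queue = topLevelContainers;
//     for (size_t i = 0; i < queue.size(); ++i) { // queue grows while we iterate
//         for (const Item* item : queue[i]->getItemList()) {
//             if (const Container* child = item->getContainer()) {
//                 queue.push_back(child); // visit nested containers later
//             } else {
//                 total += item->getWorth();
//             }
//         }
//     }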
size_t Player::getMaxVIPEntries() const
{
if (group->maxVipEntries != 0) {
return group->maxVipEntries;
}
return g_config.getNumber(isPremium() ? ConfigManager::VIP_PREMIUM_LIMIT : ConfigManager::VIP_FREE_LIMIT);
}
size_t Player::getMaxDepotItems() const
{
if (group->maxDepotItems != 0) {
return group->maxDepotItems;
}
return g_config.getNumber(isPremium() ? ConfigManager::DEPOT_PREMIUM_LIMIT : ConfigManager::DEPOT_FREE_LIMIT);
}
std::forward_list<Condition*> Player::getMuteConditions() const
{
std::forward_list<Condition*> muteConditions;
for (Condition* condition : conditions) {
if (condition->getTicks() <= 0) {
continue;
}
ConditionType_t type = condition->getType();
if (type != CONDITION_MUTED && type != CONDITION_CHANNELMUTEDTICKS && type != CONDITION_YELLTICKS) {
continue;
}
muteConditions.push_front(condition);
}
return muteConditions;
}
void Player::setGuild(Guild* guild)
{
if (guild == this->guild) {
return;
}
Guild* oldGuild = this->guild;
this->guildNick.clear();
this->guild = nullptr;
this->guildRank = nullptr;
if (guild) {
GuildRank_ptr rank = guild->getRankByLevel(1);
if (!rank) {
return;
}
this->guild = guild;
this->guildRank = rank;
guild->addMember(this);
}
if (oldGuild) {
oldGuild->removeMember(this);
}
}
void Player::updateRegeneration()
{
if (!vocation) {
return;
}
Condition* condition = getCondition(CONDITION_REGENERATION, CONDITIONID_DEFAULT);
if (condition) {
condition->setParam(CONDITION_PARAM_HEALTHGAIN, vocation->getHealthGainAmount());
condition->setParam(CONDITION_PARAM_HEALTHTICKS, vocation->getHealthGainTicks() * 1000);
condition->setParam(CONDITION_PARAM_MANAGAIN, vocation->getManaGainAmount());
condition->setParam(CONDITION_PARAM_MANATICKS, vocation->getManaGainTicks() * 1000);
}
}
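// Editor's note -- illustrative only, not part of the upstream file: the multiplication
// by 1000 above suggests the vocation gain ticks are configured in seconds while the
// regeneration condition expects milliseconds. For a hypothetical vocation with
// getHealthGainTicks() == 6 and getHealthGainAmount() == 1, the condition would restore
// 1 hit point every 6000 ms while it is active.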
| 1 | 19,657 | This way you will **never** be kicked while monsters are around you. | otland-forgottenserver | cpp |
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Arrays;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.thoughtworks.selenium;
import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
import java.lang.reflect.Method;
import java.util.Arrays;
public class CSVTest {
Method CSV;
@Before
public void setUp() {
Method[] methods = HttpCommandProcessor.class.getDeclaredMethods();
for (int i = 0; i < methods.length; i++) {
if ("parseCSV".equals(methods[i].getName())) {
Method csvMethod = methods[i];
csvMethod.setAccessible(true);
CSV = csvMethod;
break;
}
}
}
public String[] parseCSV(String input, String[] expected) {
System.out.print(input + ": ");
String[] output;
try {
output = (String[]) CSV.invoke(null, input);
} catch (Exception e) {
throw new RuntimeException(e);
}
System.out.println(Arrays.asList(output).toString());
compareStringArrays(expected, output);
return output;
}
@Test
public void testSimple() {
String input = "1,2,3";
String[] expected = new String[] {"1", "2", "3"};
parseCSV(input, expected);
}
@Test
public void testBackSlash() {
String input = "1,2\\,3,4"; // Java-escaped, but not CSV-escaped
String[] expected = new String[] {"1", "2,3", "4"}; // backslash should disappear in output
parseCSV(input, expected);
}
@Test
public void testRandomSingleBackSlash() {
String input = "1,\\2,3"; // Java-escaped, but not CSV-escaped
String[] expected = new String[] {"1", "2", "3"}; // backslash should disappear
parseCSV(input, expected);
}
@Test
public void testDoubleBackSlashBeforeComma() {
String input = "1,2\\\\,3"; // Java-escaped and CSV-escaped
String[] expected = new String[] {"1", "2\\", "3"}; // one backslash should disappear in output
parseCSV(input, expected);
}
@Test
public void testRandomDoubleBackSlash() {
String input = "1,\\\\2,3"; // Java-escaped, and CSV-escaped
String[] expected = new String[] {"1", "\\2", "3"}; // one backslash should disappear in output
parseCSV(input, expected);
}
@Test
public void testTripleBackSlashBeforeComma() {
String input = "1,2\\\\\\,3,4"; // Java-escaped, and CSV-escaped
String[] expected = new String[] {"1", "2\\,3", "4"}; // one backslash should disappear in
// output
parseCSV(input, expected);
}
@Test
public void test4BackSlashesBeforeComma() {
String input = "1,2\\\\\\\\,3"; // Java-escaped, and CSV-escaped
String[] expected = new String[] {"1", "2\\\\", "3"}; // two backslashes should disappear in
// output
parseCSV(input, expected);
}
public void compareStringArrays(String[] expected, String[] actual) {
assertEquals("Wrong number of elements", expected.length, actual.length);
for (int i = 0; i < expected.length; i++) {
assertEquals(expected[i], actual[i]);
}
}
}
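// Editor's note -- an illustrative summary, not part of the original test file: the test
// cases above pin down parseCSV's backslash handling. Written as runtime strings (after
// Java unescaping), a single backslash escapes the next character and is dropped, while
// a doubled backslash collapses to one literal backslash:
//
//     "1,2\,3,4"   -> ["1", "2,3", "4"]    (escaped comma stays inside the field)
//     "1,\2,3"     -> ["1", "2", "3"]      (stray backslash disappears)
//     "1,2\\,3"    -> ["1", "2\", "3"]     (literal backslash, then a real separator)
//     "1,2\\\,3,4" -> ["1", "2\,3", "4"]   (literal backslash plus escaped comma)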
| 1 | 19,390 | Can you please revert changes to files in the `thoughtworks` package? This is legacy code and we will eventually phase out RC. | SeleniumHQ-selenium | py |
@@ -110,5 +110,5 @@ export default function ModulesList( { moduleSlugs } ) {
}
ModulesList.propTypes = {
- moduleSlug: PropTypes.arrayOf( PropTypes.string ).isRequired,
+ moduleSlugs: PropTypes.arrayOf( PropTypes.string ).isRequired,
}; | 1 | /**
* ModulesList component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
/**
* WordPress dependencies
*/
import { useCallback } from '@wordpress/element';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { CORE_SITE } from '../googlesitekit/datastore/site/constants';
import { CORE_MODULES } from '../googlesitekit/modules/datastore/constants';
import { CORE_LOCATION } from '../googlesitekit/datastore/location/constants';
import { VIEW_CONTEXT_DASHBOARD } from '../googlesitekit/constants';
import { trackEvent } from '../util';
import ModulesListItem from './ModulesListItem';
const { useSelect, useDispatch } = Data;
export default function ModulesList( { moduleSlugs } ) {
const { activateModule } = useDispatch( CORE_MODULES );
const { navigateTo } = useDispatch( CORE_LOCATION );
const { setInternalServerError } = useDispatch( CORE_SITE );
const modules = useSelect( ( select ) =>
select( CORE_MODULES ).getModules()
);
const handleSetupModule = useCallback(
async ( slug ) => {
const { response, error } = await activateModule( slug );
if ( error ) {
setInternalServerError( {
id: 'setup-module-error',
description: error.message,
} );
return null;
}
await trackEvent(
`${ VIEW_CONTEXT_DASHBOARD }_authentication-success-notification`,
'activate_module',
slug
);
// Redirect to ReAuthentication URL
navigateTo( response.moduleReauthURL );
},
[ activateModule, navigateTo, setInternalServerError ]
);
if ( modules === undefined ) {
return null;
}
// Filter specific modules.
const moduleObjects =
Array.isArray( moduleSlugs ) && moduleSlugs.length
? moduleSlugs
.filter( ( slug ) => modules[ slug ] )
.reduce(
( acc, slug ) => ( {
...acc,
[ slug ]: modules[ slug ],
} ),
{}
)
: modules;
// Filter out internal modules and remove modules with dependencies.
const modulesToShow = Object.values( moduleObjects )
.filter(
( module ) => ! module.internal && 0 === module.dependencies.length
)
.sort( ( a, b ) => a.order - b.order );
return (
<div className="googlesitekit-modules-list">
{ modulesToShow.map( ( module ) => (
<ModulesListItem
key={ module.slug }
module={ module }
handleSetupModule={ handleSetupModule }
/>
) ) }
</div>
);
}
ModulesList.propTypes = {
moduleSlug: PropTypes.arrayOf( PropTypes.string ).isRequired,
};
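// Editor's note -- illustrative only, not part of the original file: typical usage passes
// an explicit list of module slugs (the slugs below are just examples):
//
//     <ModulesList moduleSlugs={ [ 'analytics', 'adsense' ] } />
//
// If `moduleSlugs` is empty or not an array, the component instead lists every
// non-internal module without dependencies, per the fallback above.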
| 1 | 41,234 | This just fixes an unrelated prop type error, correct? | google-site-kit-wp | js |
@@ -1022,7 +1022,7 @@ func (fbo *folderBlockOps) updateWithDirtyEntriesLocked(ctx context.Context,
}
// Remove cached removals from the copy.
- for k := range dirCacheEntry.adds {
+ for k := range dirCacheEntry.dels {
_, ok := dblock.Children[k]
if !ok {
continue | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"path/filepath"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
type overallBlockState int
const (
// cleanState: no outstanding local writes.
cleanState overallBlockState = iota
// dirtyState: there are outstanding local writes that haven't yet been
// synced.
dirtyState
)
// blockReqType indicates whether an operation makes block
// modifications or not
type blockReqType int
const (
// A block read request.
blockRead blockReqType = iota
// A block write request.
blockWrite
// A block read request that is happening from a different
// goroutine than the blockLock rlock holder, using the same lState.
blockReadParallel
// We are looking up a block for the purposes of creating a new
// node in the node cache for it; avoid any unlocks as part of the
// lookup process.
blockLookup
)
type mdToCleanIfUnused struct {
md ReadOnlyRootMetadata
bps *blockPutState
}
type syncInfo struct {
oldInfo BlockInfo
op *syncOp
unrefs []BlockInfo
bps *blockPutState
refBytes uint64
unrefBytes uint64
toCleanIfUnused []mdToCleanIfUnused
}
func (si *syncInfo) DeepCopy(codec kbfscodec.Codec) (*syncInfo, error) {
newSi := &syncInfo{
oldInfo: si.oldInfo,
refBytes: si.refBytes,
unrefBytes: si.unrefBytes,
}
newSi.unrefs = make([]BlockInfo, len(si.unrefs))
copy(newSi.unrefs, si.unrefs)
if si.bps != nil {
newSi.bps = si.bps.DeepCopy()
}
if si.op != nil {
err := kbfscodec.Update(codec, &newSi.op, si.op)
if err != nil {
return nil, err
}
}
newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused))
for i, toClean := range si.toCleanIfUnused {
// It might be overkill to deep-copy these MDs and bpses,
// which are probably immutable, but for now let's do the safe
// thing.
copyMd, err := toClean.md.deepCopy(codec)
if err != nil {
return nil, err
}
newSi.toCleanIfUnused[i].md = copyMd.ReadOnly()
newSi.toCleanIfUnused[i].bps = toClean.bps.DeepCopy()
}
return newSi, nil
}
func (si *syncInfo) removeReplacedBlock(ctx context.Context,
log logger.Logger, ptr BlockPointer) {
for i, ref := range si.op.RefBlocks {
if ref == ptr {
log.CDebugf(ctx, "Replacing old ref %v", ptr)
si.op.RefBlocks = append(si.op.RefBlocks[:i],
si.op.RefBlocks[i+1:]...)
for j, unref := range si.unrefs {
if unref.BlockPointer == ptr {
// Don't completely remove the unref,
// since it contains size info that we
// need to incorporate into the MD
// usage calculations.
si.unrefs[j].BlockPointer = zeroPtr
}
}
break
}
}
}
func (si *syncInfo) mergeUnrefCache(md *RootMetadata) {
for _, info := range si.unrefs {
// it's ok if we push the same ptr.ID/RefNonce multiple times,
// because the subsequent ones should have a QuotaSize of 0.
md.AddUnrefBlock(info)
}
}
type deCacheEntry struct {
// dirEntry is the dirty directory entry corresponding to the
// BlockPointer that maps to this struct.
dirEntry DirEntry
// adds is a map of the pointers for new entry names that have
// been added to the DirBlock for the BlockPointer that maps to
// this struct.
adds map[string]BlockPointer
// dels is a set of the names that have been removed from the
// DirBlock for the BlockPointer that maps to this struct.
dels map[string]bool
}
type deferredState struct {
// Writes and truncates for blocks that were being sync'd, and
// need to be replayed after the sync finishes on top of the new
// versions of the blocks.
writes []func(context.Context, *lockState, KeyMetadata, path) error
// Blocks that need to be deleted from the dirty cache before any
// deferred writes are replayed.
dirtyDeletes []BlockPointer
waitBytes int64
}
// folderBlockOps contains all the fields that must be synchronized by
// blockLock. It will eventually also contain all the methods that
// must be synchronized by blockLock, so that folderBranchOps will
// have no knowledge of blockLock.
//
// -- And now, a primer on tracking dirty bytes --
//
// The DirtyBlockCache tracks the number of bytes that are dirtied
// system-wide, as the number of bytes that haven't yet been synced
// ("unsynced") and the number of bytes that haven't yet been resolved
// because the overall file Sync hasn't finished ("total").
// This data helps us decide when we need to block incoming Writes, in
// order to keep memory usage from exploding.
//
// It's the responsibility of folderBlockOps (and its helper struct
// dirtyFile) to update these totals in DirtyBlockCache for the
// individual files within this TLF. This is complicated by a few things:
// * New writes to a file are "deferred" while a Sync is happening, and
// are replayed after the Sync finishes.
// * Syncs can be canceled or error out halfway through syncing the blocks,
// leaving the file in a dirty state until the next Sync.
// * Syncs can fail with a /recoverable/ error, in which case they get
// retried automatically by folderBranchOps. In that case, the retried
// Sync also sucks in any outstanding deferred writes.
//
// With all that in mind, here is the rough breakdown of how this
// bytes-tracking is implemented:
// * On a Write/Truncate to a block, folderBranchOps counts all the
// newly-dirtied bytes in a file as "unsynced". That is, if the block was
// already in the dirty cache (and not already being synced), only
// extensions to the block count as "unsynced" bytes.
// * When a Sync starts, dirtyFile remembers the total of bytes being synced,
// and the size of each block being synced.
// * When each block put finishes successfully, dirtyFile subtracts the size
// of that block from "unsynced".
// * When a Sync finishes successfully, the total sum of bytes in that sync
// are subtracted from the "total" dirty bytes outstanding.
// * If a Sync fails, but some blocks were put successfully, those blocks
// are "re-dirtied", which means they count as unsynced bytes again.
// dirtyFile handles this.
// * When a Write/Truncate is deferred due to an ongoing Sync, its bytes
// still count towards the "unsynced" total. In fact, this essentially
// creates a new copy of those blocks, and the whole size of that block
// (not just the newly-dirtied bytes) count for the total. However,
// when the write gets replayed, folderBlockOps first subtracts those bytes
// from the system-wide numbers, since they are about to be replayed.
// * When a Sync is retried after a recoverable failure, dirtyFile adds
// the newly-dirtied deferred bytes to the system-wide numbers, since they
// are now being assimilated into this Sync.
// * dirtyFile also exposes a concept of "orphaned" blocks. These are child
// blocks being synced that are now referenced via a new, permanent block
// ID from the parent indirect block. This matters for when hard failures
// occur during a Sync -- the blocks will no longer be accessible under
// their previous old pointers, and so dirtyFile needs to know their old
// bytes can be cleaned up now.
type folderBlockOps struct {
config Config
log logger.Logger
folderBranch FolderBranch
observers *observerList
// forceSyncChan can be sent on to trigger an immediate
// Sync(). It is a blocking channel.
forceSyncChan chan<- struct{}
// protects access to blocks in this folder and all fields
// below.
blockLock blockLock
// Which files are currently dirty and have dirty blocks that are either
// currently syncing, or waiting to be sync'd.
dirtyFiles map[BlockPointer]*dirtyFile
// For writes and truncates, track the unsynced to-be-unref'd
// block infos, per-path.
unrefCache map[BlockRef]*syncInfo
// For writes and truncates, track the modified (but not yet
// committed) directory entries. Maps the entry BlockRef to a
// modified entry.
deCache map[BlockRef]deCacheEntry
// Track deferred operations on a per-file basis.
deferred map[BlockRef]deferredState
// set to true if this write or truncate should be deferred
doDeferWrite bool
// nodeCache itself is goroutine-safe, but write/truncate must
// call PathFromNode() only under blockLock (see nodeCache
// comments in folder_branch_ops.go).
nodeCache NodeCache
}
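// Editor's note -- an illustrative walk-through of the byte accounting described in the
// primer above; it is not part of the upstream file and the numbers are made up:
//
//  1. A Write dirties 100 new bytes in file F: those 100 bytes count as "unsynced".
//  2. A Sync of F starts; a Write arriving during the Sync is deferred, and its whole
//     block copy (say 40 bytes) also counts as "unsynced".
//  3. Each block put that succeeds subtracts that block's size from "unsynced"; when the
//     Sync as a whole finishes, its byte sum is subtracted from "total".
//  4. Before the deferred write is replayed, its 40 bytes are subtracted from the
//     system-wide numbers, since the replay is about to account for them again.
//  5. If the Sync instead fails with a recoverable error, the blocks that were already
//     put are "re-dirtied" and count as "unsynced" again for the automatic retry.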
// Only exported methods of folderBlockOps should be used outside of this
// file.
//
// Although, temporarily, folderBranchOps is allowed to reach in and
// manipulate folderBlockOps fields and methods directly.
func (fbo *folderBlockOps) id() tlf.ID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBlockOps) branch() BranchName {
return fbo.folderBranch.Branch
}
// GetState returns the overall block state of this TLF.
func (fbo *folderBlockOps) GetState(lState *lockState) overallBlockState {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
if len(fbo.deCache) == 0 {
return cleanState
}
return dirtyState
}
// getCleanEncodedBlockSizeLocked retrieves the encoded size of the
// clean block pointed to by ptr, which must be valid, either from the
// cache or from the server. If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
rtype blockReqType) (uint32, error) {
if rtype != blockReadParallel {
if rtype == blockWrite {
panic("Cannot get the size of a block for writing")
}
fbo.blockLock.AssertAnyLocked(lState)
} else if lState != nil {
panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
"with blockReadParallel")
}
if !ptr.IsValid() {
return 0, InvalidBlockRefError{ptr.Ref()}
}
if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
return block.GetEncodedSize(), nil
}
if err := checkDataVersion(fbo.config, path{}, ptr); err != nil {
return 0, err
}
// Unlock the blockLock while we wait for the network, only if
// it's locked for reading by a single goroutine. If it's locked
// for writing, that indicates we are performing an atomic write
// operation, and we need to ensure that nothing else comes in and
// modifies the blocks, so don't unlock.
//
// If there may be multiple goroutines fetching blocks under the
// same lState, we can't safely unlock since some of the other
// goroutines may be operating on the data assuming they have the
// lock.
bops := fbo.config.BlockOps()
var size uint32
var err error
if rtype != blockReadParallel && rtype != blockLookup {
fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
size, err = bops.GetEncodedSize(ctx, kmd, ptr)
})
} else {
size, err = bops.GetEncodedSize(ctx, kmd, ptr)
}
if err != nil {
return 0, err
}
return size, nil
}
// getBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. If
// notifyPath is valid and the block isn't cached, trigger a read
// notification. If `rtype` is `blockReadParallel`, it's assumed that
// some coordinating goroutine is holding the correct locks, and
// in that case `lState` must be `nil`.
//
// This must be called only by get{File,Dir}BlockHelperLocked().
func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName,
newBlock makeNewBlock, lifetime BlockCacheLifetime, notifyPath path,
rtype blockReqType) (Block, error) {
if rtype != blockReadParallel {
fbo.blockLock.AssertAnyLocked(lState)
} else if lState != nil {
panic("Non-nil lState passed to getBlockHelperLocked " +
"with blockReadParallel")
}
if !ptr.IsValid() {
return nil, InvalidBlockRefError{ptr.Ref()}
}
if block, err := fbo.config.DirtyBlockCache().Get(
fbo.id(), ptr, branch); err == nil {
return block, nil
}
if block, hasPrefetched, lifetime, err :=
fbo.config.BlockCache().GetWithPrefetch(ptr); err == nil {
// If the block was cached in the past, we need to handle it as if it's
// an on-demand request so that its downstream prefetches are triggered
// correctly according to the new on-demand fetch priority.
fbo.config.BlockOps().BlockRetriever().CacheAndPrefetch(ctx,
ptr, block, kmd, defaultOnDemandRequestPriority, lifetime,
hasPrefetched)
return block, nil
}
if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil {
return nil, err
}
if notifyPath.isValidForNotification() {
fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false))
defer fbo.config.Reporter().Notify(ctx,
readNotification(notifyPath, true))
}
// Unlock the blockLock while we wait for the network, only if
// it's locked for reading by a single goroutine. If it's locked
// for writing, that indicates we are performing an atomic write
// operation, and we need to ensure that nothing else comes in and
// modifies the blocks, so don't unlock.
//
// If there may be multiple goroutines fetching blocks under the
// same lState, we can't safely unlock since some of the other
// goroutines may be operating on the data assuming they have the
// lock.
// fetch the block, and add to cache
block := newBlock()
bops := fbo.config.BlockOps()
var err error
if rtype != blockReadParallel && rtype != blockLookup {
fbo.blockLock.DoRUnlockedIfPossible(lState, func(*lockState) {
err = bops.Get(ctx, kmd, ptr, block, lifetime)
})
} else {
err = bops.Get(ctx, kmd, ptr, block, lifetime)
}
if err != nil {
return nil, err
}
return block, nil
}
// getFileBlockHelperLocked retrieves the block pointed to by ptr,
// which must be valid, either from an internal cache, the block
// cache, or from the server. An error is returned if the retrieved
// block is not a file block. If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// This must be called only by GetFileBlockForReading(),
// getFileBlockLocked(), and getFileLocked().
//
// p is used only when reporting errors and sending read
// notifications, and can be empty.
func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer,
branch BranchName, p path, rtype blockReqType) (
*FileBlock, error) {
if rtype != blockReadParallel {
fbo.blockLock.AssertAnyLocked(lState)
} else if lState != nil {
panic("Non-nil lState passed to getFileBlockHelperLocked " +
"with blockReadParallel")
}
block, err := fbo.getBlockHelperLocked(
ctx, lState, kmd, ptr, branch, NewFileBlock, TransientEntry, p, rtype)
if err != nil {
return nil, err
}
fblock, ok := block.(*FileBlock)
if !ok {
return nil, NotFileBlockError{ptr, branch, p}
}
return fblock, nil
}
// GetBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. The
// returned block may have a generic type (not DirBlock or FileBlock).
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointer refers to. The block will not be cached, if it wasn't
// in the cache already.
func (fbo *folderBlockOps) GetBlockForReading(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer, branch BranchName) (
Block, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getBlockHelperLocked(ctx, lState, kmd, ptr, branch,
NewCommonBlock, NoCacheEntry, path{}, blockRead)
}
// GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
// of the blocks pointed to by ptrs, all of which must be valid,
// either from the cache or from the server.
//
// The caller can specify a set of pointers using
// `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
// errors are tolerated. In that case, the returned sum will not
// include the size for any pointers in the
// `ignoreRecoverableForRemovalErrors` set that hit such an error.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking, which don't know what kind of block
// the pointers refer to. Any downloaded blocks will not be cached,
// if they weren't in the cache already.
func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptrs []BlockPointer,
ignoreRecoverableForRemovalErrors map[BlockPointer]bool,
branch BranchName) (uint64, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
sumCh := make(chan uint32, len(ptrs))
eg, groupCtx := errgroup.WithContext(ctx)
for _, ptr := range ptrs {
ptr := ptr // capture range variable
eg.Go(func() error {
size, err := fbo.getCleanEncodedBlockSizeLocked(groupCtx, nil,
kmd, ptr, branch, blockReadParallel)
// TODO: we might be able to recover the size of the
// top-most block of a removed file using the merged
// directory entry, the same way we do in
// `folderBranchOps.unrefEntry`.
if isRecoverableBlockErrorForRemoval(err) &&
ignoreRecoverableForRemovalErrors[ptr] {
fbo.log.CDebugf(groupCtx, "Hit an ignorable, recoverable "+
"error for block %v: %v", ptr, err)
return nil
}
if err != nil {
return err
}
sumCh <- size
return nil
})
}
if err := eg.Wait(); err != nil {
return 0, err
}
close(sumCh)
var sum uint64
for size := range sumCh {
sum += uint64(size)
}
return sum, nil
}
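// Editor's note -- a generic sketch of the fan-out pattern used above; it is not part of
// the upstream file, and `items` and `work` are placeholders. Each goroutine writes its
// result into a channel buffered to the number of tasks (so sends never block), the
// channel is closed only after eg.Wait(), and the results are summed afterwards:
//
//	results := make(chan uint32, len(items))
//	eg, groupCtx := errgroup.WithContext(ctx)
//	for _, it := range items {
//		it := it // capture range variable
//		eg.Go(func() error {
//			v, err := work(groupCtx, it)
//			if err != nil {
//				return err
//			}
//			results <- v
//			return nil
//		})
//	}
//	if err := eg.Wait(); err != nil {
//		return 0, err
//	}
//	close(results)
//	var sum uint64
//	for v := range results {
//		sum += uint64(v)
//	}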
// getDirBlockHelperLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This must be called only by GetDirBlockForReading() and
// getDirLocked().
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer,
branch BranchName, p path, rtype blockReqType) (*DirBlock, error) {
if rtype != blockReadParallel {
fbo.blockLock.AssertAnyLocked(lState)
}
// Pass in an empty notify path because notifications should only
// trigger for file reads.
block, err := fbo.getBlockHelperLocked(
ctx, lState, kmd, ptr, branch, NewDirBlock, TransientEntry, path{}, rtype)
if err != nil {
return nil, err
}
dblock, ok := block.(*DirBlock)
if !ok {
return nil, NotDirBlockError{ptr, branch, p}
}
return dblock, nil
}
// GetFileBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getFileBlockLocked() and getFileLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer,
branch BranchName, p path) (*FileBlock, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getFileBlockHelperLocked(
ctx, lState, kmd, ptr, branch, p, blockRead)
}
// GetDirBlockForReading retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a dir block.
//
// This should be called for "internal" operations, like conflict
// resolution and state checking. "Real" operations should use
// getDirLocked() instead.
//
// p is used only when reporting errors, and can be empty.
func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer,
branch BranchName, p path) (*DirBlock, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getDirBlockHelperLocked(
ctx, lState, kmd, ptr, branch, p, blockRead)
}
// getFileBlockLocked retrieves the block pointed to by ptr, which
// must be valid, either from the cache or from the server. An error
// is returned if the retrieved block is not a file block.
//
// The given path must be valid, and the given pointer must be its
// tail pointer or an indirect pointer from it. A read notification is
// triggered for the given path only if the block isn't in the cache.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetFileBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, or the block is currently being synced, this
// method makes a copy of the file block and returns it. If this
// method might be called again for the same block within a single
// operation, it is the caller's responsibility to write that block
// back to the cache as dirty.
//
// Note that blockLock must be locked exactly when rtype ==
// blockWrite, and must be r-locked when rtype == blockRead. (This
// differs from getDirLocked.) This is because a write operation
// (like write, truncate and sync which lock blockLock) fetching a
// file block will almost always need to modify that block, and so
// will pass in blockWrite. If rtype == blockReadParallel, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`.
//
// file is used only when reporting errors and sending read
// notifications, and can be empty except that file.Branch must be set
// correctly.
//
// This method also returns whether the block was already dirty.
func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ptr BlockPointer,
file path, rtype blockReqType) (
fblock *FileBlock, wasDirty bool, err error) {
switch rtype {
case blockRead:
fbo.blockLock.AssertRLocked(lState)
case blockWrite:
fbo.blockLock.AssertLocked(lState)
case blockReadParallel:
// This goroutine might not be the official lock holder, so
// don't make any assertions.
if lState != nil {
panic("Non-nil lState passed to getFileBlockLocked " +
"with blockReadParallel")
}
case blockLookup:
panic("blockLookup should only be used for directory blocks")
default:
panic(fmt.Sprintf("Unknown block req type: %d", rtype))
}
fblock, err = fbo.getFileBlockHelperLocked(
ctx, lState, kmd, ptr, file.Branch, file, rtype)
if err != nil {
return nil, false, err
}
wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch)
if rtype == blockWrite {
// Copy the block if it's for writing, and either the
// block is not yet dirty or the block is currently
// being sync'd and needs a copy even though it's
// already dirty.
df := fbo.dirtyFiles[file.tailPointer()]
if !wasDirty || (df != nil && df.blockNeedsCopy(ptr)) {
fblock = fblock.DeepCopy()
}
}
return fblock, wasDirty, nil
}
// getFileLocked is getFileBlockLocked called with file.tailPointer().
func (fbo *folderBlockOps) getFileLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, file path,
rtype blockReqType) (*FileBlock, error) {
// Callers should have already done this check, but it doesn't
// hurt to do it again.
if !file.isValid() {
return nil, InvalidPathError{file}
}
fblock, _, err := fbo.getFileBlockLocked(
ctx, lState, kmd, file.tailPointer(), file, rtype)
return fblock, err
}
// GetIndirectFileBlockInfos returns a list of BlockInfos for all
// indirect blocks of the given file. If the returned error is a
// recoverable one (as determined by
// isRecoverableBlockErrorForRemoval), the returned list may still be
// non-empty, and holds all the BlockInfos for all found indirect
// blocks.
func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context,
lState *lockState, kmd KeyMetadata, file path) ([]BlockInfo, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var uid keybase1.UID // Data reads don't depend on the uid.
fd := fbo.newFileData(lState, file, uid, kmd)
return fd.getIndirectFileBlockInfos(ctx)
}
// GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos
// for all indirect blocks of the given file, starting from the given
// top-most block. If the returned error is a recoverable one (as
// determined by isRecoverableBlockErrorForRemoval), the returned list
// may still be non-empty, and holds all the BlockInfos for all found
// indirect blocks. (This will be relevant when we handle multiple
// levels of indirection.)
func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock(
ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
topBlock *FileBlock) (
[]BlockInfo, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var uid keybase1.UID // Data reads don't depend on the uid.
fd := fbo.newFileData(lState, file, uid, kmd)
return fd.getIndirectFileBlockInfosWithTopBlock(ctx, topBlock)
}
// DeepCopyFile makes a complete copy of the given file, deduping leaf
// blocks and making new random BlockPointers for all indirect blocks.
// It returns the new top pointer of the copy, and all the new child
// pointers in the copy. It takes a custom DirtyBlockCache, which
// directs where the resulting block copies are stored.
func (fbo *folderBlockOps) DeepCopyFile(
ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
dirtyBcache DirtyBlockCache, dataVer DataVer) (
newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error) {
// Deep copying doesn't alter any data in use, it only makes copy,
// so only a read lock is needed.
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return BlockPointer{}, nil, err
}
fd := fbo.newFileDataWithCache(
lState, file, session.UID, kmd, dirtyBcache)
return fd.deepCopy(ctx, dataVer)
}
func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context,
lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
fd := fbo.newFileDataWithCache(
lState, file, session.UID, kmd, dirtyBcache)
return fd.undupChildrenInCopy(ctx, fbo.config.BlockCache(),
fbo.config.BlockOps(), bps, topBlock)
}
func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context,
lState *lockState, kmd KeyMetadata, file path, bps *blockPutState,
dirtyBcache DirtyBlockCache, topBlock *FileBlock) ([]BlockInfo, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
fd := fbo.newFileDataWithCache(
lState, file, session.UID, kmd, dirtyBcache)
return fd.readyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(),
fbo.config.BlockOps(), bps, topBlock)
}
// getDirLocked retrieves the block pointed to by the tail pointer of
// the given path, which must be valid, either from the cache or from
// the server. An error is returned if the retrieved block is not a
// dir block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it. If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
//
// Note that blockLock must be either r-locked or locked, but
// independently of rtype. (This differs from getFileLocked and
// getFileBlockLocked.) File write operations (which lock blockLock)
// don't need a copy of parent dir blocks, and non-file write
// operations do need to copy dir blocks for modifications.
func (fbo *folderBlockOps) getDirLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
*DirBlock, error) {
fbo.blockLock.AssertAnyLocked(lState)
// Callers should have already done this check, but it doesn't
// hurt to do it again.
if !dir.isValid() {
return nil, InvalidPathError{dir}
}
// Get the block for the last element in the path.
dblock, err := fbo.getDirBlockHelperLocked(
ctx, lState, kmd, dir.tailPointer(), dir.Branch, dir, rtype)
if err != nil {
return nil, err
}
if rtype == blockWrite && !fbo.config.DirtyBlockCache().IsDirty(
fbo.id(), dir.tailPointer(), dir.Branch) {
// Copy the block if it's for writing and the block is
// not yet dirty.
dblock = dblock.DeepCopy()
}
return dblock, nil
}
// GetDir retrieves the block pointed to by the tail pointer of the
// given path, which must be valid, either from the cache or from the
// server. An error is returned if the retrieved block is not a dir
// block.
//
// This shouldn't be called for "internal" operations, like conflict
// resolution and state checking -- use GetDirBlockForReading() for
// those instead.
//
// When rtype == blockWrite and the cached version of the block is
// currently clean, this method makes a copy of the directory block
// and returns it. If this method might be called again for the same
// block within a single operation, it is the caller's responsibility
// to write that block back to the cache as dirty.
func (fbo *folderBlockOps) GetDir(
ctx context.Context, lState *lockState, kmd KeyMetadata, dir path,
rtype blockReqType) (*DirBlock, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
}
func (fbo *folderBlockOps) addDirEntryInCacheLocked(lState *lockState, dir path,
newName string, newDe DirEntry) {
fbo.blockLock.AssertLocked(lState)
cacheEntry := fbo.deCache[dir.tailPointer().Ref()]
if cacheEntry.adds == nil {
cacheEntry.adds = make(map[string]BlockPointer)
}
cacheEntry.adds[newName] = newDe.BlockPointer
// In case it was removed in the cache but not flushed yet.
delete(cacheEntry.dels, newName)
fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}
// AddDirEntryInCache adds a brand new entry to the given directory in
// the cache, which will get applied to the dirty block on subsequent
// fetches for the directory. The new entry must not yet have a cache
// entry itself.
func (fbo *folderBlockOps) AddDirEntryInCache(lState *lockState, dir path,
newName string, newDe DirEntry) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.addDirEntryInCacheLocked(lState, dir, newName, newDe)
// Add target dir entry as well.
if newDe.IsInitialized() {
cacheEntry, ok := fbo.deCache[newDe.Ref()]
if ok {
panic("New entry shouldn't already exist")
}
cacheEntry.dirEntry = newDe
fbo.deCache[newDe.Ref()] = cacheEntry
}
}
func (fbo *folderBlockOps) removeDirEntryInCacheLocked(lState *lockState,
dir path, oldName string) {
fbo.blockLock.AssertLocked(lState)
cacheEntry := fbo.deCache[dir.tailPointer().Ref()]
if cacheEntry.dels == nil {
cacheEntry.dels = make(map[string]bool)
}
cacheEntry.dels[oldName] = true
// In case it was added in the cache but not flushed yet.
delete(cacheEntry.adds, oldName)
fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}
// RemoveDirEntryInCache removes an entry from the given directory in
// the cache, which will get applied to the dirty block on subsequent
// fetches for the directory.
func (fbo *folderBlockOps) RemoveDirEntryInCache(lState *lockState, dir path,
oldName string) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.removeDirEntryInCacheLocked(lState, dir, oldName)
}
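// Editor's note -- illustrative only, not part of the upstream file: the adds and dels
// maps cancel each other, so an unsynced create followed by a remove of the same name
// (or the reverse) leaves the directory's deCache entry consistent:
//
//	AddDirEntryInCache(lState, dir, "foo", de) // adds["foo"] = de.BlockPointer; "foo" leaves dels
//	RemoveDirEntryInCache(lState, dir, "foo")  // dels["foo"] = true; "foo" leaves adds
//
// After both calls only dels["foo"] remains, i.e. the name is recorded as removed until
// the cached change is applied to the directory's dirty block or cleared.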
// RenameDirEntryInCache updates the entries of both the old and new
// parent dirs for the given target dir atomically (with respect to
// blockLock). It also updates the cache entry for the target, which
// would have its Ctime changed. The updates will get applied to the
// dirty blocks on subsequent fetches.
//
// The returned bool indicates whether or not the caller should clean
// up the target cache entry when the effects of the operation are no
// longer needed.
func (fbo *folderBlockOps) RenameDirEntryInCache(lState *lockState,
oldParent path, oldName string, newParent path, newName string,
newDe DirEntry) (deleteTargetDirEntry bool) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.addDirEntryInCacheLocked(lState, newParent, newName, newDe)
fbo.removeDirEntryInCacheLocked(lState, oldParent, oldName)
// If there's already an entry for the target, only update the
// Ctime on a rename.
cacheEntry, ok := fbo.deCache[newDe.Ref()]
if ok && cacheEntry.dirEntry.IsInitialized() {
cacheEntry.dirEntry.Ctime = newDe.Ctime
} else {
cacheEntry.dirEntry = newDe
deleteTargetDirEntry = true
}
fbo.deCache[newDe.Ref()] = cacheEntry
return deleteTargetDirEntry
}
func (fbo *folderBlockOps) setCachedAttrLocked(
lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
doCreate bool) {
fbo.blockLock.AssertLocked(lState)
fileEntry, ok := fbo.deCache[ref]
if !ok {
if !doCreate {
return
}
fileEntry.dirEntry = *realEntry
}
switch attr {
case exAttr:
fileEntry.dirEntry.Type = realEntry.Type
case mtimeAttr:
fileEntry.dirEntry.Mtime = realEntry.Mtime
}
fileEntry.dirEntry.Ctime = realEntry.Ctime
fbo.deCache[ref] = fileEntry
}
// SetAttrInDirEntryInCache updates an attribute in the cached entry for
// the given target, which will get applied to the dirty block on
// subsequent fetches for its parent directory.
//
// The returned bool indicates whether or not the caller should clean
// up the cache entry when the effects of the operation are no longer
// needed.
func (fbo *folderBlockOps) SetAttrInDirEntryInCache(lState *lockState,
newDe DirEntry, attr attrChange) (deleteTargetDirEntry bool) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
// Only ask the caller to clean up the cache entry if it didn't
// already exist before this call.
_, ok := fbo.deCache[newDe.Ref()]
if !ok {
deleteTargetDirEntry = true
}
fbo.setCachedAttrLocked(
lState, newDe.Ref(), attr, &newDe,
true /* create the deCache entry if it doesn't exist yet */)
return deleteTargetDirEntry
}
// ClearCachedAddsAndRemoves clears out any cached directory entry
// adds and removes for the given dir.
func (fbo *folderBlockOps) ClearCachedAddsAndRemoves(
lState *lockState, dir path) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
cacheEntry, ok := fbo.deCache[dir.tailPointer().Ref()]
if !ok {
return
}
// If there's no dirEntry, we can just delete the whole thing.
if !cacheEntry.dirEntry.IsInitialized() {
delete(fbo.deCache, dir.tailPointer().Ref())
return
}
// Otherwise just nil out the adds and dels.
cacheEntry.adds = nil
cacheEntry.dels = nil
fbo.deCache[dir.tailPointer().Ref()] = cacheEntry
}
// updateWithDirtyEntriesLocked checks if the given DirBlock has any
// entries that are in deCache (i.e., entries pointing to dirty
// files). If so, it makes a copy with all such entries replaced with
// the ones in deCache and returns it. If not, it just returns the
// given one.
func (fbo *folderBlockOps) updateWithDirtyEntriesLocked(ctx context.Context,
lState *lockState, dir path, dblock *DirBlock) (*DirBlock, error) {
fbo.blockLock.AssertAnyLocked(lState)
// see if this directory has any outstanding writes/truncates that
// require an updated DirEntry
// Save some time for the common case of having no dirty
// files.
if len(fbo.deCache) == 0 {
return dblock, nil
}
var dblockCopy *DirBlock
dirCacheEntry := fbo.deCache[dir.tailPointer().Ref()]
// TODO: We should get rid of deCache completely and use only
// DirtyBlockCache to store the dirtied version of the DirBlock.
// We can't do that yet, because there might be multiple
// outstanding dirty files in one directory, and the KBFSOps API
// allows for syncing one at a time, so keeping a single dirtied
// DirBlock would accidentally sync the DirEntry of file A when a
// sync of file B is requested.
//
// Soon a sync will sync everything that's dirty at once, and so
// we can remove deCache at that point. Until then, we must
// incrementally build it up each time.
// Add cached additions to the copy.
for k, ptr := range dirCacheEntry.adds {
de, ok := fbo.deCache[ptr.Ref()]
if !ok {
return nil, fmt.Errorf("No cached dir entry found for new entry "+
"%s in dir %s (%v)", k, dir, dir.tailPointer())
}
if dblockCopy == nil {
dblockCopy = dblock.DeepCopy()
}
dblockCopy.Children[k] = de.dirEntry
}
// Remove cached removals from the copy.
for k := range dirCacheEntry.adds {
_, ok := dblock.Children[k]
if !ok {
continue
}
if dblockCopy == nil {
dblockCopy = dblock.DeepCopy()
}
delete(dblockCopy.Children, k)
}
// Update dir entries for any modified files.
for k, v := range dblock.Children {
de, ok := fbo.deCache[v.Ref()]
if !ok {
continue
}
if dblockCopy == nil {
dblockCopy = dblock.DeepCopy()
}
dblockCopy.Children[k] = de.dirEntry
}
if dblockCopy == nil {
return dblock, nil
}
return dblockCopy, nil
}
// getDirtyDirLocked composes getDirLocked and
// updateWithDirtyEntriesLocked. Note that a dirty dir means that it
// has entries possibly pointing to dirty files, not that it's dirty
// itself.
func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, dir path, rtype blockReqType) (
*DirBlock, error) {
fbo.blockLock.AssertAnyLocked(lState)
dblock, err := fbo.getDirLocked(ctx, lState, kmd, dir, rtype)
if err != nil {
return nil, err
}
return fbo.updateWithDirtyEntriesLocked(ctx, lState, dir, dblock)
}
// GetDirtyDirChildren returns a map of EntryInfos for the (possibly
// dirty) children entries of the given directory.
func (fbo *folderBlockOps) GetDirtyDirChildren(
ctx context.Context, lState *lockState, kmd KeyMetadata, dir path) (
map[string]EntryInfo, error) {
dblock, err := func() (*DirBlock, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getDirtyDirLocked(ctx, lState, kmd, dir, blockRead)
}()
if err != nil {
return nil, err
}
children := make(map[string]EntryInfo)
for k, de := range dblock.Children {
children[k] = de.EntryInfo
}
return children, nil
}
// file must have a valid parent.
func (fbo *folderBlockOps) getDirtyParentAndEntryLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, file path, rtype blockReqType) (
*DirBlock, DirEntry, error) {
fbo.blockLock.AssertAnyLocked(lState)
if !file.hasValidParent() {
return nil, DirEntry{}, InvalidParentPathError{file}
}
parentPath := file.parentPath()
dblock, err := fbo.getDirtyDirLocked(
ctx, lState, kmd, *parentPath, rtype)
if err != nil {
return nil, DirEntry{}, err
}
// make sure it exists
name := file.tailName()
de, ok := dblock.Children[name]
if !ok {
return nil, DirEntry{}, NoSuchNameError{name}
}
return dblock, de, err
}
// GetDirtyParentAndEntry returns a copy of the parent DirBlock
// (suitable for modification) of the given file, which may contain
// entries pointing to other dirty files, and its possibly-dirty
// DirEntry in that directory. file must have a valid parent. Use
// GetDirtyEntry() if you only need the DirEntry.
func (fbo *folderBlockOps) GetDirtyParentAndEntry(
ctx context.Context, lState *lockState, kmd KeyMetadata, file path) (
*DirBlock, DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getDirtyParentAndEntryLocked(
ctx, lState, kmd, file, blockWrite)
}
// file must have a valid parent.
func (fbo *folderBlockOps) getDirtyEntryLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, file path) (DirEntry, error) {
// TODO: Since we only need a single DirEntry, avoid having to
// look up every entry in the DirBlock.
_, de, err := fbo.getDirtyParentAndEntryLocked(
ctx, lState, kmd, file, blockLookup)
return de, err
}
// GetDirtyEntry returns the possibly-dirty DirEntry of the given file
// in its parent DirBlock. file must have a valid parent.
func (fbo *folderBlockOps) GetDirtyEntry(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file path) (DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
}
// Lookup returns the possibly-dirty DirEntry of the given file in its
// parent DirBlock, and a Node for the file if it exists. It has to
// do all of this under the block lock to avoid races with
// UpdatePointers.
func (fbo *folderBlockOps) Lookup(
ctx context.Context, lState *lockState, kmd KeyMetadata,
dir Node, name string) (Node, DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
dirPath := fbo.nodeCache.PathFromNode(dir)
if !dirPath.isValid() {
return nil, DirEntry{}, InvalidPathError{dirPath}
}
childPath := dirPath.ChildPathNoPtr(name)
de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, childPath)
if err != nil {
return nil, DirEntry{}, err
}
if de.Type == Sym {
return nil, de, nil
}
err = checkDataVersion(fbo.config, childPath, de.BlockPointer)
if err != nil {
return nil, DirEntry{}, err
}
node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
return node, de, nil
}
func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(lState *lockState,
file path) *dirtyFile {
fbo.blockLock.AssertLocked(lState)
ptr := file.tailPointer()
df := fbo.dirtyFiles[ptr]
if df == nil {
df = newDirtyFile(file, fbo.config.DirtyBlockCache())
fbo.dirtyFiles[ptr] = df
}
return df
}
// cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
// does so if the block isn't already marked as dirty in the cache.
// This is useful when operating on a dirty copy of a block that may
// already be in the cache.
func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
lState *lockState, ptr BlockPointer, file path, block Block) error {
fbo.blockLock.AssertLocked(lState)
df := fbo.getOrCreateDirtyFileLocked(lState, file)
needsCaching, isSyncing := df.setBlockDirty(ptr)
if needsCaching {
err := fbo.config.DirtyBlockCache().Put(fbo.id(), ptr, file.Branch,
block)
if err != nil {
return err
}
}
if isSyncing {
fbo.doDeferWrite = true
}
return nil
}
func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
lState *lockState, de DirEntry) (*syncInfo, error) {
fbo.blockLock.AssertLocked(lState)
ref := de.Ref()
si, ok := fbo.unrefCache[ref]
if !ok {
so, err := newSyncOp(de.BlockPointer)
if err != nil {
return nil, err
}
si = &syncInfo{
oldInfo: de.BlockInfo,
op: so,
}
fbo.unrefCache[ref] = si
}
return si, nil
}
// GetDirtyRefs returns a list of references of all known dirty
// blocks.
func (fbo *folderBlockOps) GetDirtyRefs(lState *lockState) []BlockRef {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var dirtyRefs []BlockRef
for ref := range fbo.deCache {
dirtyRefs = append(dirtyRefs, ref)
}
return dirtyRefs
}
// fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
// failed with a recoverable block error on a multi-block file. It
// makes sure that any outstanding dirty versions of the file are
// fixed up to reflect the fact that some of the indirect pointers now
// need to change.
func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
ctx context.Context, lState *lockState, file path, kmd KeyMetadata,
redirtyOnRecoverableError map[BlockPointer]BlockPointer) {
fbo.blockLock.AssertLocked(lState)
defer func() {
// Below, this function can end up writing dirty blocks back
// to the cache, which will set `doDeferWrite` to `true`.
// This leads to future writes being unnecessarily deferred
// when a Sync is not happening, and can lead to dirty data
// being synced twice and sticking around for longer than
// needed. So just reset `doDeferWrite` once we're
// done. We're under `blockLock`, so this is safe.
fbo.doDeferWrite = false
}()
df := fbo.dirtyFiles[file.tailPointer()]
if df != nil {
// Un-orphan old blocks, since we are reverting back to the
// previous state.
for _, oldPtr := range redirtyOnRecoverableError {
fbo.log.CDebugf(ctx, "Un-orphaning %v", oldPtr)
df.setBlockOrphaned(oldPtr, false)
}
}
dirtyBcache := fbo.config.DirtyBlockCache()
topBlock, err := dirtyBcache.Get(fbo.id(), file.tailPointer(), fbo.branch())
fblock, ok := topBlock.(*FileBlock)
if err != nil || !ok {
fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
"top-block for %v: %v", file.tailPointer(), err)
return
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
return
}
fd := fbo.newFileData(lState, file, session.UID, kmd)
// If a copy of the top indirect block was made, we need to
// redirty all the sync'd blocks under their new IDs, so that
// future syncs will know they failed.
newPtrs := make(map[BlockPointer]bool, len(redirtyOnRecoverableError))
for newPtr := range redirtyOnRecoverableError {
newPtrs[newPtr] = true
}
found, err := fd.findIPtrsAndClearSize(ctx, fblock, newPtrs)
if err != nil {
fbo.log.CWarningf(
ctx, "Couldn't find and clear iptrs during recovery: %v", err)
return
}
for newPtr, oldPtr := range redirtyOnRecoverableError {
if !found[newPtr] {
continue
}
fbo.log.CDebugf(ctx, "Re-dirtying %v (and deleting dirty block %v)",
newPtr, oldPtr)
// These blocks would have been permanent, so they're
// definitely still in the cache.
b, err := fbo.config.BlockCache().Get(newPtr)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
continue
}
if err = fbo.cacheBlockIfNotYetDirtyLocked(
lState, newPtr, file, b); err != nil {
fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
}
fbo.log.CDebugf(ctx, "Deleting dirty ptr %v after recoverable error",
oldPtr)
err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
if err != nil {
fbo.log.CDebugf(ctx, "Couldn't del-dirty %v: %v", oldPtr, err)
}
}
}
func (fbo *folderBlockOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
// PrepRename prepares the given rename operation. It returns copies
// of the old and new parent block (which may be the same), what is to
// be the new DirEntry, and a local block cache. It also modifies md,
// which must be a copy.
func (fbo *folderBlockOps) PrepRename(
ctx context.Context, lState *lockState, md *RootMetadata,
oldParent path, oldName string, newParent path, newName string) (
oldPBlock, newPBlock *DirBlock, newDe DirEntry, lbc localBcache,
err error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
// look up in the old path
oldPBlock, err = fbo.getDirLocked(
ctx, lState, md, oldParent, blockWrite)
if err != nil {
return nil, nil, DirEntry{}, nil, err
}
newDe, ok := oldPBlock.Children[oldName]
// does the name exist?
if !ok {
return nil, nil, DirEntry{}, nil, NoSuchNameError{oldName}
}
ro, err := newRenameOp(oldName, oldParent.tailPointer(), newName,
newParent.tailPointer(), newDe.BlockPointer, newDe.Type)
if err != nil {
return nil, nil, DirEntry{}, nil, err
}
// A renameOp doesn't have a single path to represent it, so we
// can't call setFinalPath here unfortunately. That means any
// rename may force a manual paths population at other layers
// (e.g., for journal statuses). TODO: allow a way to set more
// than one final path for renameOps?
md.AddOp(ro)
lbc = make(localBcache)
// TODO: Write a SameBlock() function that can deal properly with
// dedup'd blocks that share an ID but can be updated separately.
if oldParent.tailPointer().ID == newParent.tailPointer().ID {
newPBlock = oldPBlock
} else {
newPBlock, err = fbo.getDirLocked(
ctx, lState, md, newParent, blockWrite)
if err != nil {
return nil, nil, DirEntry{}, nil, err
}
now := fbo.nowUnixNano()
oldGrandparent := *oldParent.parentPath()
if len(oldGrandparent.path) > 0 {
// Update the old parent's mtime/ctime, unless the
// oldGrandparent is the same as newParent (in which
// case, the syncBlockAndCheckEmbedLocked call by the
// caller will take care of it).
if oldGrandparent.tailPointer().ID != newParent.tailPointer().ID {
b, err := fbo.getDirLocked(ctx, lState, md, oldGrandparent, blockWrite)
if err != nil {
return nil, nil, DirEntry{}, nil, err
}
if de, ok := b.Children[oldParent.tailName()]; ok {
de.Ctime = now
de.Mtime = now
b.Children[oldParent.tailName()] = de
// Put this block back into the local cache as dirty
lbc[oldGrandparent.tailPointer()] = b
}
}
} else {
md.data.Dir.Ctime = now
md.data.Dir.Mtime = now
}
}
return oldPBlock, newPBlock, newDe, lbc, nil
}
func (fbo *folderBlockOps) newFileData(lState *lockState,
file path, uid keybase1.UID, kmd KeyMetadata) *fileData {
fbo.blockLock.AssertAnyLocked(lState)
return newFileData(file, uid, fbo.config.Crypto(),
fbo.config.BlockSplitter(), kmd,
func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
file path, rtype blockReqType) (*FileBlock, bool, error) {
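// Parallel reads (blockReadParallel) are made from goroutines that
// didn't take blockLock themselves, so don't pass the lock state down
// for them; shadowing lState keeps the caller's copy intact.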
lState := lState
if rtype == blockReadParallel {
lState = nil
}
return fbo.getFileBlockLocked(
ctx, lState, kmd, ptr, file, rtype)
},
func(ptr BlockPointer, block Block) error {
return fbo.cacheBlockIfNotYetDirtyLocked(
lState, ptr, file, block)
}, fbo.log)
}
func (fbo *folderBlockOps) newFileDataWithCache(lState *lockState,
file path, uid keybase1.UID, kmd KeyMetadata,
dirtyBcache DirtyBlockCache) *fileData {
fbo.blockLock.AssertAnyLocked(lState)
return newFileData(file, uid, fbo.config.Crypto(),
fbo.config.BlockSplitter(), kmd,
func(ctx context.Context, kmd KeyMetadata, ptr BlockPointer,
file path, rtype blockReqType) (*FileBlock, bool, error) {
block, err := dirtyBcache.Get(file.Tlf, ptr, file.Branch)
if fblock, ok := block.(*FileBlock); ok && err == nil {
return fblock, true, nil
}
lState := lState
if rtype == blockReadParallel {
lState = nil
}
return fbo.getFileBlockLocked(
ctx, lState, kmd, ptr, file, rtype)
},
func(ptr BlockPointer, block Block) error {
return dirtyBcache.Put(file.Tlf, ptr, file.Branch, block)
}, fbo.log)
}
// Read reads from the given file into the given buffer at the given
// offset. It returns the number of bytes read and nil, or 0 and the
// error if there was one.
func (fbo *folderBlockOps) Read(
ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
dest []byte, off int64) (int64, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
fbo.log.CDebugf(ctx, "Reading from %v", file.tailPointer())
var uid keybase1.UID // Data reads don't depend on the uid.
fd := fbo.newFileData(lState, file, uid, kmd)
return fd.read(ctx, dest, off)
}
func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
ctx context.Context, lState *lockState, file Node,
c DirtyPermChan) error {
var errListener chan error
err := func() error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
if err != nil {
return err
}
df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
errListener = make(chan error, 1)
df.addErrListener(errListener)
return nil
}()
if err != nil {
return err
}
logTimer := time.After(100 * time.Millisecond)
doLogUnblocked := false
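// Block until the dirty buffer grants permission, logging once if it
// takes a while, and bail out early with any sync error queued for
// this file.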
for {
select {
case <-c:
if doLogUnblocked {
fbo.log.CDebugf(ctx, "Write unblocked")
}
// Make sure there aren't any queued errors.
select {
case err := <-errListener:
return err
default:
}
return nil
case <-logTimer:
// Print a log message once if it's taking too long.
fbo.log.CDebugf(ctx,
"Blocking a write because of a full dirty buffer")
doLogUnblocked = true
case err := <-errListener:
// XXX: should we ignore non-fatal errors (like
// context.Canceled), or errors that are specific only to
// some other file being sync'd (e.g., "recoverable" block
// errors from which we couldn't recover)?
return err
}
}
}
func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.blockLock.AssertLocked(lState)
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked etc., and returns the file's top
// block along with the writing user's UID.
func (fbo *folderBlockOps) writeGetFileLocked(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file path) (*FileBlock, keybase1.UID, error) {
fbo.blockLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, "", err
}
if !kmd.GetTlfHandle().IsWriter(session.UID) {
return nil, "", NewWriteAccessError(kmd.GetTlfHandle(),
session.Name, file.String())
}
fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, blockWrite)
if err != nil {
return nil, "", err
}
return fblock, session.UID, nil
}
// Returns the set of blocks dirtied during this write that might need
// to be cleaned up if the write is deferred.
func (fbo *folderBlockOps) writeDataLocked(
ctx context.Context, lState *lockState, kmd KeyMetadata, file path,
data []byte, off int64) (latestWrite WriteRange, dirtyPtrs []BlockPointer,
newlyDirtiedChildBytes int64, err error) {
if jServer, err := GetJournalServer(fbo.config); err == nil {
jServer.dirtyOpStart(fbo.id())
defer jServer.dirtyOpEnd(fbo.id())
}
fbo.blockLock.AssertLocked(lState)
fbo.log.CDebugf(ctx, "writeDataLocked on file pointer %v",
file.tailPointer())
defer func() {
fbo.log.CDebugf(ctx, "writeDataLocked done: %v", err)
}()
fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
if err != nil {
return WriteRange{}, nil, 0, err
}
fd := fbo.newFileData(lState, file, uid, kmd)
dirtyBcache := fbo.config.DirtyBlockCache()
df := fbo.getOrCreateDirtyFileLocked(lState, file)
defer func() {
// Always update unsynced bytes and potentially force a sync,
// even on an error, since the previously-dirty bytes stay in
// the cache.
df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
if dirtyBcache.ShouldForceSync(fbo.id()) {
select {
// If we can't send on the channel, that means a sync is
// already in progress.
case fbo.forceSyncChan <- struct{}{}:
fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
default:
}
}
}()
de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
if err != nil {
return WriteRange{}, nil, 0, err
}
if de.BlockPointer != file.tailPointer() {
fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
"%v vs %v", de.BlockPointer, file.tailPointer())
}
si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
if err != nil {
return WriteRange{}, nil, 0, err
}
newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
fd.write(ctx, data, off, fblock, de, df)
// Record the unrefs before checking the error so we remember the
// state of newly dirtied blocks.
si.unrefs = append(si.unrefs, unrefs...)
if err != nil {
return WriteRange{}, nil, newlyDirtiedChildBytes, err
}
// Put it in the `deCache` even if the size didn't change, since
// the `deCache` is used to determine whether there are any dirty
// files. TODO: combine `deCache` with `dirtyFiles` and
// `unrefCache`.
cacheEntry := fbo.deCache[file.tailPointer().Ref()]
now := fbo.nowUnixNano()
newDe.Mtime = now
newDe.Ctime = now
cacheEntry.dirEntry = newDe
fbo.deCache[file.tailPointer().Ref()] = cacheEntry
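// If this write is being deferred because of an in-flight sync, track
// any bytes it extended the file by so they can be accounted for when
// the write is replayed.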
if fbo.doDeferWrite {
df.addDeferredNewBytes(bytesExtended)
}
latestWrite = si.op.addWrite(uint64(off), uint64(len(data)))
return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}
// Write writes the given data to the given file. May block if there
// is too much unflushed data; in that case, it will be unblocked by a
// future sync.
func (fbo *folderBlockOps) Write(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file Node, data []byte, off int64) error {
// If there is too much unflushed data, we should wait until some
// of it gets flushed so our memory usage doesn't grow without
// bound.
c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
fbo.id(), int64(len(data)))
if err != nil {
return err
}
defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
-int64(len(data)), false)
err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
if err != nil {
return err
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
if err != nil {
return err
}
defer func() {
fbo.doDeferWrite = false
}()
latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
ctx, lState, kmd, filePath, data, off)
if err != nil {
return err
}
fbo.observers.localChange(ctx, file, latestWrite)
if fbo.doDeferWrite {
// There's an ongoing sync, and this write altered dirty
// blocks that are in the process of syncing. So, we have to
// redo this write once the sync is complete, using the new
// file path.
//
// There is probably a less terrible way of doing this that
// doesn't involve so much copying and rewriting, but this is
// the most obviously correct way.
dataCopy := make([]byte, len(data))
copy(dataCopy, data)
fbo.log.CDebugf(ctx, "Deferring a write to file %v off=%d len=%d",
filePath.tailPointer(), off, len(data))
ds := fbo.deferred[filePath.tailPointer().Ref()]
ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
ds.writes = append(ds.writes,
func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
// We are about to re-dirty these bytes, so mark that
// they will no longer be synced via the old file.
df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)
// Write the data again. We know this won't be
// deferred, so no need to check the new ptrs.
_, _, _, err = fbo.writeDataLocked(
ctx, lState, kmd, f, dataCopy, off)
return err
})
ds.waitBytes += newlyDirtiedChildBytes
fbo.deferred[filePath.tailPointer().Ref()] = ds
}
return nil
}
// truncateExtendLocked is called by truncateLocked to extend a file,
// creating a hole between the old and new ends of the file.
func (fbo *folderBlockOps) truncateExtendLocked(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file path, size uint64, parentBlocks []parentBlockAndChildIndex) (
WriteRange, []BlockPointer, error) {
fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
if err != nil {
return WriteRange{}, nil, err
}
fd := fbo.newFileData(lState, file, uid, kmd)
de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
if err != nil {
return WriteRange{}, nil, err
}
df := fbo.getOrCreateDirtyFileLocked(lState, file)
newDe, dirtyPtrs, err := fd.truncateExtend(
ctx, size, fblock, parentBlocks, de, df)
if err != nil {
return WriteRange{}, nil, err
}
cacheEntry := fbo.deCache[file.tailPointer().Ref()]
now := fbo.nowUnixNano()
newDe.Mtime = now
newDe.Ctime = now
cacheEntry.dirEntry = newDe
fbo.deCache[file.tailPointer().Ref()] = cacheEntry
si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
if err != nil {
return WriteRange{}, nil, err
}
latestWrite := si.op.addTruncate(size)
if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
select {
// If we can't send on the channel, that means a sync is
// already in progress
case fbo.forceSyncChan <- struct{}{}:
fbo.log.CDebugf(ctx, "Forcing a sync due to full buffer")
default:
}
}
fbo.log.CDebugf(ctx, "truncateExtendLocked: done")
return latestWrite, dirtyPtrs, nil
}
// truncateExtendCutoffPoint is the size increase, in bytes, beyond
// which an extending truncate switches to the extend-with-a-hole
// algorithm.
const truncateExtendCutoffPoint = 128 * 1024
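// For example, growing a 4 KB file to 1 MB (well past the cutoff)
// takes the hole-creating path in truncateLocked, while growing it to
// 100 KB just writes zero bytes to fill the gap.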
// Returns the set of newly-ID'd blocks created during this truncate
// that might need to be cleaned up if the truncate is deferred.
func (fbo *folderBlockOps) truncateLocked(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file path, size uint64) (*WriteRange, []BlockPointer, int64, error) {
if jServer, err := GetJournalServer(fbo.config); err == nil {
jServer.dirtyOpStart(fbo.id())
defer jServer.dirtyOpEnd(fbo.id())
}
fblock, uid, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
if err != nil {
return &WriteRange{}, nil, 0, err
}
fd := fbo.newFileData(lState, file, uid, kmd)
// find the block where the file should now end
iSize := int64(size) // TODO: deal with overflow
_, parentBlocks, block, nextBlockOff, startOff, _, err :=
fd.getFileBlockAtOffset(ctx, fblock, iSize, blockWrite)
if err != nil {
return &WriteRange{}, nil, 0, err
}
currLen := int64(startOff) + int64(len(block.Contents))
if currLen+truncateExtendCutoffPoint < iSize {
latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked(
ctx, lState, kmd, file, uint64(iSize), parentBlocks)
if err != nil {
return &latestWrite, dirtyPtrs, 0, err
}
return &latestWrite, dirtyPtrs, 0, err
} else if currLen < iSize {
moreNeeded := iSize - currLen
latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err :=
fbo.writeDataLocked(ctx, lState, kmd, file,
make([]byte, moreNeeded, moreNeeded), currLen)
if err != nil {
return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
}
return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
} else if currLen == iSize && nextBlockOff < 0 {
// same size!
return nil, nil, 0, nil
}
// update the local entry size
de, err := fbo.getDirtyEntryLocked(ctx, lState, kmd, file)
if err != nil {
return nil, nil, 0, err
}
si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
if err != nil {
return nil, nil, 0, err
}
newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.truncateShrink(
ctx, size, fblock, de)
// Record the unrefs before checking the error so we remember the
// state of newly dirtied blocks.
si.unrefs = append(si.unrefs, unrefs...)
if err != nil {
return nil, nil, newlyDirtiedChildBytes, err
}
// Update dirtied bytes and unrefs regardless of error.
df := fbo.getOrCreateDirtyFileLocked(lState, file)
df.updateNotYetSyncingBytes(newlyDirtiedChildBytes)
latestWrite := si.op.addTruncate(size)
cacheEntry := fbo.deCache[file.tailPointer().Ref()]
now := fbo.nowUnixNano()
newDe.Mtime = now
newDe.Ctime = now
cacheEntry.dirEntry = newDe
fbo.deCache[file.tailPointer().Ref()] = cacheEntry
return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
}
// Truncate truncates or extends the given file to the given size.
// May block if there is too much unflushed data; in that case, it
// will be unblocked by a future sync.
func (fbo *folderBlockOps) Truncate(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file Node, size uint64) error {
// If there is too much unflushed data, we should wait until some
// of it gets flushed so our memory usage doesn't grow without
// bound.
//
// Assume the whole remaining file will be dirty after this
// truncate. TODO: try to figure out how many bytes actually will
// be dirtied ahead of time?
c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
fbo.id(), int64(size))
if err != nil {
return err
}
defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
-int64(size), false)
err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
if err != nil {
return err
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
if err != nil {
return err
}
defer func() {
fbo.doDeferWrite = false
}()
latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
ctx, lState, kmd, filePath, size)
if err != nil {
return err
}
if latestWrite != nil {
fbo.observers.localChange(ctx, file, *latestWrite)
}
if fbo.doDeferWrite {
// There's an ongoing sync, and this truncate altered
// dirty blocks that are in the process of syncing. So,
// we have to redo this truncate once the sync is complete,
// using the new file path.
fbo.log.CDebugf(ctx, "Deferring a truncate to file %v",
filePath.tailPointer())
ds := fbo.deferred[filePath.tailPointer().Ref()]
ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
ds.writes = append(ds.writes,
func(ctx context.Context, lState *lockState, kmd KeyMetadata, f path) error {
// We are about to re-dirty these bytes, so mark that
// they will no longer be synced via the old file.
df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
df.updateNotYetSyncingBytes(-newlyDirtiedChildBytes)
// Truncate the file again. We know this won't be
// deferred, so no need to check the new ptrs.
_, _, _, err := fbo.truncateLocked(
ctx, lState, kmd, f, size)
return err
})
ds.waitBytes += newlyDirtiedChildBytes
fbo.deferred[filePath.tailPointer().Ref()] = ds
}
return nil
}
// IsDirty returns whether the given file is dirty; if false is
// returned, then the file doesn't need to be synced.
func (fbo *folderBlockOps) IsDirty(lState *lockState, file path) bool {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
// Definitely dirty if a block is dirty.
if fbo.config.DirtyBlockCache().IsDirty(
fbo.id(), file.tailPointer(), file.Branch) {
return true
}
// The deCache entry could still be dirty, if a file had an
// attribute set (like mtime or exec) after the file was removed.
// Still count the file as dirty in that case; most likely, the
// caller will next call `ClearCacheInfo` to remove this entry.
// (See comments in `folderBranchOps.syncLocked`.)
_, ok := fbo.deCache[file.tailPointer().Ref()]
return ok
}
func (fbo *folderBlockOps) clearCacheInfoLocked(lState *lockState,
file path) error {
fbo.blockLock.AssertLocked(lState)
ref := file.tailPointer().Ref()
delete(fbo.deCache, ref)
delete(fbo.unrefCache, ref)
df := fbo.dirtyFiles[file.tailPointer()]
if df != nil {
err := df.finishSync()
if err != nil {
return err
}
delete(fbo.dirtyFiles, file.tailPointer())
}
return nil
}
// ClearCacheInfo removes any cached info for the given file.
func (fbo *folderBlockOps) ClearCacheInfo(lState *lockState, file path) error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
return fbo.clearCacheInfoLocked(lState, file)
}
// revertSyncInfoAfterRecoverableError updates the saved sync info to
// include all the blocks from before the error, except for those that
// have encountered recoverable block errors themselves.
func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
blocksToRemove []BlockPointer, result fileSyncState) {
si := result.si
savedSi := result.savedSi
// Save the blocks we need to clean up on the next attempt.
toClean := si.toCleanIfUnused
newIndirect := make(map[BlockPointer]bool)
for _, ptr := range result.newIndirectFileBlockPtrs {
newIndirect[ptr] = true
}
// Propagate all unrefs forward, except those that belong to new
// blocks that were created during the sync.
unrefs := make([]BlockInfo, 0, len(si.unrefs))
for _, unref := range si.unrefs {
if newIndirect[unref.BlockPointer] {
fbo.log.CDebugf(nil, "Dropping unref %v", unref)
continue
}
unrefs = append(unrefs, unref)
}
// This sync will be retried and needs new blocks, so
// reset everything in the sync info.
*si = *savedSi
si.toCleanIfUnused = toClean
si.unrefs = unrefs
if si.bps == nil {
return
}
si.bps.blockStates = nil
// Mark any bad pointers so they get skipped next time.
blocksToRemoveSet := make(map[BlockPointer]bool)
for _, ptr := range blocksToRemove {
blocksToRemoveSet[ptr] = true
}
for _, bs := range savedSi.bps.blockStates {
// Only save the good pointers
if !blocksToRemoveSet[bs.blockPtr] {
si.bps.blockStates = append(si.bps.blockStates, bs)
}
}
}
// ReadyBlock is a thin wrapper around BlockOps.Ready() that handles
// checking for duplicates.
func ReadyBlock(ctx context.Context, bcache BlockCache, bops BlockOps,
crypto cryptoPure, kmd KeyMetadata, block Block, uid keybase1.UID,
bType keybase1.BlockType) (
info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error) {
var ptr BlockPointer
directType := IndirectBlock
if fBlock, ok := block.(*FileBlock); ok && !fBlock.IsInd {
directType = DirectBlock
// first see if we are duplicating any known blocks in this folder
ptr, err = bcache.CheckForKnownPtr(kmd.TlfID(), fBlock)
if err != nil {
return
}
} else if dBlock, ok := block.(*DirBlock); ok {
if dBlock.IsInd {
panic("Indirect directory blocks aren't supported yet")
}
// TODO: support indirect directory blocks.
directType = DirectBlock
}
// Ready the block, even in the case where we can reuse an
// existing block, just so that we know what the size of the
// encrypted data will be.
id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, block)
if err != nil {
return
}
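// A non-zero ptr means CheckForKnownPtr found a block in this folder
// with identical contents; reuse its ID under a fresh ref nonce and
// writer instead of minting a brand-new block ID.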
if ptr.IsInitialized() {
ptr.RefNonce, err = crypto.MakeBlockRefNonce()
if err != nil {
return
}
ptr.SetWriter(uid)
// In case we're deduping an old pointer with an unknown block type.
ptr.DirectType = directType
} else {
ptr = BlockPointer{
ID: id,
KeyGen: kmd.LatestKeyGeneration(),
DataVer: block.DataVersion(),
DirectType: directType,
Context: kbfsblock.MakeFirstContext(uid, bType),
}
}
info = BlockInfo{
BlockPointer: ptr,
EncodedSize: uint32(readyBlockData.GetEncodedSize()),
}
return
}
// fileSyncState holds state for a sync operation for a single
// file.
type fileSyncState struct {
// If fblock is non-nil, the (dirty, indirect, cached) block
// it points to will be set to savedFblock on a recoverable
// error.
fblock, savedFblock *FileBlock
// redirtyOnRecoverableError, which is non-nil only when fblock is
// non-nil, contains pointers that need to be re-dirtied if the
// top block gets copied during the sync, and a recoverable error
// happens. Maps to the old block pointer for the block, which
// would need a DirtyBlockCache.Delete.
redirtyOnRecoverableError map[BlockPointer]BlockPointer
// If si is non-nil, its updated state will be reset on
// error. Also, if the error is recoverable, it will be
// reverted to savedSi.
//
// TODO: Working with si in this way is racy, since si is a
// member of unrefCache.
si, savedSi *syncInfo
// oldFileBlockPtrs is a list of transient entries in the
// block cache for the file, which should be removed when the
// sync finishes.
oldFileBlockPtrs []BlockPointer
// newIndirectFileBlockPtrs is a list of permanent entries
// added to the block cache for the file, which should be
// removed after the blocks have been sent to the server.
// They are not removed on an error, because in that case the
// file is still dirty locally and may get another chance to
// be sync'd.
//
// TODO: This can be a list of IDs instead.
newIndirectFileBlockPtrs []BlockPointer
}
// startSyncWrite contains the portion of StartSync() that's done
// while write-locking blockLock. If there is no dirty de cache
// entry, dirtyDe will be nil.
func (fbo *folderBlockOps) startSyncWrite(ctx context.Context,
lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
fblock *FileBlock, bps *blockPutState, syncState fileSyncState,
dirtyDe *DirEntry, err error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
// update the parent directories, and write all the new blocks out
// to disk
fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, blockWrite)
if err != nil {
return nil, nil, syncState, nil, err
}
fileRef := file.tailPointer().Ref()
si, ok := fbo.unrefCache[fileRef]
if !ok {
return nil, nil, syncState, nil,
fmt.Errorf("No syncOp found for file ref %v", fileRef)
}
// Collapse the write range to reduce the size of the sync op.
si.op.Writes = si.op.collapseWriteRange(nil)
// If this function returns successfully, we need to make sure the op
// in `md` is not the same variable as the op in `unrefCache`,
// because the latter could get updated still by local writes
// before `md` is flushed to the server. We don't copy it here
// because code below still needs to modify it (and by extension,
// the one stored in `syncState.si`).
si.op.setFinalPath(file)
md.AddOp(si.op)
// Fill in syncState.
if fblock.IsInd {
fblockCopy := fblock.DeepCopy()
syncState.fblock = fblock
syncState.savedFblock = fblockCopy
syncState.redirtyOnRecoverableError = make(map[BlockPointer]BlockPointer)
}
syncState.si = si
syncState.savedSi, err = si.DeepCopy(fbo.config.Codec())
if err != nil {
return nil, nil, syncState, nil, err
}
if si.bps == nil {
si.bps = newBlockPutState(1)
} else {
// reinstate byte accounting from the previous Sync
md.SetRefBytes(si.refBytes)
md.AddDiskUsage(si.refBytes)
md.SetUnrefBytes(si.unrefBytes)
md.SetMDRefBytes(0) // this will be calculated anew
md.SetDiskUsage(md.DiskUsage() - si.unrefBytes)
syncState.newIndirectFileBlockPtrs = append(
syncState.newIndirectFileBlockPtrs, si.op.Refs()...)
}
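// Record the final byte accounting on the syncInfo so that a retried
// sync can reinstate it (see the branch above).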
defer func() {
si.refBytes = md.RefBytes()
si.unrefBytes = md.UnrefBytes()
}()
dirtyBcache := fbo.config.DirtyBlockCache()
df := fbo.getOrCreateDirtyFileLocked(lState, file)
fd := fbo.newFileData(lState, file, uid, md.ReadOnly())
// Note: below we add possibly updated file blocks as "unref" and
// "ref" blocks. This is fine, since conflict resolution or
// notifications will never happen within a file.
// If needed, split the children blocks up along new boundaries
// (e.g., if using a fingerprint-based block splitter).
unrefs, err := fd.split(ctx, fbo.id(), dirtyBcache, fblock, df)
// Preserve any unrefs before checking the error.
for _, unref := range unrefs {
md.AddUnrefBlock(unref)
}
if err != nil {
return nil, nil, syncState, nil, err
}
// Ready all children blocks, if any.
oldPtrs, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(),
fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df)
if err != nil {
return nil, nil, syncState, nil, err
}
for newInfo, oldPtr := range oldPtrs {
syncState.newIndirectFileBlockPtrs = append(
syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer)
df.setBlockOrphaned(oldPtr, true)
// Defer the DirtyBlockCache.Delete until after the new path
// is ready, in case anyone tries to read the dirty file in
// the meantime.
syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr)
md.AddRefBlock(newInfo)
// If this block is replacing a block from a previous, failed
// Sync, we need to take that block out of the refs list, and
// avoid unrefing it as well.
si.removeReplacedBlock(ctx, fbo.log, oldPtr)
err = df.setBlockSyncing(oldPtr)
if err != nil {
return nil, nil, syncState, nil, err
}
syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr
}
err = df.setBlockSyncing(file.tailPointer())
if err != nil {
return nil, nil, syncState, nil, err
}
syncState.oldFileBlockPtrs = append(
syncState.oldFileBlockPtrs, file.tailPointer())
// Capture the current de before we release the block lock, so
// other deferred writes don't slip in.
if de, ok := fbo.deCache[fileRef]; ok {
dirtyDe = &de.dirEntry
}
// Leave a copy of the syncOp in `unrefCache`, since it may be
// modified by future local writes while the syncOp in `md` should
// only be modified by the rest of this sync process.
var syncOpCopy *syncOp
err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op)
if err != nil {
return nil, nil, syncState, nil, err
}
fbo.unrefCache[fileRef].op = syncOpCopy
// If there are any deferred bytes, it must be because this is
// a retried sync and some blocks snuck in between sync. Those
// blocks will get transferred now, but they are also on the
// deferred list and will be retried on the next sync as well.
df.assimilateDeferredNewBytes()
// TODO: Returning si.bps in this way is racy, since si is a
// member of unrefCache.
return fblock, si.bps, syncState, dirtyDe, nil
}
func (fbo *folderBlockOps) makeLocalBcache(ctx context.Context,
lState *lockState, md *RootMetadata, file path, si *syncInfo,
dirtyDe *DirEntry) (lbc localBcache, err error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
parentPath := file.parentPath()
dblock, err := fbo.getDirLocked(
ctx, lState, md.ReadOnly(), *parentPath, blockWrite)
if err != nil {
return nil, err
}
// Add in the cached unref'd blocks.
si.mergeUnrefCache(md)
lbc = make(localBcache)
// Update the file's directory entry to the cached copy.
if dirtyDe != nil {
dirtyDe.EncodedSize = si.oldInfo.EncodedSize
dblock.Children[file.tailName()] = *dirtyDe
lbc[parentPath.tailPointer()] = dblock
}
return lbc, nil
}
// StartSync starts a sync for the given file. It returns the new
// FileBlock whose readied top-level block includes all writes since
// the last sync. It must be used with CleanupSyncState() and
// UpdatePointers/FinishSyncLocked() like so:
//
// fblock, bps, lbc, syncState, err :=
// ...fbo.StartSync(ctx, lState, md, uid, file)
// defer func() {
// ...fbo.CleanupSyncState(
// ctx, lState, md, file, ..., syncState, err)
// }()
// if err != nil {
// ...
// }
// ...
//
//
// ... = fbo.UpdatePointers(..., func() error {
// ...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState)
// })
func (fbo *folderBlockOps) StartSync(ctx context.Context,
lState *lockState, md *RootMetadata, uid keybase1.UID, file path) (
fblock *FileBlock, bps *blockPutState, lbc localBcache,
syncState fileSyncState, err error) {
if jServer, err := GetJournalServer(fbo.config); err == nil {
jServer.dirtyOpStart(fbo.id())
}
fblock, bps, syncState, dirtyDe, err := fbo.startSyncWrite(
ctx, lState, md, uid, file)
if err != nil {
return nil, nil, nil, syncState, err
}
lbc, err = fbo.makeLocalBcache(ctx, lState, md, file, syncState.savedSi,
dirtyDe)
if err != nil {
return nil, nil, nil, syncState, err
}
return fblock, bps, lbc, syncState, err
}
// Does any clean-up for a sync of the given file, given an error
// (which may be nil) that happens during or after StartSync() and
// before FinishSync(). blocksToRemove may be nil.
func (fbo *folderBlockOps) CleanupSyncState(
ctx context.Context, lState *lockState, md ReadOnlyRootMetadata,
file path, blocksToRemove []BlockPointer,
result fileSyncState, err error) {
if jServer, err := GetJournalServer(fbo.config); err == nil {
defer jServer.dirtyOpEnd(fbo.id())
}
if err == nil {
return
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
// Notify error listeners before we reset the dirty blocks and
// permissions to be granted.
fbo.notifyErrListenersLocked(lState, file.tailPointer(), err)
// If there was an error, we need to back out any changes that
// might have been filled into the sync op, because it could
// get reused again in a later Sync call.
if result.si != nil {
result.si.op.resetUpdateState()
// Save this MD for later, so we can clean up its
// newly-referenced block pointers if necessary.
result.si.toCleanIfUnused = append(result.si.toCleanIfUnused,
mdToCleanIfUnused{md, result.si.bps.DeepCopy()})
}
if isRecoverableBlockError(err) {
if result.si != nil {
fbo.revertSyncInfoAfterRecoverableError(blocksToRemove, result)
}
if result.fblock != nil {
result.fblock.Set(result.savedFblock)
fbo.fixChildBlocksAfterRecoverableErrorLocked(
ctx, lState, file, md,
result.redirtyOnRecoverableError)
}
} else {
// Since the sync has errored out unrecoverably, the deferred
// bytes are already accounted for.
ds := fbo.deferred[file.tailPointer().Ref()]
if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
df.updateNotYetSyncingBytes(-ds.waitBytes)
// Some blocks that were dirty are now clean under their
// readied block ID, and now live in the bps rather than
// the dirty bcache, so we can delete them from the dirty
// bcache.
dirtyBcache := fbo.config.DirtyBlockCache()
for _, ptr := range result.oldFileBlockPtrs {
if df.isBlockOrphaned(ptr) {
fbo.log.CDebugf(ctx, "Deleting dirty orphan: %v", ptr)
if err := dirtyBcache.Delete(fbo.id(), ptr,
fbo.branch()); err != nil {
fbo.log.CDebugf(ctx, "Couldn't delete %v", ptr)
}
}
}
}
// On an unrecoverable error, the deferred writes aren't
// needed anymore since they're already part of the
// (still-)dirty blocks.
delete(fbo.deferred, file.tailPointer().Ref())
}
// The sync is over, due to an error, so reset the map so that we
// don't defer any subsequent writes.
// Old syncing blocks are now just dirty
if df := fbo.dirtyFiles[file.tailPointer()]; df != nil {
df.resetSyncingBlocksToDirty()
}
}
// cleanUpUnusedBlocks cleans up the blocks from any previous failed
// sync attempts.
func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
numToClean := len(syncState.si.toCleanIfUnused)
if numToClean == 0 {
return nil
}
// What blocks are referenced in the successful MD?
refs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr == zeroPtr {
panic("Unexpected zero ref ptr in a sync MD revision")
}
refs[ptr] = true
}
for _, update := range op.allUpdates() {
if update.Ref == zeroPtr {
panic("Unexpected zero update ref ptr in a sync MD revision")
}
refs[update.Ref] = true
}
}
// For each MD to clean, clean up the old failed blocks
// immediately if the merge status matches the successful put, if
// they didn't get referenced in the successful put. If the merge
// status is different (e.g., we ended up on a conflict branch),
// clean it up only if the original revision failed. If the same
// block appears more than once, the one with a different merged
// status takes precedence (which will always come earlier in the
// list of MDs).
blocksSeen := make(map[BlockPointer]bool)
for _, oldMD := range syncState.si.toCleanIfUnused {
bdType := blockDeleteAlways
if oldMD.md.MergedStatus() != md.MergedStatus() {
bdType = blockDeleteOnMDFail
}
failedBps := newBlockPutState(len(oldMD.bps.blockStates))
for _, bs := range oldMD.bps.blockStates {
if bs.blockPtr == zeroPtr {
panic("Unexpected zero block ptr in an old sync MD revision")
}
if blocksSeen[bs.blockPtr] {
continue
}
blocksSeen[bs.blockPtr] = true
if refs[bs.blockPtr] && bdType == blockDeleteAlways {
continue
}
failedBps.blockStates = append(failedBps.blockStates,
blockState{blockPtr: bs.blockPtr})
fbo.log.CDebugf(ctx, "Cleaning up block %v from a previous "+
"failed revision %d (oldMD is %s, bdType=%d)", bs.blockPtr,
oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
}
if len(failedBps.blockStates) > 0 {
fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
}
}
return nil
}
func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, oldPath, newPath path) (
stillDirty bool, err error) {
fbo.blockLock.AssertLocked(lState)
// Redo any writes or truncates that happened to our file while
// the sync was happening.
ds := fbo.deferred[oldPath.tailPointer().Ref()]
stillDirty = len(ds.writes) != 0
delete(fbo.deferred, oldPath.tailPointer().Ref())
// Clear any dirty blocks that resulted from a write/truncate
// happening during the sync, since we're redoing them below.
dirtyBcache := fbo.config.DirtyBlockCache()
for _, ptr := range ds.dirtyDeletes {
fbo.log.CDebugf(ctx, "Deleting deferred dirty ptr %v", ptr)
if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
return true, err
}
}
for _, f := range ds.writes {
err = f(ctx, lState, kmd, newPath)
if err != nil {
// It's a little weird to return an error from a deferred
// write here. Hopefully that will never happen.
return true, err
}
}
return stillDirty, nil
}
// FinishSyncLocked finishes the sync process for a file, given the
// state from StartSync. Specifically, it re-applies any writes that
// happened since the call to StartSync.
func (fbo *folderBlockOps) FinishSyncLocked(
ctx context.Context, lState *lockState,
oldPath, newPath path, md ReadOnlyRootMetadata,
syncState fileSyncState, fbm *folderBlockManager) (
stillDirty bool, err error) {
fbo.blockLock.AssertLocked(lState)
dirtyBcache := fbo.config.DirtyBlockCache()
for _, ptr := range syncState.oldFileBlockPtrs {
fbo.log.CDebugf(ctx, "Deleting dirty ptr %v", ptr)
if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
return true, err
}
}
bcache := fbo.config.BlockCache()
for _, ptr := range syncState.newIndirectFileBlockPtrs {
err := bcache.DeletePermanent(ptr.ID)
if err != nil {
fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
ptr.ID, err)
}
}
stillDirty, err = fbo.doDeferredWritesLocked(
ctx, lState, md, oldPath, newPath)
if err != nil {
return true, err
}
// Clear cached info for the old path. We are guaranteed that any
// concurrent write to this file was deferred, even if it was to a
// block that wasn't currently being sync'd, since the top-most
// block is always in dirtyFiles and is always dirtied during a
// write/truncate.
//
// Also, we can get rid of all the sync state that might have
// happened during the sync, since we will replay the writes
// below anyway.
if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
return true, err
}
if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
return true, err
}
return stillDirty, nil
}
// notifyErrListenersLocked notifies any write operations that are blocked
// on a file so that they can learn about unrecoverable sync errors.
func (fbo *folderBlockOps) notifyErrListenersLocked(lState *lockState,
ptr BlockPointer, err error) {
fbo.blockLock.AssertLocked(lState)
if isRecoverableBlockError(err) {
// Don't bother any listeners with this error, since the sync
// will be retried. Unless the sync has reached its retry
// limit, but in that case the listeners will just proceed as
// normal once the dirty block cache bytes are freed, and
// that's ok since this error isn't fatal.
return
}
df := fbo.dirtyFiles[ptr]
if df != nil {
df.notifyErrListeners(err)
}
}
type searchWithOutOfDateCacheError struct {
}
func (e searchWithOutOfDateCacheError) Error() string {
return "Search is using an out-of-date node cache; " +
"try again with a clean cache."
}
// searchForNodesInDirLocked recursively tries to find a path, and
// ultimately a node, to ptr, given the set of pointers that were
// updated in a particular operation. The keys in nodeMap make up the
// set of BlockPointers that are being searched for, and nodeMap is
// updated in place to include the corresponding discovered nodes.
//
// Returns the number of nodes found by this invocation. If the error
// it returns is searchWithOutOfDateCacheError, the search should be
// retried by the caller with a clean cache.
func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
lState *lockState, cache NodeCache, newPtrs map[BlockPointer]bool,
kmd KeyMetadata, rootNode Node, currDir path, nodeMap map[BlockPointer]Node,
numNodesFoundSoFar int) (int, error) {
fbo.blockLock.AssertAnyLocked(lState)
dirBlock, err := fbo.getDirLocked(
ctx, lState, kmd, currDir, blockRead)
if err != nil {
return 0, err
}
// getDirLocked may have unlocked blockLock, which means the cache
// could have changed out from under us. Verify that didn't
// happen, so we can avoid messing it up with nodes from an old MD
// version. If it did happen, return a special error that lets
// the caller know they should retry with a fresh cache.
if currDir.path[0].BlockPointer !=
cache.PathFromNode(rootNode).tailPointer() {
return 0, searchWithOutOfDateCacheError{}
}
if numNodesFoundSoFar >= len(nodeMap) {
return 0, nil
}
numNodesFound := 0
for name, de := range dirBlock.Children {
if _, ok := nodeMap[de.BlockPointer]; ok {
childPath := currDir.ChildPath(name, de.BlockPointer)
// make a node for every pathnode
n := rootNode
for i, pn := range childPath.path[1:] {
if !pn.BlockPointer.IsValid() {
// Temporary debugging output for KBFS-1764 -- the
// GetOrCreate call below will panic.
fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
"path.path=%v (index %d), name=%s, de=%#v, "+
"nodeMap=%v, newPtrs=%v, kmd=%#v",
childPath, childPath.path, i, name, de, nodeMap,
newPtrs, kmd)
}
n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n)
if err != nil {
return 0, err
}
}
nodeMap[de.BlockPointer] = n
numNodesFound++
if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
return numNodesFound, nil
}
}
// otherwise, recurse if this represents an updated block
if _, ok := newPtrs[de.BlockPointer]; de.Type == Dir && ok {
childPath := currDir.ChildPath(name, de.BlockPointer)
n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
newPtrs, kmd, rootNode, childPath, nodeMap,
numNodesFoundSoFar+numNodesFound)
if err != nil {
return 0, err
}
numNodesFound += n
if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
return numNodesFound, nil
}
}
}
return numNodesFound, nil
}
func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context,
lState *lockState, cache NodeCache, ptrs []BlockPointer,
newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
map[BlockPointer]Node, error) {
fbo.blockLock.AssertAnyLocked(lState)
nodeMap := make(map[BlockPointer]Node)
for _, ptr := range ptrs {
nodeMap[ptr] = nil
}
if len(ptrs) == 0 {
return nodeMap, nil
}
var node Node
// The node cache used by the main part of KBFS is
// fbo.nodeCache. This basically maps from BlockPointers to
// Nodes. Nodes are used by the callers of the library, but
// internally we need to know the series of BlockPointers and
// file/dir names that make up the path of the corresponding
// file/dir. fbo.nodeCache is long-lived and never invalidated.
//
// As folderBranchOps gets informed of new local or remote MD
// updates, which change the BlockPointers of some subset of the
// nodes in this TLF, it calls nodeCache.UpdatePointer for each
// change. Then, when a caller passes some old Node they have
// lying around into an FBO call, we can translate it to its
// current path using fbo.nodeCache. Note that on every TLF
// modification, we are guaranteed that the BlockPointer of the
// root directory will change (because of the merkle-ish tree of
// content hashes we use to assign BlockPointers).
//
// fbo.nodeCache needs to maintain the absolute latest mappings
// for the TLF, or else FBO calls won't see up-to-date data. The
// tension in search comes from the fact that we are trying to
// discover the BlockPointers of certain files at a specific point
// in the MD history, which is not necessarily the same as the
// most-recently-seen MD update. Specifically, some callers
// process a specific range of MDs, but folderBranchOps may have
// heard about a newer one before, or during, when the caller
// started processing. That means fbo.nodeCache may have been
// updated to reflect the newest BlockPointers, and is no longer
// correct as a cache for our search for the data at the old point
// in time.
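// As an illustration (pointers here are hypothetical): if an MD update
// moved a directory from oldPtr to newPtr, then after
// fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr) the same Node
// resolves to newPtr via PathFromNode. A search over the older MD must
// not see newPtr, which is why an out-of-date cache triggers a retry
// with a throwaway cache in searchForNodesLocked.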
if cache == fbo.nodeCache {
// Root node should already exist if we have an up-to-date md.
node = cache.Get(rootPtr.Ref())
if node == nil {
return nil, searchWithOutOfDateCacheError{}
}
} else {
// Root node may or may not exist.
var err error
node, err = cache.GetOrCreate(rootPtr,
string(kmd.GetTlfHandle().GetCanonicalName()), nil)
if err != nil {
return nil, err
}
}
if node == nil {
return nil, fmt.Errorf("Cannot find root node corresponding to %v",
rootPtr)
}
// are they looking for the root directory?
numNodesFound := 0
if _, ok := nodeMap[rootPtr]; ok {
nodeMap[rootPtr] = node
numNodesFound++
if numNodesFound >= len(nodeMap) {
return nodeMap, nil
}
}
rootPath := cache.PathFromNode(node)
if len(rootPath.path) != 1 {
return nil, fmt.Errorf("Invalid root path for %v: %s",
rootPtr, rootPath)
}
_, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs,
kmd, node, rootPath, nodeMap, numNodesFound)
if err != nil {
return nil, err
}
if rootPtr != cache.PathFromNode(node).tailPointer() {
return nil, searchWithOutOfDateCacheError{}
}
return nodeMap, nil
}
func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context,
lState *lockState, cache NodeCache, ptrs []BlockPointer,
newPtrs map[BlockPointer]bool, kmd KeyMetadata, rootPtr BlockPointer) (
map[BlockPointer]Node, NodeCache, error) {
fbo.blockLock.AssertAnyLocked(lState)
// First try the passed-in cache. If it doesn't work because the
// cache is out of date, try again with a clean cache.
nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
newPtrs, kmd, rootPtr)
if _, ok := err.(searchWithOutOfDateCacheError); ok {
// The md is out-of-date, so use a throwaway cache so we
// don't pollute the real node cache with stale nodes.
fbo.log.CDebugf(ctx, "Root node %v doesn't exist in the node "+
"cache; using a throwaway node cache instead",
rootPtr)
cache = newNodeCacheStandard(fbo.folderBranch)
nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
newPtrs, kmd, rootPtr)
}
if err != nil {
return nil, nil, err
}
// Return the whole map even if some nodes weren't found.
return nodeMap, cache, nil
}
// SearchForNodes tries to resolve all the given pointers to a Node
// object, using only the updated pointers specified in newPtrs.
// Pointers that cannot be resolved are left as nil in the returned
// map; it is the caller's responsibility to decide whether to treat
// that as an error. It also returns the cache that ultimately
// contains the nodes -- this might differ from the passed-in cache if
// another goroutine updated that cache and it no longer contains the
// root pointer specified in md.
func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
kmd KeyMetadata, rootPtr BlockPointer) (
map[BlockPointer]Node, NodeCache, error) {
lState := makeFBOLockState()
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.searchForNodesLocked(
ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
}
// SearchForPaths is like SearchForNodes, except it returns a
// consistent view of all the paths of the searched-for pointers.
func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
cache NodeCache, ptrs []BlockPointer, newPtrs map[BlockPointer]bool,
kmd KeyMetadata, rootPtr BlockPointer) (map[BlockPointer]path, error) {
lState := makeFBOLockState()
// Hold the lock while processing the paths so they can't be changed.
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
nodeMap, cache, err :=
fbo.searchForNodesLocked(
ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
if err != nil {
return nil, err
}
paths := make(map[BlockPointer]path)
for ptr, n := range nodeMap {
if n == nil {
paths[ptr] = path{}
continue
}
p := cache.PathFromNode(n)
if p.tailPointer() != ptr {
return nil, NodeNotFoundError{ptr}
}
paths[ptr] = p
}
return paths, nil
}
// getUndirtiedEntry returns the clean entry for the given path
// corresponding to a cached dirty entry. If there is no dirty or
// clean entry, nil is returned.
func (fbo *folderBlockOps) getUndirtiedEntry(
ctx context.Context, lState *lockState, kmd KeyMetadata,
file path) (*DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
_, ok := fbo.deCache[file.tailPointer().Ref()]
if !ok {
return nil, nil
}
// Get the undirtied dir block.
dblock, err := fbo.getDirLocked(
ctx, lState, kmd, *file.parentPath(), blockRead)
if err != nil {
return nil, err
}
undirtiedEntry, ok := dblock.Children[file.tailName()]
if !ok {
return nil, nil
}
return &undirtiedEntry, nil
}
func (fbo *folderBlockOps) setCachedAttr(
lState *lockState, ref BlockRef, attr attrChange, realEntry *DirEntry,
doCreate bool) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.setCachedAttrLocked(lState, ref, attr, realEntry, doCreate)
}
// UpdateCachedEntryAttributes updates any cached entry for the given
// path according to the given op. The node for the path is returned
// if there is one.
func (fbo *folderBlockOps) UpdateCachedEntryAttributes(
ctx context.Context, lState *lockState, kmd KeyMetadata,
dir path, op *setAttrOp) (Node, error) {
childPath := dir.ChildPathNoPtr(op.Name)
// find the node for the actual change; requires looking up
// the child entry to get the BlockPointer, unfortunately.
de, err := fbo.GetDirtyEntry(ctx, lState, kmd, childPath)
if err != nil {
return nil, err
}
childNode := fbo.nodeCache.Get(de.Ref())
if childNode == nil {
// Nothing to do, since the cache entry won't be
// accessible from any node.
return nil, nil
}
childPath = dir.ChildPath(op.Name, de.BlockPointer)
// If there's a cache entry, we need to update it, so try and
// fetch the undirtied entry.
cleanEntry, err := fbo.getUndirtiedEntry(ctx, lState, kmd, childPath)
if err != nil {
return nil, err
}
if cleanEntry != nil {
fbo.setCachedAttr(lState, de.Ref(), op.Attr, cleanEntry, false)
}
return childNode, nil
}
// UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
// for the given path of an unlinked file, according to the given op,
// and it makes a new dirty cache entry if one doesn't exist yet. We
// assume Sync will be called eventually on the corresponding open
// file handle, which will clear out the entry.
func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
ctx context.Context, lState *lockState, op *setAttrOp, de DirEntry) {
fbo.setCachedAttr(lState, de.Ref(), op.Attr, &de, true)
}
func (fbo *folderBlockOps) getDeferredWriteCountForTest(lState *lockState) int {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
writes := 0
for _, ds := range fbo.deferred {
writes += len(ds.writes)
}
return writes
}
func (fbo *folderBlockOps) updatePointer(kmd KeyMetadata, oldPtr BlockPointer, newPtr BlockPointer, shouldPrefetch bool) {
updated := fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr)
if !updated {
return
}
// Only prefetch if the updated pointer is a new block ID.
if oldPtr.ID != newPtr.ID {
// TODO: Remove this log statement when we're done debugging because it'll be everywhere.
fbo.log.CDebugf(context.TODO(), "Updated reference for pointer %s to %s.", oldPtr.ID, newPtr.ID)
if shouldPrefetch {
// Prefetch the new ref, but only if the old ref already exists in
// the block cache. Ideally we'd always prefetch it, but we need
// the type of the block so that we can call `NewEmpty`.
// TODO KBFS-1850: Eventually we should use the codec library's
// ability to decode into a nil interface to no longer need to
// pre-initialize the correct type.
block, _, _, err := fbo.config.BlockCache().GetWithPrefetch(oldPtr)
if err != nil {
return
}
fbo.config.BlockOps().Prefetcher().PrefetchBlock(
block.NewEmpty(),
newPtr,
kmd,
updatePointerPrefetchPriority,
)
}
}
}
// UpdatePointers updates all the pointers in the node cache
// atomically. If `afterUpdateFn` is non-nil, it's called under the
// same block lock under which the pointers were updated.
func (fbo *folderBlockOps) UpdatePointers(kmd KeyMetadata, lState *lockState,
op op, shouldPrefetch bool, afterUpdateFn func() error) error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
for _, update := range op.allUpdates() {
fbo.updatePointer(kmd, update.Unref, update.Ref, shouldPrefetch)
}
if afterUpdateFn == nil {
return nil
}
return afterUpdateFn()
}
func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(ctx context.Context,
lState *lockState, ref BlockRef) {
fbo.blockLock.AssertLocked(lState)
oldNode := fbo.nodeCache.Get(ref)
if oldNode == nil {
return
}
oldPath := fbo.nodeCache.PathFromNode(oldNode)
fbo.log.CDebugf(ctx, "Unlinking missing node %s/%v during "+
"fast-forward", oldPath, ref)
fbo.nodeCache.Unlink(ref, oldPath)
}
func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(ctx context.Context,
lState *lockState, currDir path, children map[string]map[pathNode]bool,
kmd KeyMetadata) ([]NodeChange, error) {
fbo.blockLock.AssertLocked(lState)
dirBlock, err := fbo.getDirLocked(ctx, lState, kmd, currDir, blockRead)
if err != nil {
return nil, err
}
prefix := currDir.String()
// TODO: parallelize me?
var changes []NodeChange
for child := range children[prefix] {
entry, ok := dirBlock.Children[child.Name]
if !ok {
fbo.unlinkDuringFastForwardLocked(
ctx, lState, child.BlockPointer.Ref())
continue
}
fbo.log.CDebugf(ctx, "Fast-forwarding %v -> %v",
child.BlockPointer, entry.BlockPointer)
fbo.updatePointer(kmd, child.BlockPointer,
entry.BlockPointer, true)
node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
newPath := fbo.nodeCache.PathFromNode(node)
if entry.Type == Dir {
if node != nil {
change := NodeChange{Node: node}
for subchild := range children[newPath.String()] {
change.DirUpdated = append(change.DirUpdated, subchild.Name)
}
changes = append(changes, change)
}
childChanges, err := fbo.fastForwardDirAndChildrenLocked(
ctx, lState, newPath, children, kmd)
if err != nil {
return nil, err
}
changes = append(changes, childChanges...)
} else if node != nil {
// File -- invalidate the entire file contents.
changes = append(changes, NodeChange{
Node: node,
FileUpdated: []WriteRange{{Len: 0, Off: 0}},
})
}
}
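// Mark this directory as processed; any entries still left in
// `children` afterward are assumed deleted and get unlinked by
// FastForwardAllNodes.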
delete(children, prefix)
return changes, nil
}
// FastForwardAllNodes attempts to update the block pointers
// associated with nodes in the cache by searching for their paths in
// the current version of the TLF. If it can't find a corresponding
// node, it assumes it's been deleted and unlinks it. Returns the set
// of node changes that resulted. If there are no nodes, it returns a
// nil error because there's nothing to be done.
func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata) (
changes []NodeChange, err error) {
// Take a hard lock through this whole process. TODO: is there
// any way to relax this? It could lead to file system operation
// timeouts, even on reads, if we hold it too long.
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
nodes := fbo.nodeCache.AllNodes()
if len(nodes) == 0 {
// Nothing needs to be done!
return nil, nil
}
fbo.log.CDebugf(ctx, "Fast-forwarding %d nodes", len(nodes))
defer func() { fbo.log.CDebugf(ctx, "Fast-forward complete: %v", err) }()
// Build a "tree" representation for each interesting path prefix.
children := make(map[string]map[pathNode]bool)
var rootPath path
for _, n := range nodes {
p := fbo.nodeCache.PathFromNode(n)
if len(p.path) == 1 {
rootPath = p
}
prevPath := ""
for _, pn := range p.path {
if prevPath != "" {
childPNs := children[prevPath]
if childPNs == nil {
childPNs = make(map[pathNode]bool)
children[prevPath] = childPNs
}
childPNs[pn] = true
}
prevPath = filepath.Join(prevPath, pn.Name)
}
}
if !rootPath.isValid() {
return nil, errors.New("Couldn't find the root path")
}
fbo.log.CDebugf(ctx, "Fast-forwarding root %v -> %v",
rootPath.path[0].BlockPointer, md.data.Dir.BlockPointer)
fbo.updatePointer(md, rootPath.path[0].BlockPointer,
md.data.Dir.BlockPointer, false)
rootPath.path[0].BlockPointer = md.data.Dir.BlockPointer
rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
if rootNode != nil {
change := NodeChange{Node: rootNode}
for child := range children[rootPath.String()] {
change.DirUpdated = append(change.DirUpdated, child.Name)
}
changes = append(changes, change)
}
childChanges, err := fbo.fastForwardDirAndChildrenLocked(
ctx, lState, rootPath, children, md)
if err != nil {
return nil, err
}
changes = append(changes, childChanges...)
// Unlink any children that remain.
for _, childPNs := range children {
for child := range childPNs {
fbo.unlinkDuringFastForwardLocked(
ctx, lState, child.BlockPointer.Ref())
}
}
return changes, nil
}
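// Editor's note (illustrative, with assumed paths): for cached nodes at
// /tlf/a and /tlf/a/b.txt, the children map built in FastForwardAllNodes is
// roughly {"tlf": {a}, "tlf/a": {b.txt}}, and fastForwardDirAndChildrenLocked
// consumes it top-down from the root, deleting each prefix as it goes.
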
type chainsPathPopulator interface {
populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
}
// populateChainPaths updates all the paths in all the ops tracked by
// `chains`, using the main nodeCache.
func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
log logger.Logger, chains *crChains, includeCreates bool) error {
_, err := chains.getPaths(ctx, fbo, log, fbo.nodeCache, includeCreates)
return err
}
var _ chainsPathPopulator = (*folderBlockOps)(nil)
| 1 | 16,524 | This was a bug in KBFS-2071 -- oops. | keybase-kbfs | go |
@@ -82,9 +82,18 @@ type PaymentChannel struct {
// payer and payee upon initialization or extension
AgreedEol *types.BlockHeight `json:"agreed_eol"`
+ // Conditions are the set of conditions for redeeming or closing the payment
+ // channel
+ Conditions *types.Predicate `json:"conditions"`
+
// Eol is the actual expiration for the payment channel which can differ from
// AgreedEol when the payment channel is in dispute
Eol *types.BlockHeight `json:"eol"`
+
+ // Redeemed is a flag indicating whether or not Redeem has been called on the
+ // payment channel yet. This is necessary because AmountRedeemed can still be
+ // zero in the event of a zero-value voucher
+ Redeemed bool `json:"redeemed"`
}
// Actor provides a mechanism for off chain payments. | 1 | package paymentbroker
import (
"context"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-hamt-ipld"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/exec"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm/errors"
)
const (
// ErrNonAccountActor indicates an non-account actor attempted to create a payment channel.
ErrNonAccountActor = 33
// ErrDuplicateChannel indicates an attempt to create a payment channel with an existing id.
ErrDuplicateChannel = 34
// ErrEolTooLow indicates an attempt to lower the Eol of a payment channel.
ErrEolTooLow = 35
// ErrReclaimBeforeEol indicates an attempt to reclaim funds before the eol of the channel.
ErrReclaimBeforeEol = 36
// ErrInsufficientChannelFunds indicates an attempt to take more funds than the channel contains.
ErrInsufficientChannelFunds = 37
// ErrUnknownChannel indicates an invalid channel id.
ErrUnknownChannel = 38
// ErrWrongTarget indicates attempt to redeem from wrong target account.
ErrWrongTarget = 39
// ErrExpired indicates the block height has exceeded the eol.
ErrExpired = 40
// ErrAlreadyWithdrawn indicates amount of the voucher has already been withdrawn.
ErrAlreadyWithdrawn = 41
// ErrInvalidSignature indicates the signature is invalid.
ErrInvalidSignature = 42
	// ErrTooEarly indicates that the block height is too low to satisfy a voucher.
	ErrTooEarly = 43
	// ErrConditionInvalid indicates that the condition attached to a voucher did not execute successfully.
ErrConditionInvalid = 44
)
// CancelDelayBlockTime is the number of rounds given to the target to respond after the channel
// is canceled before it expires.
// TODO: what is a secure value for this? Value is arbitrary right now.
// See https://github.com/filecoin-project/go-filecoin/issues/1887
const CancelDelayBlockTime = 10000
// Errors map error codes to revert errors this actor may return.
var Errors = map[uint8]error{
ErrTooEarly: errors.NewCodedRevertError(ErrTooEarly, "block height too low to redeem voucher"),
ErrNonAccountActor: errors.NewCodedRevertError(ErrNonAccountActor, "Only account actors may create payment channels"),
ErrDuplicateChannel: errors.NewCodedRevertError(ErrDuplicateChannel, "Duplicate create channel attempt"),
ErrEolTooLow: errors.NewCodedRevertError(ErrEolTooLow, "payment channel eol may not be decreased"),
	ErrReclaimBeforeEol:         errors.NewCodedRevertError(ErrReclaimBeforeEol, "payment channel may not be reclaimed before eol"),
ErrInsufficientChannelFunds: errors.NewCodedRevertError(ErrInsufficientChannelFunds, "voucher amount exceeds amount in channel"),
ErrUnknownChannel: errors.NewCodedRevertError(ErrUnknownChannel, "payment channel is unknown"),
ErrWrongTarget: errors.NewCodedRevertError(ErrWrongTarget, "attempt to redeem channel from wrong target account"),
ErrExpired: errors.NewCodedRevertError(ErrExpired, "block height has exceeded channel's end of life"),
ErrAlreadyWithdrawn: errors.NewCodedRevertError(ErrAlreadyWithdrawn, "update amount has already been redeemed"),
ErrInvalidSignature: errors.NewCodedRevertErrorf(ErrInvalidSignature, "signature failed to validate"),
}
func init() {
cbor.RegisterCborType(PaymentChannel{})
}
// PaymentChannel records the intent to pay funds to a target account.
type PaymentChannel struct {
// Target is the address of the account to which funds will be transferred
Target address.Address `json:"target"`
// Amount is the total amount of FIL that has been transferred to the channel from the payer
Amount *types.AttoFIL `json:"amount"`
// AmountRedeemed is the amount of FIL already transferred to the target
AmountRedeemed *types.AttoFIL `json:"amount_redeemed"`
// AgreedEol is the expiration for the payment channel agreed upon by the
// payer and payee upon initialization or extension
AgreedEol *types.BlockHeight `json:"agreed_eol"`
// Eol is the actual expiration for the payment channel which can differ from
// AgreedEol when the payment channel is in dispute
Eol *types.BlockHeight `json:"eol"`
}
// Actor provides a mechanism for off chain payments.
// It allows the creation of payment channels that hold funds for a target account
// and permits that account to withdraw funds only with a voucher signed by the
// channel's creator.
type Actor struct{}
// InitializeState stores the actor's initial data structure.
func (pb *Actor) InitializeState(storage exec.Storage, initializerData interface{}) error {
// pb's default state is an empty lookup, so this method is a no-op
return nil
}
// Exports returns the actor's exports.
func (pb *Actor) Exports() exec.Exports {
return paymentBrokerExports
}
var _ exec.ExecutableActor = (*Actor)(nil)
var paymentBrokerExports = exec.Exports{
"cancel": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID},
Return: nil,
},
"close": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.ChannelID, abi.AttoFIL, abi.BlockHeight, abi.Predicate, abi.Bytes, abi.Parameters},
Return: nil,
},
"createChannel": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.BlockHeight},
Return: []abi.Type{abi.ChannelID},
},
"extend": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID, abi.BlockHeight},
Return: nil,
},
"ls": &exec.FunctionSignature{
Params: []abi.Type{abi.Address},
Return: []abi.Type{abi.Bytes},
},
"reclaim": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID},
Return: nil,
},
"redeem": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.ChannelID, abi.AttoFIL, abi.BlockHeight, abi.Predicate, abi.Bytes, abi.Parameters},
Return: nil,
},
"voucher": &exec.FunctionSignature{
Params: []abi.Type{abi.ChannelID, abi.AttoFIL, abi.BlockHeight, abi.Predicate},
Return: []abi.Type{abi.Bytes},
},
}
// CreateChannel creates a new payment channel from the caller to the target.
// The value attached to the invocation is used as the deposit, and the channel
// will expire and return all of its money to the owner after the given block height.
func (pb *Actor) CreateChannel(vmctx exec.VMContext, target address.Address, eol *types.BlockHeight) (*types.ChannelID, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
	// require that the sender be an account actor to ensure the nonce is a valid channel id
if !vmctx.IsFromAccountActor() {
return nil, errors.CodeError(Errors[ErrNonAccountActor]), Errors[ErrNonAccountActor]
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
channelID := types.NewChannelID(uint64(vmctx.Message().Nonce))
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
// check to see if payment channel is duplicate
_, err := byChannelID.Find(ctx, channelID.KeyString())
if err != hamt.ErrNotFound { // we expect to not find the payment channel
if err == nil {
return Errors[ErrDuplicateChannel]
}
return errors.FaultErrorWrapf(err, "Error retrieving payment channel")
}
// add payment channel and commit
err = byChannelID.Set(ctx, channelID.KeyString(), &PaymentChannel{
Target: target,
Amount: vmctx.Message().Value,
AmountRedeemed: types.NewAttoFILFromFIL(0),
AgreedEol: eol,
Eol: eol,
})
if err != nil {
return errors.FaultErrorWrap(err, "Could not set payment channel")
}
return nil
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return nil, 1, errors.FaultErrorWrap(err, "Error creating payment channel")
}
return nil, errors.CodeError(err), err
}
return channelID, 0, nil
}
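// Editor's note (sketch, not from the original source): the channel id above is
// simply the sender's message nonce, which is why CreateChannel insists on an
// account-actor sender -- account-actor nonces are unique per payer, so each
// (payer, nonce) pair identifies at most one channel.
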
// Redeem is called by the target account to withdraw funds with authorization from the payer.
// This method is exactly like Close except it doesn't close the channel.
// This is useful when you want to checkpoint the value in a payment, but continue to use the
// channel afterwards. The amt represents the total funds authorized so far, so that subsequent
// calls to Redeem will only transfer the difference between the given amt and the greatest
// amt taken so far. A series of channel transactions might look like this:
// Payer: 2000, Target: 0, Channel: 0
// payer createChannel(1000) -> Payer: 1000, Target: 0, Channel: 1000
// target Redeem(100) -> Payer: 1000, Target: 100, Channel: 900
// target Redeem(200) -> Payer: 1000, Target: 200, Channel: 800
// target Close(500) -> Payer: 1500, Target: 500, Channel: 0
//
// If a condition is provided in the voucher:
// - The parameters provided in the condition will be combined with redeemerConditionParams
//  - A message will be sent to the condition.To address using the condition.Method with the combined params
// - If the message returns an error the condition is considered to be false and the redeem will fail
func (pb *Actor) Redeem(vmctx exec.VMContext, payer address.Address, chid *types.ChannelID, amt *types.AttoFIL,
validAt *types.BlockHeight, condition *types.Predicate, sig []byte, redeemerConditionParams []interface{}) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
if !VerifyVoucherSignature(payer, chid, amt, validAt, condition, sig) {
return errors.CodeError(Errors[ErrInvalidSignature]), Errors[ErrInvalidSignature]
}
if errCode, err := checkCondition(vmctx, condition, redeemerConditionParams); err != nil {
return errCode, err
}
ctx := context.Background()
storage := vmctx.Storage()
err := withPayerChannels(ctx, storage, payer, func(byChannelID exec.Lookup) error {
var channel *PaymentChannel
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// validate the amount can be sent to the target and send payment to that address.
err = updateChannel(vmctx, vmctx.Message().From, channel, amt, validAt)
if err != nil {
return err
}
// Reset the EOL to the originally agreed upon EOL in the event that the
// channel has been cancelled.
channel.Eol = channel.AgreedEol
return byChannelID.Set(ctx, chid.KeyString(), channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error redeeming payment channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Close first executes the logic performed in the Redeem method, then returns all
// funds remaining in the channel to the payer account and deletes the channel.
//
// If a condition is provided in the voucher:
// - The parameters provided in the condition will be combined with redeemerConditionParams
//  - A message will be sent to the condition.To address using the condition.Method with the combined params
// - If the message returns an error the condition is considered to be false and the redeem will fail
func (pb *Actor) Close(vmctx exec.VMContext, payer address.Address, chid *types.ChannelID, amt *types.AttoFIL,
validAt *types.BlockHeight, condition *types.Predicate, sig []byte, redeemerConditionParams []interface{}) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
if !VerifyVoucherSignature(payer, chid, amt, validAt, condition, sig) {
return errors.CodeError(Errors[ErrInvalidSignature]), Errors[ErrInvalidSignature]
}
if errCode, err := checkCondition(vmctx, condition, redeemerConditionParams); err != nil {
return errCode, err
}
ctx := context.Background()
storage := vmctx.Storage()
err := withPayerChannels(ctx, storage, payer, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// validate the amount can be sent to the target and send payment to that address.
err = updateChannel(vmctx, vmctx.Message().From, channel, amt, validAt)
if err != nil {
return err
}
err = byChannelID.Set(ctx, chid.KeyString(), channel)
if err != nil {
return err
}
// return funds to payer
return reclaim(ctx, vmctx, byChannelID, payer, chid, channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error updating or reclaiming channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Extend can be used by the owner of a channel to add more funds to it and
// extend the Channel's lifespan.
func (pb *Actor) Extend(vmctx exec.VMContext, chid *types.ChannelID, eol *types.BlockHeight) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// eol can only be increased
if channel.Eol.GreaterThan(eol) {
return Errors[ErrEolTooLow]
}
// set new eol
channel.AgreedEol = eol
channel.Eol = eol
// increment the value
channel.Amount = channel.Amount.Add(vmctx.Message().Value)
return byChannelID.Set(ctx, chid.KeyString(), channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error extending channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Cancel can be used to end an off chain payment early. It lowers the EOL of
// the payment channel to CancelDelayBlockTime rounds from now and allows the
// payer to reclaim their funds after that. In the window before the channel
// closes, the target can still dispute the cancellation by redeeming a voucher.
func (pb *Actor) Cancel(vmctx exec.VMContext, chid *types.ChannelID) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
eol := vmctx.BlockHeight().Add(types.NewBlockHeight(CancelDelayBlockTime))
// eol can only be decreased
if channel.Eol.GreaterThan(eol) {
channel.Eol = eol
}
return byChannelID.Set(ctx, chid.KeyString(), channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error cancelling channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Reclaim is used by the owner of a channel to reclaim unspent funds in timed
// out payment Channels they own.
func (pb *Actor) Reclaim(vmctx exec.VMContext, chid *types.ChannelID) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
err := withPayerChannels(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
// reclaim may only be called at or after Eol
if vmctx.BlockHeight().LessThan(channel.Eol) {
return Errors[ErrReclaimBeforeEol]
}
// return funds to payer
return reclaim(ctx, vmctx, byChannelID, payerAddress, chid, channel)
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return 1, errors.FaultErrorWrap(err, "Error reclaiming channel")
}
return errors.CodeError(err), err
}
return 0, nil
}
// Voucher takes a channel id and amount and creates a new unsigned PaymentVoucher
// against the given channel. It also takes a block height parameter "validAt"
// enforcing that the voucher is not redeemed until the given block height.
// Voucher errors if the channel doesn't exist or contains less than the
// requested amount.
// If a condition is provided, attempts to redeem or close with the voucher will
// first send a message based on the condition and require a successful response
// for funds to be transferred.
func (pb *Actor) Voucher(vmctx exec.VMContext, chid *types.ChannelID, amount *types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate) ([]byte, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return []byte{}, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
payerAddress := vmctx.Message().From
var voucher types.PaymentVoucher
err := withPayerChannelsForReading(ctx, storage, payerAddress, func(byChannelID exec.Lookup) error {
var channel *PaymentChannel
chInt, err := byChannelID.Find(ctx, chid.KeyString())
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownChannel]
}
return errors.FaultErrorWrapf(err, "Could not retrieve payment channel with ID: %s", chid)
}
channel, ok := chInt.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channels lookup")
}
		// voucher must not exceed the total amount in the channel
if channel.Amount.LessThan(amount) {
return Errors[ErrInsufficientChannelFunds]
}
// set voucher
voucher = types.PaymentVoucher{
Channel: *chid,
Payer: vmctx.Message().From,
Target: channel.Target,
Amount: *amount,
ValidAt: *validAt,
Condition: condition,
}
return nil
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return nil, 1, errors.FaultErrorWrap(err, "Error reclaiming channel")
}
return nil, errors.CodeError(err), err
}
voucherBytes, err := actor.MarshalStorage(voucher)
if err != nil {
return nil, 1, errors.FaultErrorWrap(err, "Error marshalling voucher")
}
return voucherBytes, 0, nil
}
// Ls returns all payment channels for a given payer address.
// The slice of channels will be returned as cbor encoded map from string channelId to PaymentChannel.
func (pb *Actor) Ls(vmctx exec.VMContext, payer address.Address) ([]byte, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return []byte{}, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
ctx := context.Background()
storage := vmctx.Storage()
channels := map[string]*PaymentChannel{}
err := withPayerChannelsForReading(ctx, storage, payer, func(byChannelID exec.Lookup) error {
kvs, err := byChannelID.Values(ctx)
if err != nil {
return err
}
for _, kv := range kvs {
pc, ok := kv.Value.(*PaymentChannel)
if !ok {
return errors.NewFaultError("Expected PaymentChannel from channel lookup")
}
channels[kv.Key] = pc
}
return nil
})
if err != nil {
// ensure error is properly wrapped
if !errors.IsFault(err) && !errors.ShouldRevert(err) {
return nil, 1, errors.FaultErrorWrap(err, "Error reclaiming channel")
}
return nil, errors.CodeError(err), err
}
channelsBytes, err := actor.MarshalStorage(channels)
if err != nil {
return nil, 1, errors.FaultErrorWrap(err, "Error marshalling voucher")
}
return channelsBytes, 0, nil
}
func updateChannel(ctx exec.VMContext, target address.Address, channel *PaymentChannel, amt *types.AttoFIL, validAt *types.BlockHeight) error {
if target != channel.Target {
return Errors[ErrWrongTarget]
}
if ctx.BlockHeight().LessThan(validAt) {
return Errors[ErrTooEarly]
}
if ctx.BlockHeight().GreaterEqual(channel.Eol) {
return Errors[ErrExpired]
}
if amt.GreaterThan(channel.Amount) {
return Errors[ErrInsufficientChannelFunds]
}
if amt.LessEqual(channel.AmountRedeemed) {
return Errors[ErrAlreadyWithdrawn]
}
// transfer funds to sender
updateAmount := amt.Sub(channel.AmountRedeemed)
_, _, err := ctx.Send(ctx.Message().From, "", updateAmount, nil)
if err != nil {
return err
}
// update amount redeemed from this channel
channel.AmountRedeemed = amt
return nil
}
func reclaim(ctx context.Context, vmctx exec.VMContext, byChannelID exec.Lookup, payer address.Address, chid *types.ChannelID, channel *PaymentChannel) error {
amt := channel.Amount.Sub(channel.AmountRedeemed)
if amt.LessEqual(types.ZeroAttoFIL) {
return nil
}
// clean up
err := byChannelID.Delete(ctx, chid.KeyString())
if err != nil {
return err
}
// send funds
_, _, err = vmctx.Send(payer, "", amt, nil)
if err != nil {
return errors.RevertErrorWrap(err, "could not send update funds")
}
return nil
}
// separator is the separator used when concatenating channel and amount in a
// voucher signature.
const separator = 0x0
// SignVoucher creates the signature for the given combination of
// channel, amount, validAt (earliest block height for redeem) and from address.
// It does so by signing the following bytes: (channelID | 0x0 | amount | 0x0 | validAt)
func SignVoucher(channelID *types.ChannelID, amount *types.AttoFIL, validAt *types.BlockHeight, addr address.Address, condition *types.Predicate, signer types.Signer) (types.Signature, error) {
data, err := createVoucherSignatureData(channelID, amount, validAt, condition)
if err != nil {
return nil, err
}
return signer.SignBytes(data, addr)
}
// VerifyVoucherSignature returns whether the voucher's signature is valid
func VerifyVoucherSignature(payer address.Address, chid *types.ChannelID, amt *types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate, sig []byte) bool {
data, err := createVoucherSignatureData(chid, amt, validAt, condition)
// the only error is failure to encode the values
if err != nil {
return false
}
return types.IsValidSignature(data, payer, sig)
}
func createVoucherSignatureData(channelID *types.ChannelID, amount *types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate) ([]byte, error) {
data := append(channelID.Bytes(), separator)
data = append(data, amount.Bytes()...)
data = append(data, separator)
if condition != nil {
data = append(data, condition.To.Bytes()...)
data = append(data, []byte(condition.Method)...)
encodedParams, err := abi.ToEncodedValues(condition.Params...)
if err != nil {
return []byte{}, err
}
data = append(data, encodedParams...)
}
return append(data, validAt.Bytes()...), nil
}
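// Editor's sketch of the byte layout signed above (illustrative, not normative):
//
//	channelID.Bytes() | 0x0 | amount.Bytes() | 0x0 |
//	[condition.To.Bytes() | condition.Method | abi-encoded condition.Params] |
//	validAt.Bytes()
//
// The bracketed segment is only present when the voucher carries a condition.
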
func withPayerChannels(ctx context.Context, storage exec.Storage, payer address.Address, f func(exec.Lookup) error) error {
stateCid, err := actor.WithLookup(ctx, storage, storage.Head(), func(byPayer exec.Lookup) error {
byChannelLookup, err := findByChannelLookup(ctx, storage, byPayer, payer)
if err != nil {
return err
}
// run inner function
err = f(byChannelLookup)
if err != nil {
return err
}
// commit channel lookup
commitedCID, err := byChannelLookup.Commit(ctx)
if err != nil {
return err
}
// if all payers channels are gone, delete the payer
if byChannelLookup.IsEmpty() {
return byPayer.Delete(ctx, payer.String())
}
// set payers channels into primary lookup
return byPayer.Set(ctx, payer.String(), commitedCID)
})
if err != nil {
return err
}
return storage.Commit(stateCid, storage.Head())
}
func withPayerChannelsForReading(ctx context.Context, storage exec.Storage, payer address.Address, f func(exec.Lookup) error) error {
return actor.WithLookupForReading(ctx, storage, storage.Head(), func(byPayer exec.Lookup) error {
byChannelLookup, err := findByChannelLookup(ctx, storage, byPayer, payer)
if err != nil {
return err
}
// run inner function
return f(byChannelLookup)
})
}
func findByChannelLookup(ctx context.Context, storage exec.Storage, byPayer exec.Lookup, payer address.Address) (exec.Lookup, error) {
byChannelID, err := byPayer.Find(ctx, payer.String())
if err != nil {
if err == hamt.ErrNotFound {
return actor.LoadLookup(ctx, storage, cid.Undef)
}
return nil, err
}
byChannelCID, ok := byChannelID.(cid.Cid)
if !ok {
return nil, errors.NewFaultError("Paymentbroker payer is not a Cid")
}
return actor.LoadTypedLookup(ctx, storage, byChannelCID, &PaymentChannel{})
}
// checkCondition combines params in the condition with the redeemerSuppliedParams, sends a message
// to the actor and method specified in the condition, and returns an error if one exists.
func checkCondition(vmctx exec.VMContext, condition *types.Predicate, redeemerSuppliedParams []interface{}) (uint8, error) {
if condition == nil {
return 0, nil
}
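	// append onto a zero-length, zero-capacity reslice of condition.Params so
	// the original slice is copied instead of being mutated when the
	// redeemer-supplied params are appended.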
params := append(condition.Params[:0:0], condition.Params...)
params = append(params, redeemerSuppliedParams...)
_, _, err := vmctx.Send(condition.To, condition.Method, types.NewZeroAttoFIL(), params)
if err != nil {
if errors.IsFault(err) {
return errors.CodeError(err), err
}
return ErrConditionInvalid, errors.RevertErrorWrap(err, "failed to validate voucher condition")
}
return 0, nil
}
| 1 | 18,892 | // Condition is a condition for ... Condition | filecoin-project-venus | go |
@@ -207,6 +207,16 @@ describe "National Capital Region proposals" do
expect(current_path).to eq("/proposals/#{work_order.proposal.id}")
end
+ it "does not resave unchanged requests" do
+ visit "/ncr/work_orders/#{work_order.id}/edit"
+ click_on 'Submit for approval'
+ visit "/ncr/work_orders/#{work_order.id}/edit"
+ click_on 'Submit for approval'
+
+ expect(current_path).to eq("/proposals/#{work_order.proposal.id}")
+ expect(page).to have_content("No changes were made to the request")
+ end
+
it "has a disabled field if first approval is done" do
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(find("[name=approver_email]")["disabled"]).to be_nil | 1 | describe "National Capital Region proposals" do
it "requires sign-in" do
visit '/ncr/work_orders/new'
expect(current_path).to eq('/')
expect(page).to have_content("You need to sign in")
end
context "when signed in as the requester" do
let(:requester) { FactoryGirl.create(:user) }
before do
login_as(requester)
end
it "saves a Proposal with the attributes" do
expect(Dispatcher).to receive(:deliver_new_proposal_emails)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
fill_in 'Description', with: "desc content"
choose 'BA80'
fill_in 'RWA Number', with: 'F1234567'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
check "I am going to be using direct pay for this transaction"
fill_in "Approving Official's Email Address", with: '[email protected]'
select Ncr::BUILDING_NUMBERS[0], :from => 'ncr_work_order_building_number'
select Ncr::Organization.all[0], :from => 'ncr_work_order_org_code'
expect {
click_on 'Submit for approval'
}.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(proposal.public_id).to have_content("FY")
expect(page).to have_content("Proposal submitted")
expect(current_path).to eq("/proposals/#{proposal.id}")
expect(proposal.name).to eq("buying stuff")
expect(proposal.flow).to eq('linear')
work_order = proposal.client_data
expect(work_order.client).to eq('ncr')
expect(work_order.expense_type).to eq('BA80')
expect(work_order.vendor).to eq('ACME')
expect(work_order.amount).to eq(123.45)
expect(work_order.direct_pay).to eq(true)
expect(work_order.building_number).to eq(Ncr::BUILDING_NUMBERS[0])
expect(work_order.org_code).to eq(Ncr::Organization.all[0].to_s)
expect(work_order.description).to eq('desc content')
expect(proposal.requester).to eq(requester)
expect(proposal.approvers.map(&:email_address)).to eq(%w(
[email protected]
[email protected]
))
end
it "defaults to the approver from the last request" do
proposal = FactoryGirl.create(:proposal, :with_approvers,
requester: requester)
visit '/ncr/work_orders/new'
expect(find_field("Approving Official's Email Address").value).to eq(
proposal.approvers.first.email_address)
end
it "requires a project_title" do
visit '/ncr/work_orders/new'
expect {
click_on 'Submit for approval'
}.to_not change { Proposal.count }
expect(page).to have_content("Project title can't be blank")
end
with_feature 'HIDE_BA61_OPTION' do
it "removes the radio button" do
visit '/ncr/work_orders/new'
expect(page).to_not have_content('BA61')
end
it "defaults to BA80" do
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
fill_in 'Description', with: "desc content"
# no need to select BA80
fill_in 'RWA Number', with: 'F1234567'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
fill_in "Approving Official's Email Address", with: '[email protected]'
select Ncr::BUILDING_NUMBERS[0], :from => 'ncr_work_order_building_number'
select Ncr::Organization.all[0], :from => 'ncr_work_order_org_code'
expect {
click_on 'Submit for approval'
}.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(proposal.client_data.expense_type).to eq('BA80')
end
end
it "doesn't save when the amount is too high" do
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
choose 'BA80'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 10_000
expect {
click_on 'Submit for approval'
}.to_not change { Proposal.count }
expect(current_path).to eq('/ncr/work_orders')
expect(page).to have_content("Amount must be less than or equal to $3,000")
# keeps the form values
expect(find_field('Amount').value).to eq('10000')
end
it "includes has overwritten field names" do
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
choose 'BA80'
fill_in 'RWA Number', with: 'B9876543'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
fill_in "Approving Official's Email Address", with: '[email protected]'
select Ncr::BUILDING_NUMBERS[0], :from => 'ncr_work_order_building_number'
select Ncr::Organization.all[0], :from => 'ncr_work_order_org_code'
click_on 'Submit for approval'
expect(current_path).to eq("/proposals/#{Proposal.last.id}")
expect(page).to have_content("RWA Number")
end
it "hides fields based on expense", js: true do
visit '/ncr/work_orders/new'
expect(page).to have_no_field("RWA Number")
expect(page).to have_no_field("Work Order")
expect(page).to have_no_field("emergency")
choose 'BA61'
expect(page).to have_no_field("RWA Number")
expect(page).to have_no_field("Work Order")
expect(page).to have_field("emergency")
expect(find_field("emergency", visible: false)).to be_visible
choose 'BA80'
expect(page).to have_field("RWA Number")
expect(page).to have_field("Work Order")
expect(page).to have_no_field("emergency")
expect(find_field("RWA Number")).to be_visible
end
it "allows attachments to be added during intake without JS" do
visit '/ncr/work_orders/new'
expect(page).to have_content("Attachments")
expect(page).not_to have_selector(".js-am-minus")
expect(page).not_to have_selector(".js-am-plus")
expect(page).to have_selector("input[type=file]", count: 10)
end
it "allows attachments to be added during intake with JS", :js => true do
visit '/ncr/work_orders/new'
expect(page).to have_content("Attachments")
first_minus = find(".js-am-minus")
first_plus = find(".js-am-plus")
expect(first_minus).to be_visible
expect(first_plus).to be_visible
expect(first_minus).to be_disabled
expect(find("input[type=file]")[:name]).to eq("attachments[]")
first_plus.click # Adds one row
expect(page).to have_selector(".js-am-minus", count: 2)
expect(page).to have_selector(".js-am-plus", count: 2)
expect(page).to have_selector("input[type=file]", count: 2)
end
let (:work_order) {
wo = FactoryGirl.create(:ncr_work_order, requester: requester)
wo.add_approvals('[email protected]')
wo
}
let(:ncr_proposal) {
work_order.proposal
}
it "can be edited if pending" do
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(find_field("ncr_work_order_building_number").value).to eq(
Ncr::BUILDING_NUMBERS[0])
fill_in 'Vendor', with: 'New ACME'
click_on 'Submit for approval'
expect(current_path).to eq("/proposals/#{ncr_proposal.id}")
expect(page).to have_content("New ACME")
expect(page).to have_content("modified")
# Verify it is actually saved
work_order.reload
expect(work_order.vendor).to eq("New ACME")
end
it "creates a special comment when editing" do
visit "/ncr/work_orders/#{work_order.id}/edit"
fill_in 'Vendor', with: "New Test Vendor"
fill_in 'Description', with: "New Description"
click_on 'Submit for approval'
expect(page).to have_content("Request modified by")
expect(page).to have_content("Description was changed to New Description")
expect(page).to have_content("Vendor was changed to New Test Vendor")
end
it "has 'Discard Changes' link" do
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(page).to have_content("Discard Changes")
click_on "Discard Changes"
expect(current_path).to eq("/proposals/#{work_order.proposal.id}")
end
it "has a disabled field if first approval is done" do
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(find("[name=approver_email]")["disabled"]).to be_nil
work_order.approvals.first.approve!
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(find("[name=approver_email]")["disabled"]).to eq("disabled")
# And we can still submit
fill_in 'Vendor', with: 'New ACME'
click_on 'Submit for approval'
expect(current_path).to eq("/proposals/#{ncr_proposal.id}")
# Verify it is actually saved
work_order.reload
expect(work_order.vendor).to eq("New ACME")
end
it "can be edited if rejected" do
ncr_proposal.update_attributes(status: 'rejected') # avoid workflow
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(current_path).to eq("/ncr/work_orders/#{work_order.id}/edit")
end
it "can be edited if approved" do
ncr_proposal.update_attributes(status: 'approved') # avoid workflow
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(current_path).to eq("/ncr/work_orders/#{work_order.id}/edit")
end
it "cannot be edited by someone other than the requester" do
ncr_proposal.set_requester(FactoryGirl.create(:user))
visit "/ncr/work_orders/#{work_order.id}/edit"
expect(current_path).to eq("/ncr/work_orders/new")
expect(page).to have_content('not the requester')
end
it "shows a edit link from a pending cart" do
visit "/proposals/#{ncr_proposal.id}"
expect(page).to have_content('Modify Request')
click_on('Modify Request')
expect(current_path).to eq("/ncr/work_orders/#{work_order.id}/edit")
end
it "shows a edit link from a rejected cart" do
ncr_proposal.update_attribute(:status, 'rejected') # avoid state machine
visit "/proposals/#{ncr_proposal.id}"
expect(page).to have_content('Modify Request')
end
it "shows a edit link for an approved cart" do
ncr_proposal.update_attribute(:status, 'approved') # avoid state machine
visit "/proposals/#{ncr_proposal.id}"
expect(page).to have_content('Modify Request')
end
it "does not show a edit link for another client" do
ncr_proposal.client_data = nil
ncr_proposal.save()
visit "/proposals/#{ncr_proposal.id}"
expect(page).not_to have_content('Modify Request')
end
it "does not show a edit link for non requester" do
ncr_proposal.set_requester(FactoryGirl.create(:user))
visit "/proposals/#{ncr_proposal.id}"
expect(page).not_to have_content('Modify Request')
end
context "selected common values on proposal page" do
before do
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
fill_in "Approving Official's Email Address", with: '[email protected]'
select Ncr::BUILDING_NUMBERS[0], :from => 'ncr_work_order_building_number'
select Ncr::Organization.all[0], :from => 'ncr_work_order_org_code'
end
it "approves emergencies" do
choose 'BA61'
check "This request was an emergency and I received a verbal Notice to Proceed (NTP)"
expect {
click_on 'Submit for approval'
}.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(page).to have_content("Proposal submitted")
expect(current_path).to eq("/proposals/#{proposal.id}")
expect(page).to have_content("0 of 0 approved")
expect(proposal.client_data.emergency).to eq(true)
expect(proposal.approved?).to eq(true)
end
it "does not set emergencies if form type changes" do
choose 'BA61'
check "This request was an emergency and I received a verbal Notice to Proceed (NTP)"
choose 'BA80'
fill_in 'RWA Number', with: 'R9876543'
expect {
click_on 'Submit for approval'
}.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(page).to have_content("Proposal submitted")
expect(current_path).to eq("/proposals/#{proposal.id}")
expect(proposal.client_data.emergency).to eq(false)
expect(proposal.approved?).to eq(false)
end
end
end
end
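# Editor's sketch (not part of the original spec file; assumes the default test
# delivery method): the "does not resave unchanged requests" spec added in the
# patch could also assert that no notification emails go out on the second,
# unchanged submission, e.g.:
#
#   expect {
#     visit "/ncr/work_orders/#{work_order.id}/edit"
#     click_on 'Submit for approval'
#   }.to_not change { ActionMailer::Base.deliveries.count }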
| 1 | 13,319 | Might be good to test that no emails were sent out, either. You should be able to use `deliveries` for this | 18F-C2 | rb |
@@ -80,7 +80,7 @@ public class Connection implements Closeable {
serialized.put("sessionId", sessionId);
}
- LOG.info(JSON.toJson(serialized.build()));
+ LOG.finest(JSON.toJson(serialized.build()));
socket.sendText(JSON.toJson(serialized.build()));
if (!command.getSendsResponse() ) { | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.devtools;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.openqa.selenium.json.Json.MAP_TYPE;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multimap;
import org.openqa.selenium.devtools.target.model.SessionID;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonInput;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.WebSocket;
import java.io.Closeable;
import java.io.StringReader;
import java.time.Duration;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.logging.Logger;
public class Connection implements Closeable {
private static final Logger LOG = Logger.getLogger(Connection.class.getName());
private static final Json JSON = new Json();
private static final AtomicLong NEXT_ID = new AtomicLong(1L);
private final WebSocket socket;
private final Map<Long, Consumer<JsonInput>> methodCallbacks = new LinkedHashMap<>();
private final Multimap<Event<?>, Consumer<?>> eventCallbacks = HashMultimap.create();
public Connection(HttpClient client, String url) {
Objects.requireNonNull(client, "HTTP client must be set.");
Objects.requireNonNull(url, "URL to connect to must be set.");
socket = client.openSocket(new HttpRequest(GET, url), new Listener());
}
public <X> CompletableFuture<X> send(SessionID sessionId, Command<X> command) {
long id = NEXT_ID.getAndIncrement();
CompletableFuture<X> result = new CompletableFuture<>();
if (command.getSendsResponse()) {
methodCallbacks.put(id, input -> {
X value = command.getMapper().apply(input);
result.complete(value);
});
}
ImmutableMap.Builder<String, Object> serialized = ImmutableMap.builder();
serialized.put("id", id);
serialized.put("method", command.getMethod());
serialized.put("params", command.getParams());
if (sessionId != null) {
serialized.put("sessionId", sessionId);
}
LOG.info(JSON.toJson(serialized.build()));
socket.sendText(JSON.toJson(serialized.build()));
    if (!command.getSendsResponse()) {
result.complete(null);
}
return result;
}
public <X> X sendAndWait(SessionID sessionId, Command<X> command, Duration timeout) {
try {
return send(sessionId, command).get(timeout.toMillis(), MILLISECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Thread has been interrupted", e);
} catch (ExecutionException e) {
Throwable cause = e;
if (e.getCause() != null) {
cause = e.getCause();
}
throw new DevToolsException(cause);
} catch (TimeoutException e) {
throw new org.openqa.selenium.TimeoutException(e);
}
}
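  // Editor's usage sketch (not part of the original class; wsUrl, someEvent and
  // someCommand are placeholders for illustration):
  //
  //   try (Connection connection = new Connection(client, wsUrl)) {
  //     connection.addListener(someEvent, payload -> LOG.fine("event: " + payload));
  //     connection.sendAndWait(sessionId, someCommand, Duration.ofSeconds(10));
  //   }
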
public <X> void addListener(Event<X> event, Consumer<X> handler) {
Objects.requireNonNull(event);
Objects.requireNonNull(handler);
synchronized (eventCallbacks) {
eventCallbacks.put(event, handler);
}
}
public void clearListeners() {
synchronized (eventCallbacks) {
eventCallbacks.clear();
}
}
@Override
public void close() {
socket.close();
}
private class Listener extends WebSocket.Listener {
@Override
public void onText(CharSequence data) {
// It's kind of gross to decode the data twice, but this lets us get started on something
// that feels nice to users.
// TODO: decode once, and once only
String asString = String.valueOf(data);
LOG.info(asString);
Map<String, Object> raw = JSON.toType(asString, MAP_TYPE);
if (raw.get("id") instanceof Number && raw.get("result") != null) {
Consumer<JsonInput> consumer = methodCallbacks.remove(((Number) raw.get("id")).longValue());
if (consumer == null) {
return;
}
try (StringReader reader = new StringReader(asString);
JsonInput input = JSON.newInput(reader)) {
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "result":
consumer.accept(input);
break;
default:
input.skipValue();
}
}
input.endObject();
}
} else if (raw.get("method") instanceof String && raw.get("params") instanceof Map) {
LOG.fine("Seen: " + raw);
synchronized (eventCallbacks) {
// TODO: Also only decode once.
eventCallbacks.keySet().stream()
.filter(event -> raw.get("method").equals(event.getMethod()))
.forEach(event -> {
// TODO: This is grossly inefficient. I apologise, and we should fix this.
try (StringReader reader = new StringReader(asString);
JsonInput input = JSON.newInput(reader)) {
Object value = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "params":
value = event.getMapper().apply(input);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
if (value == null) {
// Do nothing.
return;
}
final Object finalValue = value;
for (Consumer<?> action : eventCallbacks.get(event)) {
@SuppressWarnings("unchecked") Consumer<Object> obj = (Consumer<Object>) action;
obj.accept(finalValue);
}
}
});
}
} else {
LOG.warning("Unhandled type: " + data);
}
}
}
}
| 1 | 17,119 | Right now this is experimental and deeply flaky. We left this at `info` to make debugging user reports a lot easier. | SeleniumHQ-selenium | java |
@@ -17,6 +17,11 @@ namespace UpdateVendors
//------------------------------------------------------------------------------
";
+ private static readonly List<string> FilesToSkip = new List<string>()
+ {
+ @"\Serilog\Events\LogEventLevel.cs",
+ };
+
private static readonly string DownloadDirectory = Path.Combine(Environment.CurrentDirectory, "downloads");
private static string _vendorProjectDirectory;
| 1 | using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;
namespace UpdateVendors
{
public class Program
{
private const string AutoGeneratedMessage = @"//------------------------------------------------------------------------------
// <auto-generated />
// This file was automatically generated by the UpdateVendors tool.
//------------------------------------------------------------------------------
";
private static readonly string DownloadDirectory = Path.Combine(Environment.CurrentDirectory, "downloads");
private static string _vendorProjectDirectory;
public static void Main()
{
InitializeCleanDirectory(DownloadDirectory);
var solutionDirectory = GetSolutionDirectory();
_vendorProjectDirectory = Path.Combine(solutionDirectory, "src", "Datadog.Trace", "Vendors");
InitializeCleanDirectory(_vendorProjectDirectory);
UpdateVendor(
libraryName: "Serilog",
branchDownload: "https://github.com/serilog/serilog/archive/v2.8.0.zip",
pathToSrc: new[] { "serilog-2.8.0", "src", "Serilog" },
transform: filePath => RewriteCsFileWithStandardTransform(filePath, originalNamespace: "Serilog"));
UpdateVendor(
libraryName: "Serilog.Sinks.File",
branchDownload: "https://github.com/serilog/serilog-sinks-file/archive/v4.0.0.zip",
pathToSrc: new[] { "serilog-sinks-file-4.0.0", "src", "Serilog.Sinks.File" },
transform: filePath => RewriteCsFileWithStandardTransform(filePath, originalNamespace: "Serilog"));
UpdateVendor(
libraryName: "StatsdClient",
branchDownload: "https://github.com/DataDog/dogstatsd-csharp-client/archive/3.3.0.zip",
pathToSrc: new[] { "dogstatsd-csharp-client-3.3.0", "src", "StatsdClient" },
transform: filePath => RewriteCsFileWithStandardTransform(filePath, originalNamespace: "StatsdClient"));
}
private static void RewriteCsFileWithStandardTransform(string filePath, string originalNamespace, Func<string, string, string> extraTransform = null)
{
if (string.Equals(Path.GetExtension(filePath), ".cs", StringComparison.OrdinalIgnoreCase))
{
RewriteFileWithTransform(
filePath,
content =>
{
// Disable analyzer
var builder = new StringBuilder(AutoGeneratedMessage, content.Length * 2);
builder.Append(content);
// Prevent namespace conflicts
builder.Replace($"using {originalNamespace}", $"using Datadog.Trace.Vendors.{originalNamespace}");
builder.Replace($"namespace {originalNamespace}", $"namespace Datadog.Trace.Vendors.{originalNamespace}");
// Don't expose anything we don't intend to
// by replacing all "public" access modifiers with "internal"
return Regex.Replace(
builder.ToString(),
@"public(\s+((abstract|sealed|static)\s+)?(partial\s+)?(class|struct|interface|enum|delegate))",
match => $"internal{match.Groups[1]}");
});
}
}
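        // Editor's note (illustrative sketch of the transform above, not generated output):
        //   before: "namespace Serilog { public sealed class Logger ... }"
        //   after:  "namespace Datadog.Trace.Vendors.Serilog { internal sealed class Logger ... }"
        // Only type declarations matched by the regex are downgraded to internal;
        // member-level "public" modifiers are left as they are.
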
private static void UpdateVendor(
string libraryName,
string branchDownload,
string[] pathToSrc,
Action<string> transform = null)
{
Console.WriteLine($"Starting {libraryName} upgrade.");
var zipLocation = Path.Combine(DownloadDirectory, $"{libraryName}.zip");
var extractLocation = Path.Combine(DownloadDirectory, $"{libraryName}");
using (var repoDownloadClient = new WebClient())
{
repoDownloadClient.DownloadFile(branchDownload, zipLocation);
}
Console.WriteLine($"Downloaded {libraryName} upgrade.");
ZipFile.ExtractToDirectory(zipLocation, extractLocation);
Console.WriteLine($"Unzipped {libraryName} upgrade.");
var sourceLocation = Path.Combine(pathToSrc.Prepend(extractLocation).ToArray());
var projFile = Path.Combine(sourceLocation, $"{libraryName}.csproj");
// Rename the proj file to a txt for reference
File.Copy(projFile, projFile + ".txt");
File.Delete(projFile);
Console.WriteLine($"Renamed {libraryName} project file.");
// Delete the assembly properties
var assemblyPropertiesFolder = Path.Combine(sourceLocation, @"Properties");
SafeDeleteDirectory(assemblyPropertiesFolder);
Console.WriteLine($"Deleted {libraryName} assembly properties file.");
if (transform != null)
{
Console.WriteLine($"Running transforms on files for {libraryName}.");
var files = Directory.GetFiles(
sourceLocation,
"*.*",
SearchOption.AllDirectories);
foreach (var file in files)
{
if (ShouldDropFile(file))
{
File.Delete(file);
}
else
{
transform(file);
}
}
Console.WriteLine($"Finished transforms on files for {libraryName}.");
}
// Move it all to the vendors directory
Console.WriteLine($"Copying source of {libraryName} to vendor project.");
var vendorFinalPath = Path.Combine(_vendorProjectDirectory, libraryName);
SafeDeleteDirectory(vendorFinalPath);
Directory.Move(sourceLocation, vendorFinalPath);
Console.WriteLine($"Finished {libraryName} upgrade.");
}
private static bool ShouldDropFile(string filePath)
{
var drops = new List<string>()
{
"StatsdClient\\DogStatsdService.cs",
"StatsdClient\\MetricsTimer.cs",
"StatsdClient\\IDogStatsD.cs",
"StatsdClient\\DogStatsD.cs",
};
foreach (var drop in drops)
{
if (filePath.Contains(drop, StringComparison.OrdinalIgnoreCase))
{
return true;
}
}
return false;
}
private static void RewriteFileWithTransform(string filePath, Func<string, string> transform)
{
var fileContent = File.ReadAllText(filePath);
fileContent = transform(fileContent);
File.WriteAllText(
filePath,
fileContent,
new UTF8Encoding(encoderShouldEmitUTF8Identifier: false));
}
private static void InitializeCleanDirectory(string directoryPath)
{
SafeDeleteDirectory(directoryPath);
Directory.CreateDirectory(directoryPath);
}
private static void SafeDeleteDirectory(string directoryPath)
{
if (Directory.Exists(directoryPath))
{
Directory.Delete(directoryPath, recursive: true);
}
}
private static string GetSolutionDirectory()
{
var startDirectory = Environment.CurrentDirectory;
var currentDirectory = Directory.GetParent(startDirectory);
const string searchItem = @"Datadog.Trace.sln";
while (true)
{
var slnFile = currentDirectory.GetFiles(searchItem).SingleOrDefault();
if (slnFile != null)
{
break;
}
currentDirectory = currentDirectory.Parent;
if (currentDirectory == null || !currentDirectory.Exists)
{
throw new Exception($"Unable to find solution directory from: {startDirectory}");
}
}
return currentDirectory.FullName;
}
}
}
| 1 | 16,507 | Is this new list being consumed yet? | DataDog-dd-trace-dotnet | .cs |
@@ -18,18 +18,14 @@ import (
"bytes"
"encoding/json"
"io"
- weakrand "math/rand"
"net"
"net/http"
"strconv"
- "time"
"github.com/caddyserver/caddy/v2"
)
func init() {
- weakrand.Seed(time.Now().UnixNano())
-
caddy.RegisterModule(tlsPlaceholderWrapper{})
}
| 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"bytes"
"encoding/json"
"io"
weakrand "math/rand"
"net"
"net/http"
"strconv"
"time"
"github.com/caddyserver/caddy/v2"
)
func init() {
weakrand.Seed(time.Now().UnixNano())
caddy.RegisterModule(tlsPlaceholderWrapper{})
}
// RequestMatcher is a type that can match to a request.
// A route matcher MUST NOT modify the request, with the
// only exception being its context.
type RequestMatcher interface {
Match(*http.Request) bool
}
// Handler is like http.Handler except ServeHTTP may return an error.
//
// If any handler encounters an error, it should be returned for proper
// handling. Return values should be propagated down the middleware chain
// by returning it unchanged. Returned errors should not be re-wrapped
// if they are already HandlerError values.
type Handler interface {
ServeHTTP(http.ResponseWriter, *http.Request) error
}
// HandlerFunc is a convenience type like http.HandlerFunc.
type HandlerFunc func(http.ResponseWriter, *http.Request) error
// ServeHTTP implements the Handler interface.
func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
return f(w, r)
}
// Middleware chains one Handler to the next by being passed
// the next Handler in the chain.
type Middleware func(Handler) Handler
// MiddlewareHandler is like Handler except it takes as a third
// argument the next handler in the chain. The next handler will
// never be nil, but may be a no-op handler if this is the last
// handler in the chain. Handlers which act as middleware should
// call the next handler's ServeHTTP method so as to propagate
// the request down the chain properly. Handlers which act as
// responders (content origins) need not invoke the next handler,
// since the last handler in the chain should be the first to
// write the response.
type MiddlewareHandler interface {
ServeHTTP(http.ResponseWriter, *http.Request, Handler) error
}
// emptyHandler is used as a no-op handler.
var emptyHandler Handler = HandlerFunc(func(http.ResponseWriter, *http.Request) error { return nil })
// An implicit suffix middleware that, if reached, sets the StatusCode to the
// error stored in the ErrorCtxKey. This is to prevent situations where the
// Error chain does not actually handle the error (for instance, it matches only
// on some errors). See #3053
var errorEmptyHandler Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
httpError := r.Context().Value(ErrorCtxKey)
if handlerError, ok := httpError.(HandlerError); ok {
w.WriteHeader(handlerError.StatusCode)
} else {
w.WriteHeader(http.StatusInternalServerError)
}
return nil
})
// ResponseHandler pairs a response matcher with custom handling
// logic. Either the status code can be changed to something else
// while using the original response body, or, if a status code
// is not set, it can execute a custom route list; this is useful
// for executing handler routes based on the properties of an HTTP
// response that has not been written out to the client yet.
//
// To use this type, provision it at module load time, then when
// ready to use, match the response against its matcher; if it
// matches (or doesn't have a matcher), change the status code on
// the response if configured; otherwise invoke the routes by
// calling `rh.Routes.Compile(next).ServeHTTP(rw, req)` (or similar).
type ResponseHandler struct {
// The response matcher for this handler. If empty/nil,
// it always matches.
Match *ResponseMatcher `json:"match,omitempty"`
// To write the original response body but with a different
// status code, set this field to the desired status code.
// If set, this takes priority over routes.
StatusCode WeakString `json:"status_code,omitempty"`
// The list of HTTP routes to execute if no status code is
// specified. If evaluated, the original response body
// will not be written.
Routes RouteList `json:"routes,omitempty"`
}
// Provision sets up the routes in rh.
func (rh *ResponseHandler) Provision(ctx caddy.Context) error {
if rh.Routes != nil {
err := rh.Routes.Provision(ctx)
if err != nil {
return err
}
}
return nil
}
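// Editor's illustration of the usage described in the ResponseHandler comment
// above (not part of the original file; the exact ResponseMatcher.Match
// signature used here is an assumption):
//
//	if rh.Match == nil || rh.Match.Match(statusCode, header) {
//		if rh.StatusCode != "" {
//			rw.WriteHeader(rh.StatusCode.Int()) // keep the original body, change the status
//		} else {
//			err = rh.Routes.Compile(next).ServeHTTP(rw, req) // run the custom routes instead
//		}
//	}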
// WeakString is a type that unmarshals any JSON value
// as a string literal, with the following exceptions:
//
// 1. actual string values are decoded as strings; and
// 2. null is decoded as empty string;
//
// and provides methods for getting the value as various
// primitive types. However, using this type removes any
// type safety as far as deserializing JSON is concerned.
type WeakString string
// UnmarshalJSON satisfies json.Unmarshaler according to
// this type's documentation.
func (ws *WeakString) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
return io.EOF
}
if b[0] == byte('"') && b[len(b)-1] == byte('"') {
var s string
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
*ws = WeakString(s)
return nil
}
if bytes.Equal(b, []byte("null")) {
return nil
}
*ws = WeakString(b)
return nil
}
// MarshalJSON marshals ws as a boolean if true or false,
// a number if an integer, or a string otherwise.
func (ws WeakString) MarshalJSON() ([]byte, error) {
if ws == "true" {
return []byte("true"), nil
}
if ws == "false" {
return []byte("false"), nil
}
if num, err := strconv.Atoi(string(ws)); err == nil {
return json.Marshal(num)
}
return json.Marshal(string(ws))
}
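// Editor's illustration (not part of the original file) of how a few JSON
// inputs round-trip through WeakString:
//
//	`8080` or `"8080"` -> WeakString("8080"); MarshalJSON -> 8080; Int() == 8080
//	`true`             -> WeakString("true"); MarshalJSON -> true; Bool() == true
//	`null`             -> WeakString("");     Int() == 0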
// Int returns ws as an integer. If ws is not an
// integer, 0 is returned.
func (ws WeakString) Int() int {
num, _ := strconv.Atoi(string(ws))
return num
}
// Float64 returns ws as a float64. If ws is not a
// float value, the zero value is returned.
func (ws WeakString) Float64() float64 {
num, _ := strconv.ParseFloat(string(ws), 64)
return num
}
// Bool returns ws as a boolean. If ws is not a
// boolean, false is returned.
func (ws WeakString) Bool() bool {
return string(ws) == "true"
}
// String returns ws as a string.
func (ws WeakString) String() string {
return string(ws)
}
// StatusCodeMatches returns true if a real HTTP status code matches
// the configured status code, which may be either a real HTTP status
// code or an integer representing a class of codes (e.g. 4 for all
// 4xx statuses).
func StatusCodeMatches(actual, configured int) bool {
if actual == configured {
return true
}
if configured < 100 &&
actual >= configured*100 &&
actual < (configured+1)*100 {
return true
}
return false
}
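// Editor's illustration (not part of the original file):
//
//	StatusCodeMatches(404, 404) // true: exact match
//	StatusCodeMatches(404, 4)   // true: 4 matches the whole 4xx class
//	StatusCodeMatches(503, 5)   // true: 5xx class
//	StatusCodeMatches(500, 4)   // false: 500 is outside 400-499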
// tlsPlaceholderWrapper is a no-op listener wrapper that marks
// where the TLS listener should be in a chain of listener wrappers.
// It should only be used if another listener wrapper must be placed
// in front of the TLS handshake.
type tlsPlaceholderWrapper struct{}
func (tlsPlaceholderWrapper) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.listeners.tls",
New: func() caddy.Module { return new(tlsPlaceholderWrapper) },
}
}
func (tlsPlaceholderWrapper) WrapListener(ln net.Listener) net.Listener { return ln }
const (
// DefaultHTTPPort is the default port for HTTP.
DefaultHTTPPort = 80
// DefaultHTTPSPort is the default port for HTTPS.
DefaultHTTPSPort = 443
)
// Interface guard
var _ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil)
| 1 | 15,725 | moved to `errors.go`, which is the only place `math/rand` is used in this package | caddyserver-caddy | go |
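A minimal sketch of what this comment describes, assuming the seeding call simply moves into an init() in errors.go next to the remaining math/rand usage (that file's actual contents are not shown here, so its layout is an assumption):

// errors.go (assumed location; editor's sketch, not the actual file)
package caddyhttp

import (
	weakrand "math/rand"
	"time"
)

func init() {
	// seed the weak RNG once, in the one file of this package that uses math/rand
	weakrand.Seed(time.Now().UnixNano())
}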
@@ -3,8 +3,8 @@
namespace Shopsys\FrameworkBundle\Component\Error;
use AppKernel;
-use Shopsys\Environment;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
+use Shopsys\FrameworkBundle\Component\Environment\EnvironmentType;
use Shopsys\FrameworkBundle\Component\Router\DomainRouterFactory;
use Symfony\Component\Filesystem\Filesystem;
use Symfony\Component\HttpFoundation\Request; | 1 | <?php
namespace Shopsys\FrameworkBundle\Component\Error;
use AppKernel;
use Shopsys\Environment;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\Router\DomainRouterFactory;
use Symfony\Component\Filesystem\Filesystem;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\Routing\Generator\UrlGeneratorInterface;
class ErrorPagesFacade
{
const PAGE_STATUS_CODE_404 = Response::HTTP_NOT_FOUND;
const PAGE_STATUS_CODE_500 = Response::HTTP_INTERNAL_SERVER_ERROR;
/**
* @var string
*/
private $errorPagesDir;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
private $domain;
/**
* @var \Shopsys\FrameworkBundle\Component\Router\DomainRouterFactory
*/
private $domainRouterFactory;
/**
* @var \Symfony\Component\Filesystem\Filesystem
*/
private $filesystem;
/**
* @param string $errorPagesDir
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
* @param \Shopsys\FrameworkBundle\Component\Router\DomainRouterFactory $domainRouterFactory
* @param \Symfony\Component\Filesystem\Filesystem $filesystem
*/
public function __construct(
$errorPagesDir,
Domain $domain,
DomainRouterFactory $domainRouterFactory,
Filesystem $filesystem
) {
$this->errorPagesDir = $errorPagesDir;
$this->domain = $domain;
$this->domainRouterFactory = $domainRouterFactory;
$this->filesystem = $filesystem;
}
public function generateAllErrorPagesForProduction()
{
foreach ($this->domain->getAll() as $domainConfig) {
$this->generateAndSaveErrorPage($domainConfig->getId(), self::PAGE_STATUS_CODE_404);
$this->generateAndSaveErrorPage($domainConfig->getId(), self::PAGE_STATUS_CODE_500);
}
}
/**
* @param int $domainId
* @param int $statusCode
* @return string
*/
public function getErrorPageContentByDomainIdAndStatusCode($domainId, $statusCode)
{
$errorPageContent = file_get_contents($this->getErrorPageFilename($domainId, $statusCode));
if ($errorPageContent === false) {
throw new \Shopsys\FrameworkBundle\Component\Error\Exception\ErrorPageNotFoundException($domainId, $statusCode);
}
return $errorPageContent;
}
/**
* @param int $statusCode
* @return int
*/
public function getErrorPageStatusCodeByStatusCode($statusCode)
{
if ($statusCode === Response::HTTP_NOT_FOUND || $statusCode === Response::HTTP_FORBIDDEN) {
return self::PAGE_STATUS_CODE_404;
} else {
return self::PAGE_STATUS_CODE_500;
}
}
/**
* @param int $domainId
* @param int $statusCode
*/
private function generateAndSaveErrorPage($domainId, $statusCode)
{
$domainRouter = $this->domainRouterFactory->getRouter($domainId);
$errorPageUrl = $domainRouter->generate(
'front_error_page_format',
[
'_format' => 'html',
'code' => $statusCode,
],
UrlGeneratorInterface::ABSOLUTE_URL
);
$errorPageContent = $this->getUrlContent($errorPageUrl, $statusCode);
$this->filesystem->dumpFile(
$this->getErrorPageFilename($domainId, $statusCode),
$errorPageContent
);
}
/**
* @param int $domainId
* @param int $statusCode
* @return string
*/
private function getErrorPageFilename($domainId, $statusCode)
{
        return $this->errorPagesDir . $statusCode . '_' . $domainId . '.html';
}
/**
* @param string $errorPageUrl
* @param int $expectedStatusCode
* @return string
*/
private function getUrlContent($errorPageUrl, $expectedStatusCode)
{
$errorPageKernel = new AppKernel(Environment::ENVIRONMENT_PRODUCTION, false);
$errorPageFakeRequest = Request::create($errorPageUrl);
$errorPageResponse = $errorPageKernel->handle($errorPageFakeRequest);
$errorPageKernel->terminate($errorPageFakeRequest, $errorPageResponse);
if ($expectedStatusCode !== $errorPageResponse->getStatusCode()) {
throw new \Shopsys\FrameworkBundle\Component\Error\Exception\BadErrorPageStatusCodeException(
$errorPageUrl,
$expectedStatusCode,
$errorPageResponse->getStatusCode()
);
}
return $errorPageResponse->getContent();
}
}
| 1 | 9,620 | This use is wrong, the correct one is: `Shopsys\FrameworkBundle\Component\Environment`. It is the same in the classes below. | shopsys-shopsys | php |
@@ -203,6 +203,9 @@ public class RegistrationRequest {
if (pendingConfiguration.port != null) {
pendingRequest.configuration.port = pendingConfiguration.port;
}
+ if (pendingConfiguration.remoteHost != null) {
+ pendingRequest.configuration.remoteHost = pendingConfiguration.remoteHost;
+ }
// make sure we have a valid host
pendingRequest.configuration.fixUpHost(); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.grid.common;
import org.openqa.grid.common.exception.GridConfigurationException;
import org.openqa.grid.internal.utils.configuration.GridNodeConfiguration;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonException;
import java.util.Map;
import java.util.TreeMap;
/**
* Helper to register to the grid. Using JSON to exchange the object between the node and the hub.
*/
public class RegistrationRequest {
// some special param for capability
public static final String MAX_INSTANCES = "maxInstances";
// see enum SeleniumProtocol
public static final String SELENIUM_PROTOCOL = "seleniumProtocol";
public static final String PATH = "path";
private String name;
private String description;
private GridNodeConfiguration configuration;
/**
* Create a new registration request using the default values of a
* {@link GridNodeConfiguration}
*/
public RegistrationRequest() {
this(new GridNodeConfiguration());
}
/**
* Create a new registration request using the supplied {@link GridNodeConfiguration}
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
*/
public RegistrationRequest(GridNodeConfiguration configuration) {
this(configuration, null, null);
}
/**
* Create a new registration request using the supplied {@link GridNodeConfiguration}, and name
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
*/
public RegistrationRequest(GridNodeConfiguration configuration, String name) {
this(configuration, name, null);
}
/**
* Create a new registration request using the supplied {@link GridNodeConfiguration}, name, and
* description
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
* @param description the description for the remote host
*/
public RegistrationRequest(GridNodeConfiguration configuration, String name, String description) {
this.configuration = (configuration == null) ? new GridNodeConfiguration() : configuration;
this.name = name;
this.description = description;
// make sure we have something that looks like a valid host
this.configuration.fixUpHost();
// make sure the capabilities are updated with required fields
this.configuration.fixUpCapabilities();
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
public GridNodeConfiguration getConfiguration() {
return configuration;
}
public Map<String, Object> toJson() {
Map<String, Object> json = new TreeMap<>();
json.put("class", getClass());
json.put("name", getName());
json.put("description", getDescription());
json.put("configuration", getConfiguration());
return json;
}
/**
* Create an object from a registration request formatted as a json string.
*/
public static RegistrationRequest fromJson(Map<String, Object> raw) throws JsonException {
// If we could, we'd just get Json to coerce this for us, but that would lead to endless
// recursion as the first thing it would do would be to call this very method. *sigh*
Json json = new Json();
RegistrationRequest request = new RegistrationRequest();
if (raw.get("name") instanceof String) {
request.name = (String) raw.get("name");
}
if (raw.get("description") instanceof String) {
request.description = (String) raw.get("description");
}
if (raw.get("configuration") instanceof Map) {
// This is nasty. Look away now!
String converted = json.toJson(raw.get("configuration"));
request.configuration = GridConfiguredJson.toType(converted, GridNodeConfiguration.class);
}
return request;
}
/**
* Build a RegistrationRequest.
*/
public static RegistrationRequest build() {
return RegistrationRequest.build(new GridNodeConfiguration(), null, null);
}
/**
* Build a RegistrationRequest from the provided {@link GridNodeConfiguration}. This is different
* than {@code new RegistrationRequest(GridNodeConfiguration)} because it will first load any
* specified {@link GridNodeConfiguration#nodeConfigFile} and then merge the provided
* configuration onto it.
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
*/
public static RegistrationRequest build(GridNodeConfiguration configuration) {
return RegistrationRequest.build(configuration, null, null);
}
/**
* Build a RegistrationRequest from the provided {@link GridNodeConfiguration}, use the provided
* name. This is different than {@code new RegistrationRequest(GridNodeConfiguration, String)}
* because it will first load any specified {@link GridNodeConfiguration#nodeConfigFile} and then
* merge the provided configuration onto it.
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
*/
public static RegistrationRequest build(GridNodeConfiguration configuration, String name) {
return RegistrationRequest.build(configuration, name, null);
}
/**
* Build a RegistrationRequest from the provided {@link GridNodeConfiguration}, use the provided
* name and description. This is different than {@code new RegistrationRequest(GridNodeConfiguration,
* String, String)} because it will first load any specified {@link
* GridNodeConfiguration#nodeConfigFile} and then merge the provided configuration onto it.
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
* @param description the description for the remote host
*/
public static RegistrationRequest build(GridNodeConfiguration configuration, String name, String description) {
RegistrationRequest pendingRequest = new RegistrationRequest(configuration, name, description);
GridNodeConfiguration pendingConfiguration = pendingRequest.configuration;
if (pendingConfiguration.nodeConfigFile != null) {
pendingRequest.configuration = GridNodeConfiguration.loadFromJSON(pendingConfiguration.nodeConfigFile);
}
pendingRequest.configuration.merge(pendingConfiguration);
//update important merge protected values for the pendingRequest we are building.
if (pendingConfiguration.host != null) {
pendingRequest.configuration.host = pendingConfiguration.host;
}
if (pendingConfiguration.port != null) {
pendingRequest.configuration.port = pendingConfiguration.port;
}
// make sure we have a valid host
pendingRequest.configuration.fixUpHost();
// make sure the capabilities are updated with required fields
pendingRequest.configuration.fixUpCapabilities();
pendingRequest.configuration.dropCapabilitiesThatDoesNotMatchCurrentPlatform();
return pendingRequest;
}
/**
   * Validate the current setting and throw a config exception if an invalid setup is detected.
*
* @throws GridConfigurationException grid configuration
*/
public void validate() throws GridConfigurationException {
// validations occur here in the getters called on the configuration.
try {
configuration.getHubHost();
configuration.getHubPort();
} catch (RuntimeException e) {
throw new GridConfigurationException(e.getMessage());
}
}
}
| 1 | 15,979 | This is not really needed; the line added in `GridNodeConfiguration.java` is what really fixes the `remoteHost` regression. | SeleniumHQ-selenium | rb |
@@ -323,11 +323,6 @@ func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, contai
container.SetLabels(metadata.Labels)
}
- // Update Volume
- if metadata.Volumes != nil {
- task.UpdateMountPoints(container, metadata.Volumes)
- }
-
// Set Exitcode if it's not set
if metadata.ExitCode != nil {
container.SetKnownExitCode(metadata.ExitCode) | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package engine contains the core logic for managing tasks
package engine
import (
"regexp"
"strconv"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/utils"
utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"context"
"github.com/cihub/seelog"
"github.com/pkg/errors"
)
const (
	// DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint
DockerEndpointEnvVariable = "DOCKER_HOST"
// DockerDefaultEndpoint is the default value for the Docker endpoint
DockerDefaultEndpoint = "unix:///var/run/docker.sock"
capabilityPrefix = "com.amazonaws.ecs.capability."
capabilityTaskIAMRole = "task-iam-role"
capabilityTaskIAMRoleNetHost = "task-iam-role-network-host"
capabilityTaskCPUMemLimit = "task-cpu-mem-limit"
attributePrefix = "ecs.capability."
labelPrefix = "com.amazonaws.ecs."
labelTaskARN = labelPrefix + "task-arn"
labelContainerName = labelPrefix + "container-name"
labelTaskDefinitionFamily = labelPrefix + "task-definition-family"
labelTaskDefinitionVersion = labelPrefix + "task-definition-version"
labelCluster = labelPrefix + "cluster"
cniSetupTimeout = 1 * time.Minute
cniCleanupTimeout = 30 * time.Second
)
// DockerTaskEngine is a state machine for managing a task and its containers
// in ECS.
//
// DockerTaskEngine implements an abstraction over the DockerGoClient so that
// it does not have to know about tasks, only containers
// The DockerTaskEngine interacts with Docker to implement a TaskEngine
type DockerTaskEngine struct {
// implements TaskEngine
cfg *config.Config
ctx context.Context
initialized bool
mustInitLock sync.Mutex
// state stores all tasks this task engine is aware of, including their
// current state and mappings to/from dockerId and name.
// This is used to checkpoint state to disk so tasks may survive agent
// failures or updates
state dockerstate.TaskEngineState
managedTasks map[string]*managedTask
taskStopGroup *utilsync.SequentialWaitGroup
events <-chan dockerapi.DockerContainerChangeEvent
stateChangeEvents chan statechange.Event
saver statemanager.Saver
client dockerapi.DockerClient
cniClient ecscni.CNIClient
containerChangeEventStream *eventstream.EventStream
stopEngine context.CancelFunc
// tasksLock is a mutex that the task engine must acquire before changing
// any task's state which it manages. Since this is a lock that encompasses
// all tasks, it must not acquire it for any significant duration
// The write mutex should be taken when adding and removing tasks from managedTasks.
tasksLock sync.RWMutex
enableConcurrentPull bool
credentialsManager credentials.Manager
_time ttime.Time
_timeOnce sync.Once
imageManager ImageManager
containerStatusToTransitionFunction map[apicontainer.ContainerStatus]transitionApplyFunc
metadataManager containermetadata.Manager
// taskSteadyStatePollInterval is the duration that a managed task waits
// once the task gets into steady state before polling the state of all of
// the task's containers to re-evaluate if the task is still in steady state
// This is set to defaultTaskSteadyStatePollInterval in production code.
// This can be used by tests that are looking to ensure that the steady state
// verification logic gets executed to set it to a low interval
taskSteadyStatePollInterval time.Duration
resourceFields *taskresource.ResourceFields
}
// NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine.
// The distinction between created and initialized is that when created it may
// be serialized/deserialized, but it will not communicate with docker until it
// is also initialized.
func NewDockerTaskEngine(cfg *config.Config,
client dockerapi.DockerClient,
credentialsManager credentials.Manager,
containerChangeEventStream *eventstream.EventStream,
imageManager ImageManager,
state dockerstate.TaskEngineState,
metadataManager containermetadata.Manager,
resourceFields *taskresource.ResourceFields) *DockerTaskEngine {
dockerTaskEngine := &DockerTaskEngine{
cfg: cfg,
client: client,
saver: statemanager.NewNoopStateManager(),
state: state,
managedTasks: make(map[string]*managedTask),
taskStopGroup: utilsync.NewSequentialWaitGroup(),
stateChangeEvents: make(chan statechange.Event),
enableConcurrentPull: false,
credentialsManager: credentialsManager,
containerChangeEventStream: containerChangeEventStream,
imageManager: imageManager,
cniClient: ecscni.NewClient(&ecscni.Config{
PluginsPath: cfg.CNIPluginsPath,
MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion,
}),
metadataManager: metadataManager,
taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval,
resourceFields: resourceFields,
}
dockerTaskEngine.initializeContainerStatusToTransitionFunction()
return dockerTaskEngine
}
func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() {
containerStatusToTransitionFunction := map[apicontainer.ContainerStatus]transitionApplyFunc{
apicontainer.ContainerPulled: engine.pullContainer,
apicontainer.ContainerCreated: engine.createContainer,
apicontainer.ContainerRunning: engine.startContainer,
apicontainer.ContainerResourcesProvisioned: engine.provisionContainerResources,
apicontainer.ContainerStopped: engine.stopContainer,
}
engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction
}
// ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1
// Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718)
// Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks.
var ImagePullDeleteLock sync.RWMutex
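// Editor's illustration (not part of the original file): pulls take the read
// side of this lock so they can overlap with each other, while image deletion
// is assumed to take the write side so it excludes all in-flight pulls:
//
//	ImagePullDeleteLock.RLock()   // pull path (see concurrentPull below)
//	defer ImagePullDeleteLock.RUnlock()
//
//	ImagePullDeleteLock.Lock()    // delete path (assumed to live in the image manager)
//	defer ImagePullDeleteLock.Unlock()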
// UnmarshalJSON restores a previously marshaled task-engine state from json
func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error {
return engine.state.UnmarshalJSON(data)
}
// MarshalJSON marshals into state directly
func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) {
return engine.state.MarshalJSON()
}
// Init initializes a DockerTaskEngine such that it may communicate with docker
// and operate normally.
// This function must be called before any other function, except serializing and deserializing, can succeed without error.
func (engine *DockerTaskEngine) Init(ctx context.Context) error {
	// TODO, pass in a context from main as the background context so that other things can stop us, not just the tests
derivedCtx, cancel := context.WithCancel(ctx)
engine.stopEngine = cancel
engine.ctx = derivedCtx
// Determine whether the engine can perform concurrent "docker pull" based on docker version
engine.enableConcurrentPull = engine.isParallelPullCompatible()
// Open the event stream before we sync state so that e.g. if a container
// goes from running to stopped after we sync with it as "running" we still
// have the "went to stopped" event pending so we can be up to date.
err := engine.openEventstream(derivedCtx)
if err != nil {
return err
}
engine.synchronizeState()
// Now catch up and start processing new events per normal
go engine.handleDockerEvents(derivedCtx)
engine.initialized = true
return nil
}
// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit(ctx context.Context) {
if engine.initialized {
return
}
engine.mustInitLock.Lock()
defer engine.mustInitLock.Unlock()
errorOnce := sync.Once{}
taskEngineConnectBackoff := utils.NewSimpleBackoff(200*time.Millisecond, 2*time.Second, 0.20, 1.5)
utils.RetryWithBackoff(taskEngineConnectBackoff, func() error {
if engine.initialized {
return nil
}
err := engine.Init(ctx)
if err != nil {
errorOnce.Do(func() {
seelog.Errorf("Task engine: could not connect to docker daemon: %v", err)
})
}
return err
})
}
// SetSaver sets the saver that is used by the DockerTaskEngine
func (engine *DockerTaskEngine) SetSaver(saver statemanager.Saver) {
engine.saver = saver
}
// Shutdown makes a best-effort attempt to cleanup after the task engine.
// This should not be relied on for anything more complicated than testing.
func (engine *DockerTaskEngine) Shutdown() {
engine.stopEngine()
engine.Disable()
}
// Disable prevents this engine from managing any additional tasks.
func (engine *DockerTaskEngine) Disable() {
engine.tasksLock.Lock()
}
// isTaskManaged checks if task for the corresponding arn is present
func (engine *DockerTaskEngine) isTaskManaged(arn string) bool {
engine.tasksLock.RLock()
defer engine.tasksLock.RUnlock()
_, ok := engine.managedTasks[arn]
return ok
}
// synchronizeState explicitly goes through each docker container stored in
// "state" and updates its KnownStatus appropriately, as well as queueing up
// events to push upstream.
func (engine *DockerTaskEngine) synchronizeState() {
engine.tasksLock.Lock()
defer engine.tasksLock.Unlock()
imageStates := engine.state.AllImageStates()
if len(imageStates) != 0 {
engine.imageManager.AddAllImageStates(imageStates)
}
tasks := engine.state.AllTasks()
var tasksToStart []*apitask.Task
for _, task := range tasks {
task.InitializeResources(engine.resourceFields)
conts, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
// task hasn't started processing, no need to check container status
tasksToStart = append(tasksToStart, task)
continue
}
for _, cont := range conts {
engine.synchronizeContainerStatus(cont, task)
}
tasksToStart = append(tasksToStart, task)
		// Put tasks that are stopped by acs but haven't been stopped yet into the wait group
if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 {
engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1)
}
}
for _, task := range tasksToStart {
engine.startTask(task)
}
engine.saver.Save()
}
// updateContainerMetadata sets the container metadata from the docker inspect
func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) {
container.SetCreatedAt(metadata.CreatedAt)
container.SetStartedAt(metadata.StartedAt)
container.SetFinishedAt(metadata.FinishedAt)
// Set the labels if it's not set
if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 {
container.SetLabels(metadata.Labels)
}
// Update Volume
if metadata.Volumes != nil {
task.UpdateMountPoints(container, metadata.Volumes)
}
// Set Exitcode if it's not set
if metadata.ExitCode != nil {
container.SetKnownExitCode(metadata.ExitCode)
}
// Set port mappings
if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 {
container.SetKnownPortBindings(metadata.PortBindings)
}
// update the container health information
if container.HealthStatusShouldBeReported() {
container.SetHealthStatus(metadata.Health)
}
}
// synchronizeContainerStatus checks and updates the container status with docker
func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) {
if container.DockerID == "" {
seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s",
task.Arn, container.DockerName)
// Figure out the dockerid
describedContainer, err := engine.client.InspectContainer(engine.ctx,
container.DockerName, dockerclient.InspectContainerTimeout)
if err != nil {
seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v",
task.Arn, container.DockerName, err)
} else {
// update the container metadata in case the container was created during agent restart
metadata := dockerapi.MetadataFromContainer(describedContainer)
updateContainerMetadata(&metadata, container.Container, task)
container.DockerID = describedContainer.ID
container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State))
// update mappings that need dockerid
engine.state.AddContainer(container, task)
engine.imageManager.RecordContainerReference(container.Container)
}
return
}
currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID)
if metadata.Error != nil {
currentState = apicontainer.ContainerStopped
// If this is a Docker API error
if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName {
seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v",
task.Arn, container.DockerID, container.DockerName, metadata.Error)
if !container.Container.KnownTerminal() {
container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{})
engine.imageManager.RemoveContainerReferenceFromImageState(container.Container)
}
} else {
// If this is a container state error
updateContainerMetadata(&metadata, container.Container, task)
container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error)
}
} else {
// update the container metadata in case the container status/metadata changed during agent restart
updateContainerMetadata(&metadata, container.Container, task)
engine.imageManager.RecordContainerReference(container.Container)
if engine.cfg.ContainerMetadataEnabled && !container.Container.IsMetadataFileUpdated() {
go engine.updateMetadataFile(task, container)
}
}
if currentState > container.Container.GetKnownStatus() {
// update the container known status
container.Container.SetKnownStatus(currentState)
}
// Update task ExecutionStoppedAt timestamp
task.RecordExecutionStoppedAt(container.Container)
}
// checkTaskState inspects the state of all containers within a task and writes
// their state to the managed task's container channel.
func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) {
taskContainers, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
seelog.Warnf("Task engine [%s]: could not check task state; no task in state", task.Arn)
return
}
for _, container := range task.Containers {
dockerContainer, ok := taskContainers[container.Name]
if !ok {
continue
}
status, metadata := engine.client.DescribeContainer(engine.ctx, dockerContainer.DockerID)
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
managedTask.emitDockerContainerChange(dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: status,
DockerContainerMetadata: metadata,
},
})
}
}
}
// sweepTask deletes all the containers associated with a task
func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) {
for _, cont := range task.Containers {
err := engine.removeContainer(task, cont)
if err != nil {
seelog.Debugf("Task engine [%s]: unable to remove old container [%s]: %v",
task.Arn, cont.Name, err)
}
		// Internal container (created by ecs-agent) state isn't recorded
if cont.IsInternal() {
continue
}
err = engine.imageManager.RemoveContainerReferenceFromImageState(cont)
if err != nil {
seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v",
task.Arn, cont.Name, err)
}
}
// Clean metadata directory for task
if engine.cfg.ContainerMetadataEnabled {
err := engine.metadataManager.Clean(task.Arn)
if err != nil {
seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err)
}
}
engine.saver.Save()
}
func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) {
for _, resource := range task.GetResources() {
err := resource.Cleanup()
if err != nil {
seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v",
task.Arn, resource.GetName(), err)
} else {
seelog.Debugf("Task engine [%s]: resource %s cleanup complete", task.Arn,
resource.GetName())
}
}
// Now remove ourselves from the global state and cleanup channels
engine.tasksLock.Lock()
engine.state.RemoveTask(task)
eni := task.GetTaskENI()
if eni == nil {
seelog.Debugf("Task engine [%s]: no eni associated with task", task.Arn)
} else {
seelog.Debugf("Task engine [%s]: removing the eni from agent state", task.Arn)
engine.state.RemoveENIAttachment(eni.MacAddress)
}
seelog.Debugf("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn)
delete(engine.managedTasks, task.Arn)
engine.tasksLock.Unlock()
engine.saver.Save()
}
func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) {
event, err := api.NewTaskStateChangeEvent(task, reason)
if err != nil {
seelog.Debugf("Task engine [%s]: unable to create task state change event: %v", task.Arn, err)
return
}
seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String())
engine.stateChangeEvents <- event
}
// startTask creates a managedTask construct to track the task and then begins
// pushing it towards its desired state when allowed startTask is protected by
// the tasksLock lock of 'AddTask'. It should not be called from anywhere
// else and should exit quickly to allow AddTask to do more work.
func (engine *DockerTaskEngine) startTask(task *apitask.Task) {
// Create a channel that may be used to communicate with this task, survey
// what tasks need to be waited for for this one to start, and then spin off
// a goroutine to oversee this task
thisTask := engine.newManagedTask(task)
thisTask._time = engine.time()
go thisTask.overseeTask()
}
func (engine *DockerTaskEngine) time() ttime.Time {
engine._timeOnce.Do(func() {
if engine._time == nil {
engine._time = &ttime.DefaultTime{}
}
})
return engine._time
}
// openEventstream opens, but does not consume, the docker event stream
func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error {
events, err := engine.client.ContainerEvents(ctx)
if err != nil {
return err
}
engine.events = events
return nil
}
// handleDockerEvents must be called after openEventstream; it processes each
// event that it reads from the docker eventstream
func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case event := <-engine.events:
engine.handleDockerEvent(event)
}
}
}
// handleDockerEvent is the entrypoint for task modifications originating with
// events occurring through Docker, outside the task engine itself.
// handleDockerEvent is responsible for taking an event that correlates to a
// container and placing it in the context of the task to which that container
// belongs.
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) {
seelog.Debugf("Task engine: handling a docker event: %s", event.String())
task, ok := engine.state.TaskByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task",
event.DockerID)
return
}
cont, ok := engine.state.ContainerByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container",
event.DockerID)
return
}
	// Container health status change does not affect the container status
// no need to process this in task manager
if event.Type == apicontainer.ContainerHealthEvent {
if cont.Container.HealthStatusShouldBeReported() {
seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v",
cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health)
cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health)
}
return
}
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
// hold the lock until the message is sent so we don't send on a closed channel
defer engine.tasksLock.RUnlock()
if !ok {
seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s",
task.Arn, event.String())
return
}
seelog.Debugf("Task engine [%s]: writing docker event to the task: %s",
task.Arn, event.String())
managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event})
seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s",
task.Arn, event.String())
}
// StateChangeEvents returns channels to read task and container state changes. These
// changes should be read as soon as possible; not reading them will block
// processing of the task referenced by the event.
func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event {
return engine.stateChangeEvents
}
// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *apitask.Task) {
err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager, engine.resourceFields)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err)
task.SetKnownStatus(apitask.TaskStopped)
task.SetDesiredStatus(apitask.TaskStopped)
engine.emitTaskEvent(task, err.Error())
return
}
engine.tasksLock.Lock()
defer engine.tasksLock.Unlock()
existingTask, exists := engine.state.TaskByArn(task.Arn)
if !exists {
// This will update the container desired status
task.UpdateDesiredStatus()
engine.state.AddTask(task)
if dependencygraph.ValidDependencies(task) {
engine.startTask(task)
} else {
seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn)
task.SetKnownStatus(apitask.TaskStopped)
task.SetDesiredStatus(apitask.TaskStopped)
err := TaskDependencyError{task.Arn}
engine.emitTaskEvent(task, err.Error())
}
return
}
// Update task
engine.updateTaskUnsafe(existingTask, task)
}
// ListTasks returns the tasks currently managed by the DockerTaskEngine
func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) {
return engine.state.AllTasks(), nil
}
// GetTaskByArn returns the task identified by that ARN
func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) {
return engine.state.TaskByArn(arn)
}
func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
switch container.Type {
case apicontainer.ContainerCNIPause:
// ContainerCNIPause image are managed at startup
return dockerapi.DockerContainerMetadata{}
case apicontainer.ContainerEmptyHostVolume:
// ContainerEmptyHostVolume image is either local (must be imported) or remote (must be pulled)
if emptyvolume.LocalImage {
return engine.client.ImportLocalEmptyVolumeImage()
}
}
if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) {
// Record the pullStoppedAt timestamp
defer func() {
timestamp := engine.time().Now()
task.SetPullStoppedAt(timestamp)
}()
if engine.enableConcurrentPull {
seelog.Infof("Task engine [%s]: pulling container %s concurrently", task.Arn, container.Name)
return engine.concurrentPull(task, container)
}
seelog.Infof("Task engine [%s]: pulling container %s serially", task.Arn, container.Name)
return engine.serialPull(task, container)
}
	// No image pull is required; just update the container reference and use the cached image.
engine.updateContainerReference(false, container, task.Arn)
// Return the metadata without any error
return dockerapi.DockerContainerMetadata{Error: nil}
}
// imagePullRequired returns true if pulling the image is required, or false if the local image cache
// should be used, by inspecting the agent pull behavior variable defined in config. The caller has
// to make sure the container passed in is not an internal container.
func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType,
container *apicontainer.Container,
taskArn string) bool {
switch imagePullBehavior {
case config.ImagePullOnceBehavior:
// If this image has been pulled successfully before, don't pull the image,
// otherwise pull the image as usual, regardless whether the image exists or not
// (the image can be prepopulated with the AMI and never be pulled).
imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
if ok && imageState.GetPullSucceeded() {
seelog.Infof("Task engine [%s]: image %s has been pulled once, not pulling it again",
taskArn, container.Image)
return false
}
return true
case config.ImagePullPreferCachedBehavior:
// If the behavior is prefer cached, don't pull if we found cached image
// by inspecting the image.
_, err := engine.client.InspectImage(container.Image)
if err != nil {
return true
}
seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
taskArn, container.Image, container.Name)
return false
default:
// Need to pull the image for always and default agent pull behavior
return true
}
}
func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image - %s",
task.Arn, container.Image)
ImagePullDeleteLock.RLock()
seelog.Debugf("Task engine [%s]: Acquired ImagePullDeleteLock, start pulling image - %s",
task.Arn, container.Image)
defer seelog.Debugf("Task engine [%s]: Released ImagePullDeleteLock after pulling image - %s",
task.Arn, container.Image)
defer ImagePullDeleteLock.RUnlock()
// Record the task pull_started_at timestamp
pullStart := engine.time().Now()
defer func(startTime time.Time) {
seelog.Infof("Task engine [%s]: Finished pulling container %s in %s",
task.Arn, container.Image, time.Since(startTime).String())
}(pullStart)
ok := task.SetPullStartedAt(pullStart)
if ok {
seelog.Infof("Task engine [%s]: Recording timestamp for starting image pulltime: %s",
task.Arn, pullStart)
}
return engine.pullAndUpdateContainerReference(task, container)
}
func (engine *DockerTaskEngine) serialPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image - %s",
task.Arn, container.Image)
ImagePullDeleteLock.Lock()
seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image - %s",
task.Arn, container.Image)
defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image - %s",
task.Arn, container.Image)
defer ImagePullDeleteLock.Unlock()
pullStart := engine.time().Now()
defer func(startTime time.Time) {
seelog.Infof("Task engine [%s]: finished pulling image [%s] in %s",
task.Arn, container.Image, time.Since(startTime).String())
}(pullStart)
ok := task.SetPullStartedAt(pullStart)
if ok {
seelog.Infof("Task engine [%s]: recording timestamp for starting image pull: %s",
task.Arn, pullStart.String())
}
return engine.pullAndUpdateContainerReference(task, container)
}
func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	// If the task is blocked here for some time and its desired status is set to
	// stopped before it starts pulling the image, then don't pull the image
if task.GetDesiredStatus() == apitask.TaskStopped {
seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping container [%s] pull",
task.Arn, container.Name)
container.SetDesiredStatus(apicontainer.ContainerStopped)
return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
}
// Set the credentials for pull from ECR if necessary
if container.ShouldPullWithExecutionRole() {
executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID())
if !ok {
seelog.Infof("Task engine [%s]: unable to acquire ECR credentials for container [%s]",
task.Arn, container.Name)
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotPullECRContainerError{
FromError: errors.New("engine ecr credentials: not found"),
},
}
}
iamCredentials := executionCredentials.GetIAMRoleCredentials()
container.SetRegistryAuthCredentials(iamCredentials)
// Clean up the ECR pull credentials after pulling
defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{})
}
metadata := engine.client.PullImage(container.Image, container.RegistryAuthentication)
	// Don't add internal images (created by ecs-agent) into the image manager state
if container.IsInternal() {
return metadata
}
pullSucceeded := metadata.Error == nil
engine.updateContainerReference(pullSucceeded, container, task.Arn)
return metadata
}
func (engine *DockerTaskEngine) updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) {
err := engine.imageManager.RecordContainerReference(container)
if err != nil {
seelog.Errorf("Task engine [%s]: Unable to add container reference to image state: %v",
taskArn, err)
}
imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
if ok && pullSucceeded {
imageState.SetPullSucceeded(true)
}
engine.state.AddImageState(imageState)
engine.saver.Save()
}
func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name)
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
dockerContainerName := ""
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
containerMap = make(map[string]*apicontainer.DockerContainer)
} else {
// looking for container that has docker name but not created
for _, v := range containerMap {
if v.Container.Name == container.Name {
dockerContainerName = v.DockerName
break
}
}
}
// Resolve HostConfig
// we have to do this in create, not start, because docker no longer handles
// merging create config with start hostconfig the same; e.g. memory limits
// get lost
dockerClientVersion, versionErr := client.APIVersion()
if versionErr != nil {
return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}}
}
hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion)
if hcerr != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)}
}
if container.AWSLogAuthExecutionRole() {
err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
config, err := task.DockerConfig(container, dockerClientVersion)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
// Augment labels with some metadata from the agent. Explicitly do this last
// such that it will always override duplicates in the provided raw config
// data.
config.Labels[labelTaskARN] = task.Arn
config.Labels[labelContainerName] = container.Name
config.Labels[labelTaskDefinitionFamily] = task.Family
config.Labels[labelTaskDefinitionVersion] = task.Version
config.Labels[labelCluster] = engine.cfg.Cluster
if dockerContainerName == "" {
// only alphanumeric and hyphen characters are allowed
reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+")
name := reInvalidChars.ReplaceAllString(container.Name, "")
dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()
// Pre-add the container in case we stop before the next, more useful,
// AddContainer call. This ensures we have a way to get the container if
// we die before 'createContainer' returns because we can inspect by
// name
engine.state.AddContainer(&apicontainer.DockerContainer{
DockerName: dockerContainerName,
Container: container,
}, task)
seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s",
task.Arn, container.Name, dockerContainerName)
engine.saver.ForceSave()
}
// Create metadata directory and file then populate it with common metadata of all containers of this task
// Afterwards add this directory to the container's mounts if file creation was successful
if engine.cfg.ContainerMetadataEnabled && !container.IsInternal() {
mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name)
if mderr != nil {
seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v",
task.Arn, container.Name, mderr)
}
}
createContainerBegin := time.Now()
metadata := client.CreateContainer(engine.ctx, config, hostConfig,
dockerContainerName, dockerclient.CreateContainerTimeout)
if metadata.DockerID != "" {
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s",
task.Arn, container.Name, metadata.DockerID)
engine.state.AddContainer(&apicontainer.DockerContainer{DockerID: metadata.DockerID,
DockerName: dockerContainerName,
Container: container}, task)
}
container.SetLabels(config.Labels)
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s",
task.Arn, container.Name, metadata.DockerID, time.Since(createContainerBegin))
return metadata
}
func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: starting container: %s", task.Arn, container.Name)
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{
FromError: errors.Errorf("Container belongs to unrecognized task %s", task.Arn),
},
}
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{
FromError: errors.Errorf("Container not recorded as created"),
},
}
}
startContainerBegin := time.Now()
dockerContainerMD := client.StartContainer(engine.ctx, dockerContainer.DockerID, engine.cfg.ContainerStartTimeout)
// Get metadata through container inspection and available task information then write this to the metadata file
// Performs this in the background to avoid delaying container start
// TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and
// add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted
if dockerContainerMD.Error == nil &&
engine.cfg.ContainerMetadataEnabled &&
!container.IsInternal() {
go func() {
err := engine.metadataManager.Update(engine.ctx, dockerContainer.DockerID, task, container.Name)
if err != nil {
seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, container.Name, err)
return
}
container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, container.Name)
}()
}
seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s",
task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin))
return dockerContainerMD
}
func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: setting up container resources for container [%s]",
task.Arn, container.Name)
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrap(err,
"container resource provisioning: unable to build cni configuration"),
},
}
}
// Invoke the libcni to config the network namespace for the container
result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v",
task.Arn, err)
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
Error: ContainerNetworkingError{errors.Wrap(err,
"container resource provisioning: failed to setup network namespace")},
}
}
taskIP := result.IPs[0].Address.IP.String()
seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP)
engine.state.AddTaskIPAddress(taskIP, task.Arn)
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
}
}
// cleanupPauseContainerNetwork will clean up the network namespace of pause container
func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error {
seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn)
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, container)
if err != nil {
return errors.Wrapf(err,
"engine: failed cleanup task network namespace, task: %s", task.String())
}
return engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout)
}
func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer(task *apitask.Task, container *apicontainer.Container) (*ecscni.Config, error) {
cfg, err := task.BuildCNIConfig()
if err != nil {
return nil, errors.Wrapf(err, "engine: build cni configuration from task failed")
}
if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 {
cfg.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address
}
if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 {
cfg.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes
}
// Get the pid of container
containers, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return nil, errors.New("engine: failed to find the pause container, no containers in the task")
}
pauseContainer, ok := containers[container.Name]
if !ok {
return nil, errors.New("engine: failed to find the pause container")
}
containerInspectOutput, err := engine.client.InspectContainer(
engine.ctx,
pauseContainer.DockerName,
dockerclient.InspectContainerTimeout,
)
if err != nil {
return nil, err
}
cfg.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid)
cfg.ContainerID = containerInspectOutput.ID
cfg.BlockInstanceMetdata = engine.cfg.AWSVPCBlockInstanceMetdata
return cfg, nil
}
func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name)
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStopContainerError{
FromError: errors.Errorf("Container belongs to unrecognized task %s", task.Arn),
},
}
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStopContainerError{errors.Errorf("Container not recorded as created")},
}
}
// Cleanup the pause container network namespace before stop the container
if container.Type == apicontainer.ContainerCNIPause {
err := engine.cleanupPauseContainerNetwork(task, container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v",
task.Arn, err)
}
seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn)
}
// timeout is defined by the const 'stopContainerTimeout' and the 'DockerStopTimeout' in the config
timeout := engine.cfg.DockerStopTimeout + dockerclient.StopContainerTimeout
return engine.client.StopContainer(engine.ctx, dockerContainer.DockerID, timeout)
}
func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error {
seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name)
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return errors.New("No such task: " + task.Arn)
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return errors.New("No container named '" + container.Name + "' created in " + task.Arn)
}
return engine.client.RemoveContainer(engine.ctx, dockerContainer.DockerName, dockerclient.RemoveContainerTimeout)
}
// updateTaskUnsafe determines if a new transition needs to be applied to the
// referenced task, and if needed applies it. It should not be called anywhere
// but from 'AddTask' and is protected by the tasksLock lock there.
func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) {
managedTask, ok := engine.managedTasks[task.Arn]
if !ok {
seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.",
task.Arn)
return
}
// Keep the lock because sequence numbers cannot be correct unless they are
// also read in the order addtask was called
// This does block the engine's ability to ingest any new events (including
// stops for past tasks, ack!), but this is necessary for correctness
updateDesiredStatus := update.GetDesiredStatus()
seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
managedTask.emitACSTransition(acsTransition{
desiredStatus: updateDesiredStatus,
seqnum: update.StopSequenceNumber,
})
seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
}
// transitionContainer calls applyContainerState, and then notifies the managed
// task of the change. transitionContainer is called by progressTask and
// by handleStoppedToRunningContainerTransition.
func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to apicontainer.ContainerStatus) {
// Let docker events operate async so that we can continue to handle ACS / other requests
// This is safe because 'applyContainerState' will not mutate the task
metadata := engine.applyContainerState(task, container, to)
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
managedTask.emitDockerContainerChange(dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: to,
DockerContainerMetadata: metadata,
},
})
}
}
// applyContainerState moves the container to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState apicontainer.ContainerStatus) dockerapi.DockerContainerMetadata {
transitionFunction, ok := engine.transitionFunctionMap()[nextState]
if !ok {
seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s",
task.Arn, container.Name, nextState.String())
return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}}
}
metadata := transitionFunction(task, container)
if metadata.Error != nil {
seelog.Infof("Task engine [%s]: error transitioning container [%s] to [%s]: %v",
task.Arn, container.Name, nextState.String(), metadata.Error)
} else {
seelog.Debugf("Task engine [%s]: transitioned container [%s] to [%s]",
task.Arn, container.Name, nextState.String())
engine.saver.Save()
}
return metadata
}
// transitionFunctionMap provides the logic for the simple state machine of the
// DockerTaskEngine. Each desired state maps to a function that can be called
// to try and move the task to that desired state.
func (engine *DockerTaskEngine) transitionFunctionMap() map[apicontainer.ContainerStatus]transitionApplyFunc {
return engine.containerStatusToTransitionFunction
}
type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata)
// State is a function primarily meant for testing usage; it is explicitly not
// part of the TaskEngine interface and should not be relied upon.
// It returns an internal representation of the state of this DockerTaskEngine.
func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState {
return engine.state
}
// Version returns the underlying docker version.
func (engine *DockerTaskEngine) Version() (string, error) {
return engine.client.Version(engine.ctx, dockerclient.VersionTimeout)
}
func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) {
err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name)
if err != nil {
seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, cont.Container.Name, err)
} else {
cont.Container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, cont.Container.Name)
}
}
| 1 | 20,115 | For my understanding, why was this removed? | aws-amazon-ecs-agent | go |
@@ -36,14 +36,13 @@ namespace Nethermind.Runner
protected override (CommandLineApplication, Func<IConfigProvider>, Func<string>) BuildCommandLineApp()
{
string pluginsDirectory = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "plugins");
- Console.WriteLine($"Loading plugins from: {pluginsDirectory}");
if (Directory.Exists(pluginsDirectory))
{
var plugins = Directory.GetFiles(pluginsDirectory, "*.dll");
foreach (string plugin in plugins)
{
- Console.WriteLine($"Loading plugin {plugin} from {pluginsDirectory}");
string pluginName = plugin.Contains("/") ? plugin.Split("/").Last() : plugin.Split("\\").Last();
+ Console.WriteLine($"Loading plugin: {pluginName}");
AssemblyLoadContext.Default.LoadFromAssemblyPath(plugin);
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Runtime.Loader;
using Microsoft.Extensions.CommandLineUtils;
using Nethermind.Config;
using Nethermind.Logging;
using NLog;
using NLog.Config;
namespace Nethermind.Runner
{
public class RunnerApp : RunnerAppBase, IRunnerApp
{
private const string DefaultConfigsDirectory = "configs";
private readonly string _defaultConfigFile = Path.Combine(DefaultConfigsDirectory, "mainnet.cfg");
protected override (CommandLineApplication, Func<IConfigProvider>, Func<string>) BuildCommandLineApp()
{
string pluginsDirectory = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "plugins");
Console.WriteLine($"Loading plugins from: {pluginsDirectory}");
if (Directory.Exists(pluginsDirectory))
{
var plugins = Directory.GetFiles(pluginsDirectory, "*.dll");
foreach (string plugin in plugins)
{
Console.WriteLine($"Loading plugin {plugin} from {pluginsDirectory}");
string pluginName = plugin.Contains("/") ? plugin.Split("/").Last() : plugin.Split("\\").Last();
AssemblyLoadContext.Default.LoadFromAssemblyPath(plugin);
}
}
var loadedAssemblies = AppDomain.CurrentDomain.GetAssemblies().ToList();
loadedAssemblies
.SelectMany(x => x.GetReferencedAssemblies())
.Distinct()
.Where(y => loadedAssemblies.Any((a) => a.FullName == y.FullName) == false)
.ToList()
.ForEach(x => loadedAssemblies.Add(AppDomain.CurrentDomain.Load(x)));
Type configurationType = typeof(IConfig);
var configs = AppDomain.CurrentDomain.GetAssemblies()
.SelectMany(a => a.GetTypes())
.Where(t => configurationType.IsAssignableFrom(t) && !t.IsInterface)
.ToList();
CommandLineApplication app = new CommandLineApplication {Name = "Nethermind.Runner"};
app.HelpOption("-?|-h|--help");
CommandOption configFile = app.Option("-c|--config <configFile>", "config file path", CommandOptionType.SingleValue);
CommandOption dbBasePath = app.Option("-d|--baseDbPath <baseDbPath>", "base db path", CommandOptionType.SingleValue);
CommandOption logLevelOverride = app.Option("-l|--log <logLevel>", "log level", CommandOptionType.SingleValue);
foreach (Type configType in configs)
{
foreach (PropertyInfo propertyInfo in configType.GetProperties(BindingFlags.Public | BindingFlags.Instance))
{
app.Option($"--{configType.Name.Replace("Config", String.Empty)}.{propertyInfo.Name}", $"{configType.Name}.{propertyInfo.Name}", CommandOptionType.SingleValue);
}
}
IConfigProvider BuildConfigProvider()
{
// TODO: dynamically switch log levels from CLI!
if (logLevelOverride.HasValue())
{
string logLevel = logLevelOverride.Value();
NLog.LogLevel nLogLevel = NLog.LogLevel.Info;
switch (logLevel.ToUpperInvariant())
{
case "OFF":
nLogLevel = NLog.LogLevel.Off;
break;
case "ERROR":
nLogLevel = NLog.LogLevel.Error;
break;
case "WARN":
nLogLevel = NLog.LogLevel.Warn;
break;
case "INFO":
nLogLevel = NLog.LogLevel.Info;
break;
case "DEBUG":
nLogLevel = NLog.LogLevel.Debug;
break;
case "TRACE":
nLogLevel = NLog.LogLevel.Trace;
break;
}
Console.WriteLine($"Enabling log level override: {logLevel.ToUpperInvariant()}");
foreach (LoggingRule rule in LogManager.Configuration.LoggingRules)
{
rule.DisableLoggingForLevels(NLog.LogLevel.Trace, nLogLevel);
rule.EnableLoggingForLevels(nLogLevel, NLog.LogLevel.Off);
}
//Call to update existing Loggers created with GetLogger() or //GetCurrentClassLogger()
LogManager.ReconfigExistingLoggers();
}
ConfigProvider configProvider = new ConfigProvider();
Dictionary<string, string> args = new Dictionary<string, string>();
foreach (CommandOption commandOption in app.Options)
{
if (commandOption.HasValue())
{
args.Add(commandOption.LongName, commandOption.Value());
}
}
IConfigSource argsSource = new ArgsConfigSource(args);
configProvider.AddSource(argsSource);
configProvider.AddSource(new EnvConfigSource());
string configFilePath = configFile.HasValue() ? configFile.Value() : _defaultConfigFile;
string configPathVariable = Environment.GetEnvironmentVariable("NETHERMIND_CONFIG");
if (!string.IsNullOrWhiteSpace(configPathVariable))
{
configFilePath = configPathVariable;
}
configFilePath = configFilePath.GetApplicationResourcePath();
if (!Path.HasExtension(configFilePath) && !configFilePath.Contains(Path.DirectorySeparatorChar))
{
string redirectedConfigPath = Path.Combine(DefaultConfigsDirectory, string.Concat(configFilePath, ".cfg"));
Console.WriteLine($"Redirecting config {configFilePath} to {redirectedConfigPath}");
configFilePath = redirectedConfigPath;
if (!File.Exists(configFilePath))
{
throw new InvalidOperationException($"Configuration: {configFilePath} was not found.");
}
}
if (!Path.HasExtension(configFilePath))
{
configFilePath = string.Concat(configFilePath, ".cfg");
}
// Fallback to "{executingDirectory}/configs/{configFile}" if "configs" catalog was not specified.
if (!File.Exists(configFilePath))
{
string configName = Path.GetFileName(configFilePath);
string configDirectory = Path.GetDirectoryName(configFilePath);
string redirectedConfigPath = Path.Combine(configDirectory, DefaultConfigsDirectory, configName);
Console.WriteLine($"Redirecting config {configFilePath} to {redirectedConfigPath}");
configFilePath = redirectedConfigPath;
if (!File.Exists(configFilePath))
{
throw new InvalidOperationException($"Configuration: {configFilePath} was not found.");
}
}
Console.WriteLine($"Reading config file from {configFilePath}");
configProvider.AddSource(new JsonConfigSource(configFilePath));
return configProvider;
}
string GetBaseDbPath()
{
return dbBasePath.HasValue() ? dbBasePath.Value() : null;
}
return (app, BuildConfigProvider, GetBaseDbPath);
}
}
} | 1 | 23,064 | the whole idea was to display information about the plugin directory in case it is missing - we need to add an if...else... and display the plugin dir if it is configured (non-empty) but cannot be found | NethermindEth-nethermind | .cs
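
Reading this comment together with the diff above, one way to satisfy it is to keep the plugin directory visible in the log in both branches. A minimal sketch, assuming the surrounding `BuildCommandLineApp` code; the log wording is illustrative, not the project's actual fix:

```csharp
string pluginsDirectory = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "plugins");
if (Directory.Exists(pluginsDirectory))
{
    foreach (string plugin in Directory.GetFiles(pluginsDirectory, "*.dll"))
    {
        // Path.GetFileName handles both '/' and '\\' separators.
        Console.WriteLine($"Loading plugin: {Path.GetFileName(plugin)}");
        AssemblyLoadContext.Default.LoadFromAssemblyPath(plugin);
    }
}
else
{
    // Keep the configured directory visible in the log when it cannot be found.
    Console.WriteLine($"Plugin directory not found: {pluginsDirectory}");
}
```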
@@ -154,6 +154,11 @@ public class LoginActivity extends BaseActivity implements CustomTabActivityHelp
loginView.requestFocus();
return;
}
+ if (TextUtils.isEmpty(password)) {
+ Toast.makeText(this, getString(R.string.error_field_required), Toast.LENGTH_SHORT).show();
+ passwordView.requestFocus();
+ return;
+ }
if (!(password.length() >= 6)) {
passwordView.setError(getString(R.string.error_invalid_password));
passwordView.requestFocus(); | 1 | package openfoodfacts.github.scrachx.openfood.views;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.ActivityInfo;
import android.hardware.Sensor;
import android.hardware.SensorManager;
import android.net.Uri;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.annotation.NonNull;
import android.support.customtabs.CustomTabsIntent;
import android.support.design.widget.Snackbar;
import android.support.v4.content.ContextCompat;
import android.support.v7.widget.Toolbar;
import android.text.TextUtils;
import android.util.Log;
import android.view.MenuItem;
import android.widget.Button;
import android.widget.EditText;
import android.widget.LinearLayout;
import android.widget.TextView;
import android.widget.Toast;
import com.afollestad.materialdialogs.MaterialDialog;
import net.steamcrafted.loadtoast.LoadToast;
import java.io.IOException;
import java.net.HttpCookie;
import butterknife.BindView;
import butterknife.OnClick;
import okhttp3.ResponseBody;
import openfoodfacts.github.scrachx.openfood.BuildConfig;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService;
import openfoodfacts.github.scrachx.openfood.utils.ShakeDetector;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabActivityHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabsHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.WebViewFallback;
import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.Response;
import retrofit2.Retrofit;
/**
* A login screen that offers login via login/password.
* This Activity connect to the Chrome Custom Tabs Service on startup to prefetch the url.
*/
public class LoginActivity extends BaseActivity implements CustomTabActivityHelper.ConnectionCallback {
@BindView(R.id.toolbar)
Toolbar toolbar;
@BindView(R.id.editTextLogin)
EditText loginView;
@BindView(R.id.editTextPass)
EditText passwordView;
@BindView(R.id.textInfoLogin)
TextView infoLogin;
@BindView(R.id.buttonSave)
Button save;
@BindView(R.id.createaccount)
TextView createAccount;
@BindView(R.id.login_linearlayout)
LinearLayout linearLayout;
private OpenFoodAPIService apiClient;
private CustomTabActivityHelper customTabActivityHelper;
private Uri userLoginUri;
private Uri resetPasswordUri;
private SensorManager mSensorManager;
private Sensor mAccelerometer;
private ShakeDetector mShakeDetector;
// boolean to determine if scan on shake feature should be enabled
private boolean scanOnShake;
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
super.onBackPressed();
return true;
}
return false;
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (getResources().getBoolean(R.bool.portrait_only)) {
setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
}
setContentView(R.layout.activity_login);
setTitle(getString(R.string.txtSignIn));
setSupportActionBar(toolbar);
if (getSupportActionBar() != null) {
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
}
userLoginUri = Uri.parse(getString(R.string.website) + "cgi/user.pl");
resetPasswordUri = Uri.parse(getString(R.string.website) + "cgi/reset_password.pl");
// prefetch the uri
customTabActivityHelper = new CustomTabActivityHelper();
customTabActivityHelper.setConnectionCallback(this);
customTabActivityHelper.mayLaunchUrl(userLoginUri, null, null);
createAccount.setEnabled(true);
final SharedPreferences settings = getSharedPreferences("login", 0);
String loginS = settings.getString(getResources().getString(R.string.user), getResources().getString(R.string.txt_anonymous));
if (loginS.equals(getResources().getString(R.string.user))) {
new MaterialDialog.Builder(this)
.title(R.string.log_in)
.content(R.string.login_true)
.neutralText(R.string.ok_button)
.show();
}
apiClient = new Retrofit.Builder()
.baseUrl(BuildConfig.HOST)
.client(Utils.HttpClientBuilder())
.build()
.create(OpenFoodAPIService.class);
// Get the user preference for scan on shake feature and open ContinuousScanActivity if the user has enabled the feature
mSensorManager = (SensorManager) getSystemService(Context.SENSOR_SERVICE);
mAccelerometer = mSensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER);
mShakeDetector = new ShakeDetector();
SharedPreferences shakePreference = PreferenceManager.getDefaultSharedPreferences(this);
scanOnShake = shakePreference.getBoolean("shakeScanMode", false);
mShakeDetector.setOnShakeListener(new ShakeDetector.OnShakeDetected() {
@Override
public void onShake(int count) {
if (scanOnShake) {
Utils.scan(LoginActivity.this);
}
}
});
}
@OnClick(R.id.buttonSave)
protected void attemptLogin() {
String login = loginView.getText().toString();
String password = passwordView.getText().toString();
if (TextUtils.isEmpty(login)) {
loginView.setError(getString(R.string.error_field_required));
loginView.requestFocus();
return;
}
if (!(password.length() >= 6)) {
passwordView.setError(getString(R.string.error_invalid_password));
passwordView.requestFocus();
return;
}
Snackbar snackbar = Snackbar
.make(linearLayout, R.string.toast_retrieving, Snackbar.LENGTH_LONG);
snackbar.show();
final LoadToast lt = new LoadToast(this);
save.setClickable(false);
lt.setText(getString(R.string.toast_retrieving));
lt.setBackgroundColor(ContextCompat.getColor(this, R.color.blue));
lt.setTextColor(ContextCompat.getColor(this, R.color.white));
lt.show();
final Activity context = this;
apiClient.signIn(login, password, "Sign-in").enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(@NonNull Call<ResponseBody> call, @NonNull Response<ResponseBody> response) {
if (!response.isSuccessful()) {
Toast.makeText(context, context.getString(R.string.errorWeb), Toast.LENGTH_LONG).show();
Utils.hideKeyboard(context);
return;
}
String htmlNoParsed = null;
try {
htmlNoParsed = response.body().string();
} catch (IOException e) {
Log.e("LOGIN", "Unable to parse the login response page", e);
}
SharedPreferences.Editor editor = context.getSharedPreferences("login", 0).edit();
if (htmlNoParsed == null || htmlNoParsed.contains("Incorrect user name or password.") || htmlNoParsed.contains("See you soon!")) {
Toast.makeText(context, context.getString(R.string.errorLogin), Toast.LENGTH_LONG).show();
passwordView.setText("");
loginView.setText("");
infoLogin.setText(R.string.txtInfoLoginNo);
lt.hide();
} else {
// store the user session id (user_session and user_id)
for (HttpCookie httpCookie : HttpCookie.parse(response.headers().get("set-cookie"))) {
if (httpCookie.getDomain().equals(".openbeautyfacts.org") && httpCookie.getPath().equals("/")) {
String[] cookieValues = httpCookie.getValue().split("&");
for (int i = 0; i < cookieValues.length; i++) {
editor.putString(cookieValues[i], cookieValues[++i]);
}
break;
}
}
Snackbar snackbar = Snackbar
.make(linearLayout, R.string.connection, Snackbar.LENGTH_LONG);
snackbar.show();
Toast.makeText(context, context.getResources().getText(R.string.txtToastSaved), Toast.LENGTH_LONG).show();
editor.putString("user", login);
editor.putString("pass", password);
editor.apply();
infoLogin.setText(R.string.txtInfoLoginOk);
setResult(RESULT_OK, new Intent());
finish();
}
Utils.hideKeyboard(context);
}
@Override
public void onFailure(@NonNull Call<ResponseBody> call, @NonNull Throwable t) {
Toast.makeText(context, context.getString(R.string.errorWeb), Toast.LENGTH_LONG).show();
Utils.hideKeyboard(context);
t.printStackTrace();
}
});
save.setClickable(true);
}
@OnClick(R.id.createaccount)
protected void onCreateUser() {
CustomTabsIntent customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getBaseContext(), customTabActivityHelper.getSession());
CustomTabActivityHelper.openCustomTab(this, customTabsIntent, userLoginUri, new WebViewFallback());
}
@OnClick(R.id.forgotpassword)
public void forgotpassword() {
CustomTabsIntent customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getBaseContext(), customTabActivityHelper.getSession());
CustomTabActivityHelper.openCustomTab(this, customTabsIntent, resetPasswordUri, new WebViewFallback());
}
@Override
public void onCustomTabsConnected() {
createAccount.setEnabled(true);
}
@Override
public void onCustomTabsDisconnected() {
//TODO find out what do do with it
createAccount.setEnabled(false);
}
@Override
protected void onStart() {
super.onStart();
customTabActivityHelper.bindCustomTabsService(this);
}
@Override
protected void onStop() {
super.onStop();
customTabActivityHelper.unbindCustomTabsService(this);
createAccount.setEnabled(false);
}
@Override
protected void onDestroy() {
super.onDestroy();
customTabActivityHelper.setConnectionCallback(null);
}
@Override
public void onPause() {
super.onPause();
if (scanOnShake) {
// unregister the listener
mSensorManager.unregisterListener(mShakeDetector, mAccelerometer);
}
}
@Override
public void onResume() {
super.onResume();
if (scanOnShake) {
//register the listener
mSensorManager.registerListener(mShakeDetector, mAccelerometer, SensorManager.SENSOR_DELAY_UI);
}
}
}
| 1 | 67,089 | Can you please set this as an error on the password view, rather than a toast, just to make sure that it is kept consistent. Check out a couple of lines below. | openfoodfacts-openfoodfacts-androidapp | java |
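
The comment above asks for the empty-password case to be reported the same way as the empty-login case a few lines earlier in `attemptLogin`. A sketch of that variant, reusing the string resource already referenced in the file:

```java
if (TextUtils.isEmpty(password)) {
    // Inline field error instead of a Toast, matching the loginView handling above.
    passwordView.setError(getString(R.string.error_field_required));
    passwordView.requestFocus();
    return;
}
```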
@@ -114,7 +114,7 @@ func consumerConnectFlow(t *testing.T, tequilapi *tequilapi_client.Client, consu
})
assert.NoError(t, err)
- connectionStatus, err = tequilapi.Connect(consumerID, proposal.ProviderID, endpoints.ConnectOptions{true})
+ connectionStatus, err = tequilapi.Connect(consumerID, proposal.ProviderID, "openvpn", endpoints.ConnectOptions{true})
assert.NoError(t, err)
err = waitForCondition(func() (bool, error) { | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package e2e
import (
"testing"
"github.com/cihub/seelog"
tequilapi_client "github.com/mysteriumnetwork/node/tequilapi/client"
"github.com/mysteriumnetwork/node/tequilapi/endpoints"
"github.com/stretchr/testify/assert"
)
var (
consumerPassphrase = "localconsumer"
providerID = "0xd1a23227bd5ad77f36ba62badcb78a410a1db6c5"
providerPassphrase = "localprovider"
)
func TestConsumerConnectsToProvider(t *testing.T) {
tequilapiProvider := newTequilapiProvider()
tequilapiConsumer := newTequilapiConsumer()
t.Run("ProviderRegistersIdentityFlow", func(t *testing.T) {
identityRegistrationFlow(t, tequilapiProvider, providerID, providerPassphrase)
})
var consumerID string
t.Run("ConsumerCreatesAndRegistersIdentityFlow", func(t *testing.T) {
consumerID = identityCreateFlow(t, tequilapiConsumer, consumerPassphrase)
identityRegistrationFlow(t, tequilapiConsumer, consumerID, consumerPassphrase)
})
t.Run("ConsumerConnectFlow", func(t *testing.T) {
proposal := consumerPicksProposal(t, tequilapiConsumer)
consumerConnectFlow(t, tequilapiConsumer, consumerID, proposal)
})
}
func identityCreateFlow(t *testing.T, tequilapi *tequilapi_client.Client, idPassphrase string) string {
id, err := tequilapi.NewIdentity(idPassphrase)
assert.NoError(t, err)
seelog.Info("Created new identity: ", id.Address)
return id.Address
}
func identityRegistrationFlow(t *testing.T, tequilapi *tequilapi_client.Client, id, idPassphrase string) {
err := tequilapi.Unlock(id, idPassphrase)
assert.NoError(t, err)
registrationData, err := tequilapi.IdentityRegistrationStatus(id)
assert.NoError(t, err)
assert.False(t, registrationData.Registered)
err = registerIdentity(registrationData)
assert.NoError(t, err)
seelog.Info("Registered identity: ", id)
// now we check identity again
err = waitForCondition(func() (bool, error) {
regStatus, err := tequilapi.IdentityRegistrationStatus(id)
return regStatus.Registered, err
})
assert.NoError(t, err)
}
// expect exactly one proposal
func consumerPicksProposal(t *testing.T, tequilapi *tequilapi_client.Client) tequilapi_client.ProposalDTO {
var proposals []tequilapi_client.ProposalDTO
err := waitForCondition(func() (state bool, stateErr error) {
proposals, stateErr = tequilapi.Proposals()
return len(proposals) == 1, stateErr
})
if err != nil {
assert.Error(t, err)
assert.FailNow(t, "Exactly one proposal is expected - something is not right!")
}
seelog.Info("Selected proposal is: ", proposals[0])
return proposals[0]
}
func consumerConnectFlow(t *testing.T, tequilapi *tequilapi_client.Client, consumerID string, proposal tequilapi_client.ProposalDTO) {
err := topUpAccount(consumerID)
assert.Nil(t, err)
connectionStatus, err := tequilapi.Status()
assert.NoError(t, err)
assert.Equal(t, "NotConnected", connectionStatus.Status)
nonVpnIp, err := tequilapi.GetIP()
assert.NoError(t, err)
seelog.Info("Original consumer IP: ", nonVpnIp)
err = waitForCondition(func() (bool, error) {
status, err := tequilapi.Status()
return status.Status == "NotConnected", err
})
assert.NoError(t, err)
connectionStatus, err = tequilapi.Connect(consumerID, proposal.ProviderID, endpoints.ConnectOptions{true})
assert.NoError(t, err)
err = waitForCondition(func() (bool, error) {
status, err := tequilapi.Status()
return status.Status == "Connected", err
})
assert.NoError(t, err)
vpnIp, err := tequilapi.GetIP()
assert.NoError(t, err)
seelog.Info("Changed consumer IP: ", vpnIp)
// sessions history should be created after connect
sessionsDTO, err := tequilapi.GetSessions()
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionsDTO.Sessions))
se := sessionsDTO.Sessions[0]
assert.Equal(t, uint64(0), se.Duration)
assert.Equal(t, uint64(0), se.BytesSent)
assert.Equal(t, uint64(0), se.BytesReceived)
assert.Equal(t, "e2e-land", se.ProviderCountry)
assert.Equal(t, "openvpn", se.ServiceType)
assert.Equal(t, proposal.ProviderID, se.ProviderID)
assert.Equal(t, connectionStatus.SessionID, se.SessionID)
assert.Equal(t, "New", se.Status)
err = tequilapi.Disconnect()
assert.NoError(t, err)
err = waitForCondition(func() (bool, error) {
status, err := tequilapi.Status()
return status.Status == "NotConnected", err
})
assert.NoError(t, err)
// sessions history should be updated after disconnect
sessionsDTO, err = tequilapi.GetSessions()
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionsDTO.Sessions))
se = sessionsDTO.Sessions[0]
assert.NotEqual(t, uint64(0), se.BytesSent)
assert.NotEqual(t, uint64(0), se.BytesReceived)
assert.Equal(t, "Completed", se.Status)
}
| 1 | 12,600 | Shouldn't we pass here a proposal too? | mysteriumnetwork-node | go |
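
If the question above is about not hard-coding "openvpn", one possible shape is to take the service type from the proposal picked earlier in the test. The `ServiceType` field on the proposal DTO is an assumption here, not a confirmed API:

```go
// Hypothetical: derive the service type from the selected proposal rather than a literal.
connectionStatus, err = tequilapi.Connect(consumerID, proposal.ProviderID,
	proposal.ServiceType, endpoints.ConnectOptions{true})
```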
@@ -85,5 +85,11 @@ def test_dupe_solid_repo_definition():
},
)
- with pytest.raises(DagsterInvalidDefinitionError):
+ with pytest.raises(DagsterInvalidDefinitionError) as exc_info:
repo.get_all_pipelines()
+
+ assert str(exc_info.value) == (
+ 'You have defined two solids named "same" in repository "error_repo". '
+ 'Solid names must be unique within a repository. The solid has been defined '
+ 'in pipeline "first" and it has been defined again in pipeline "second."'
+ ) | 1 | from collections import defaultdict
import pytest
from dagster import (
DagsterInvalidDefinitionError,
PipelineDefinition,
RepositoryDefinition,
SolidDefinition,
lambda_solid,
)
def create_single_node_pipeline(name, called):
called[name] = called[name] + 1
return PipelineDefinition(
name=name,
solids=[
SolidDefinition(
name=name + '_solid',
inputs=[],
outputs=[],
transform_fn=lambda *_args, **_kwargs: None,
)
],
)
def test_repo_definition():
called = defaultdict(int)
repo = RepositoryDefinition(
name='some_repo',
pipeline_dict={
'foo': lambda: create_single_node_pipeline('foo', called),
'bar': lambda: create_single_node_pipeline('bar', called),
},
)
foo_pipeline = repo.get_pipeline('foo')
assert isinstance(foo_pipeline, PipelineDefinition)
assert foo_pipeline.name == 'foo'
assert 'foo' in called
assert called['foo'] == 1
assert 'bar' not in called
bar_pipeline = repo.get_pipeline('bar')
assert isinstance(bar_pipeline, PipelineDefinition)
assert bar_pipeline.name == 'bar'
assert 'foo' in called
assert called['foo'] == 1
assert 'bar' in called
assert called['bar'] == 1
foo_pipeline = repo.get_pipeline('foo')
assert isinstance(foo_pipeline, PipelineDefinition)
assert foo_pipeline.name == 'foo'
assert 'foo' in called
assert called['foo'] == 1
pipelines = repo.get_all_pipelines()
assert set(['foo', 'bar']) == {pipeline.name for pipeline in pipelines}
assert repo.get_solid_def('foo_solid').name == 'foo_solid'
assert repo.get_solid_def('bar_solid').name == 'bar_solid'
def test_dupe_solid_repo_definition():
@lambda_solid(name='same')
def noop():
pass
@lambda_solid(name='same')
def noop2():
pass
repo = RepositoryDefinition(
'error_repo',
pipeline_dict={
'first': lambda: PipelineDefinition(name='first', solids=[noop]),
'second': lambda: PipelineDefinition(name='second', solids=[noop2]),
},
)
with pytest.raises(DagsterInvalidDefinitionError):
repo.get_all_pipelines()
| 1 | 13,195 | im not a huge fan of exact text match in these tests, not sure what a good solution is that solves the same problem | dagster-io-dagster | py |
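
A common middle ground between an exact-string assertion and no message check at all is `pytest.raises(..., match=...)`, which searches the given regular expression against the exception text. A sketch against the test in the diff above:

```python
with pytest.raises(DagsterInvalidDefinitionError, match=r'two solids named "same"'):
    repo.get_all_pipelines()
```

Only the stable part of the message is pinned down, so rewording the rest of the error text does not break the test.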
@@ -182,6 +182,13 @@ func (s *server) setupRouting() {
})),
)
+ handle("/stamps/topup/{id}/{amount}", web.ChainHandlers(
+ s.gatewayModeForbidEndpointHandler,
+ web.FinalHandler(jsonhttp.MethodHandler{
+ "PATCH": http.HandlerFunc(s.postageTopUpHandler),
+ })),
+ )
+
handle("/stewardship/{address}", jsonhttp.MethodHandler{
"PUT": web.ChainHandlers(
s.gatewayModeForbidEndpointHandler, | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"fmt"
"net/http"
"strings"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"resenje.org/web"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/logging/httpaccess"
"github.com/ethersphere/bee/pkg/swarm"
)
func (s *server) setupRouting() {
const (
apiVersion = "v1" // Only one api version exists, this should be configurable with more.
rootPath = "/" + apiVersion
)
router := mux.NewRouter()
// handle is a helper closure which simplifies the router setup.
handle := func(path string, handler http.Handler) {
router.Handle(path, handler)
router.Handle(rootPath+path, handler)
}
router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Ethereum Swarm Bee")
})
router.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *\nDisallow: /")
})
handle("/bytes", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("bytes-upload"),
web.FinalHandlerFunc(s.bytesUploadHandler),
),
})
handle("/bytes/{address}", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
s.newTracingHandler("bytes-download"),
web.FinalHandlerFunc(s.bytesGetHandler),
),
})
handle("/chunks", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize),
web.FinalHandlerFunc(s.chunkUploadHandler),
),
})
handle("/chunks/stream", web.ChainHandlers(
s.newTracingHandler("chunks-stream-upload"),
web.FinalHandlerFunc(s.chunkUploadStreamHandler),
))
handle("/chunks/{addr}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.chunkGetHandler),
})
handle("/soc/{owner}/{id}", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize),
web.FinalHandlerFunc(s.socUploadHandler),
),
})
handle("/feeds/{owner}/{topic}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.feedGetHandler),
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize),
web.FinalHandlerFunc(s.feedPostHandler),
),
})
handle("/bzz", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("bzz-upload"),
web.FinalHandlerFunc(s.bzzUploadHandler),
),
})
handle("/bzz/{address}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u := r.URL
u.Path += "/"
http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
}))
handle("/bzz/{address}/{path:.*}", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
s.newTracingHandler("bzz-download"),
web.FinalHandlerFunc(s.bzzDownloadHandler),
),
"PATCH": web.ChainHandlers(
s.newTracingHandler("bzz-patch"),
web.FinalHandlerFunc(s.bzzPatchHandler),
),
})
handle("/pss/send/{topic}/{targets}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkSize),
web.FinalHandlerFunc(s.pssPostHandler),
),
})),
)
handle("/pss/subscribe/{topic}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandlerFunc(s.pssWsHandler),
))
handle("/tags", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.listTagsHandler),
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(1024),
web.FinalHandlerFunc(s.createTagHandler),
),
})),
)
handle("/tags/{id}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.getTagHandler),
"DELETE": http.HandlerFunc(s.deleteTagHandler),
"PATCH": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(1024),
web.FinalHandlerFunc(s.doneSplitHandler),
),
})),
)
handle("/pins", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.listPinnedRootHashes),
})),
)
handle("/pins/{reference}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.getPinnedRootHash),
"POST": http.HandlerFunc(s.pinRootHash),
"DELETE": http.HandlerFunc(s.unpinRootHash),
})),
)
handle("/stamps", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampsHandler),
})),
)
handle("/stamps/{id}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.postageGetStampHandler),
})),
)
handle("/stamps/{amount}/{depth}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.postageCreateHandler),
})),
)
handle("/stewardship/{address}", jsonhttp.MethodHandler{
"PUT": web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandlerFunc(s.stewardshipPutHandler),
),
})
s.Handler = web.ChainHandlers(
httpaccess.NewHTTPAccessLogHandler(s.logger, logrus.InfoLevel, s.tracer, "api access"),
handlers.CompressHandler,
// todo: add recovery handler
s.responseCodeMetricsHandler,
s.pageviewMetricsHandler,
func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if o := r.Header.Get("Origin"); o != "" && s.checkOrigin(r) {
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Origin", o)
w.Header().Set("Access-Control-Allow-Headers", "Origin, Accept, Authorization, Content-Type, X-Requested-With, Access-Control-Request-Headers, Access-Control-Request-Method, Swarm-Tag, Swarm-Pin, Swarm-Encrypt, Swarm-Index-Document, Swarm-Error-Document, Swarm-Collection, Swarm-Postage-Batch-Id, Gas-Price")
w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS, POST, PUT, DELETE")
w.Header().Set("Access-Control-Max-Age", "3600")
}
h.ServeHTTP(w, r)
})
},
s.gatewayModeForbidHeadersHandler,
web.FinalHandler(router),
)
}
func (s *server) gatewayModeForbidEndpointHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if s.GatewayMode {
s.logger.Tracef("gateway mode: forbidden %s", r.URL.String())
jsonhttp.Forbidden(w, nil)
return
}
h.ServeHTTP(w, r)
})
}
func (s *server) gatewayModeForbidHeadersHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if s.GatewayMode {
if strings.ToLower(r.Header.Get(SwarmPinHeader)) == "true" {
s.logger.Tracef("gateway mode: forbidden pinning %s", r.URL.String())
jsonhttp.Forbidden(w, "pinning is disabled")
return
}
if strings.ToLower(r.Header.Get(SwarmEncryptHeader)) == "true" {
s.logger.Tracef("gateway mode: forbidden encryption %s", r.URL.String())
jsonhttp.Forbidden(w, "encryption is disabled")
return
}
}
h.ServeHTTP(w, r)
})
}
| 1 | 15,282 | this should _not_ be in the api. all postage APIs have moved to the debugAPI | ethersphere-bee | go |
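
A rough sketch of what moving the route would look like, reusing the handler and the `jsonhttp` helpers from the file above. The debug API's actual router setup is not shown here, so the registration site and the `handle` helper are assumptions:

```go
// Assumed to live in the debug API's route setup rather than in the public API router:
handle("/stamps/topup/{id}/{amount}", jsonhttp.MethodHandler{
	"PATCH": http.HandlerFunc(s.postageTopUpHandler),
})
```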
@@ -51,9 +51,13 @@ def _path_hash(path, transform, kwargs):
return digest_string(srcinfo)
def _is_internal_node(node):
- # at least one of an internal nodes children are dicts
- # some (group args) may not be dicts
- return any(isinstance(x, dict) for x in itervalues(node))
+ return not _is_leaf_node(node)
+
+def _is_leaf_node(node):
+ """
+ A leaf node either has no children or defines a `file` key
+ """
+ return not node or node.get(RESERVED['file'])
def _pythonize_name(name):
safename = re.sub('[^A-Za-z0-9]+', '_', name).strip('_') | 1 | """
parse build file, serialize package
"""
from collections import defaultdict, Iterable
import importlib
import json
from types import ModuleType
import os
import re
from pandas.errors import ParserError
from six import iteritems, itervalues
import yaml
from tqdm import tqdm
from .const import DEFAULT_BUILDFILE, PACKAGE_DIR_NAME, PARSERS, RESERVED
from .core import PackageFormat, BuildException, exec_yaml_python, load_yaml
from .hashing import digest_file, digest_string
from .package import Package, ParquetLib
from .store import PackageStore, VALID_NAME_RE, StoreException
from .util import FileWithReadProgress
from . import check_functions as qc # pylint:disable=W0611
def _have_pyspark():
"""
Check if we're running Pyspark
"""
if _have_pyspark.flag is None:
try:
if Package.get_parquet_lib() is ParquetLib.SPARK:
import pyspark # pylint:disable=W0612
_have_pyspark.flag = True
else:
_have_pyspark.flag = False
except ImportError:
_have_pyspark.flag = False
return _have_pyspark.flag
_have_pyspark.flag = None
def _path_hash(path, transform, kwargs):
"""
Generate a hash of source file path + transform + args
"""
sortedargs = ["%s:%r:%s" % (key, value, type(value))
for key, value in sorted(iteritems(kwargs))]
srcinfo = "{path}:{transform}:{{{kwargs}}}".format(path=os.path.abspath(path),
transform=transform,
kwargs=",".join(sortedargs))
return digest_string(srcinfo)
def _is_internal_node(node):
# at least one of an internal nodes children are dicts
# some (group args) may not be dicts
return any(isinstance(x, dict) for x in itervalues(node))
def _pythonize_name(name):
safename = re.sub('[^A-Za-z0-9]+', '_', name).strip('_')
if safename and safename[0].isdigit():
safename = "n%s" % safename
if not VALID_NAME_RE.match(safename):
raise BuildException("Unable to determine a Python-legal name for %r" % name)
return safename
def _run_checks(dataframe, checks, checks_contents, nodename, rel_path, target, env='default'):
_ = env # TODO: env support for checks
print("Running data integrity checks...")
checks_list = re.split(r'[,\s]+', checks.strip())
unknown_checks = set(checks_list) - set(checks_contents)
if unknown_checks:
raise BuildException("Unknown check(s) '%s' for %s @ %s" %
(", ".join(list(unknown_checks)), rel_path, target))
for check in checks_list:
res = exec_yaml_python(checks_contents[check], dataframe, nodename, rel_path, target)
if not res and res is not None:
raise BuildException("Data check failed: %s on %s @ %s" % (
check, rel_path, target))
def _build_node(build_dir, package, name, node, fmt, target='pandas', checks_contents=None,
dry_run=False, env='default', ancestor_args={}):
"""
Parameters
----------
ancestor_args : dict
Any key:value pairs inherited from an ancestor. Users can this define kwargs
that affect entire subtrees (e.g. transform: csv for 500 .txt files)
Position of definition in file also matters and allows for composition
and overriding of ancestor or peer values.
"""
if _is_internal_node(node):
# add anything whose value is not a dict to the group_arg stack
# TODO: there might be some pandas kwargs that take dictionaries as values
# in which case this code will break by treating a kwarg as a package node
# NOTE: YAML parsing does not guarantee key order so we have to set all
# of the group args in one shot for consistent application to subtrees
group_args = ancestor_args.copy()
group_args.update({ k: v for k, v in iteritems(node) if not isinstance(v, dict) })
groups = { k: v for k, v in iteritems(node) if isinstance(v, dict) }
for child_name, child_table in groups.items():
if not isinstance(child_name, str) or not VALID_NAME_RE.match(child_name):
raise StoreException("Invalid node name: %r" % child_name)
if child_name == RESERVED['file']:
raise StoreException("Reserved word 'file' not permitted on group node")
_build_node(build_dir, package, name + '/' + child_name, child_table, fmt,
checks_contents=checks_contents, dry_run=dry_run, env=env, ancestor_args=group_args)
else: # leaf node
rel_path = node.get(RESERVED['file'])
if not rel_path:
raise BuildException("Leaf nodes must define a %s key" % RESERVED['file'])
path = os.path.join(build_dir, rel_path)
# get either the locally defined transform or inherit from an ancestor
transform = node.get(RESERVED['transform']) or ancestor_args.get(RESERVED['transform'])
ID = 'id' # pylint:disable=C0103
if transform:
transform = transform.lower()
if (transform not in PARSERS) and (transform != ID):
raise BuildException("Unknown transform '%s' for %s @ %s" %
(transform, rel_path, target))
else: # guess transform if user doesn't provide one
_, ext = splitext_no_dot(rel_path)
transform = ext
if transform not in PARSERS:
transform = ID
print("Inferring 'transform: %s' for %s" % (transform, rel_path))
# TODO: parse/check environments:
# environments = node.get(RESERVED['environments'])
checks = node.get(RESERVED['checks'])
if transform == ID:
#TODO move this to a separate function
if checks:
with open(path, 'r') as fd:
data = fd.read()
_run_checks(data, checks, checks_contents, name, rel_path, target, env=env)
if not dry_run:
print("Copying %s..." % path)
package.save_file(path, name, rel_path)
else:
handler_args = _remove_keywords(ancestor_args)
# merge ancestor args with local args (local wins if conflict)
handler_args.update(_remove_keywords(node))
# Check Cache
store = PackageStore()
path_hash = _path_hash(path, transform, handler_args)
source_hash = digest_file(path)
cachedobjs = []
if os.path.exists(store.cache_path(path_hash)):
with open(store.cache_path(path_hash), 'r') as entry:
cache_entry = json.load(entry)
if cache_entry['source_hash'] == source_hash:
cachedobjs = cache_entry['obj_hashes']
assert isinstance(cachedobjs, list)
if cachedobjs:
# FIXME: Add already present object to the package
package.save_cached_df(cachedobjs, name, rel_path, transform, target, fmt)
else:
# read source file into DataFrame
print("Serializing %s..." % path)
if _have_pyspark():
dataframe = _file_to_spark_data_frame(transform, path, target, handler_args)
else:
dataframe = _file_to_data_frame(transform, path, target, handler_args)
if checks:
# TODO: test that design works for internal nodes... e.g. iterating
# over the children and getting/checking the data, err msgs, etc.
_run_checks(dataframe, checks, checks_contents, name, rel_path, target, env=env)
# serialize DataFrame to file(s)
if not dry_run:
print("Saving as binary dataframe...")
obj_hashes = package.save_df(dataframe, name, rel_path, transform, target, fmt)
# Add to cache
cache_entry = dict(
source_hash=source_hash,
obj_hashes=obj_hashes
)
with open(store.cache_path(path_hash), 'w') as entry:
json.dump(cache_entry, entry)
def _remove_keywords(d):
"""
copy the dict, filter_keywords
Parameters
----------
d : dict
"""
return { k:v for k, v in iteritems(d) if k not in RESERVED }
def _file_to_spark_data_frame(ext, path, target, handler_args):
from pyspark import sql as sparksql
_ = target # TODO: why is this unused?
ext = ext.lower() # ensure that case doesn't matter
logic = PARSERS.get(ext)
kwargs = dict(logic['kwargs'])
kwargs.update(handler_args)
spark = sparksql.SparkSession.builder.getOrCreate()
dataframe = None
reader = None
# FIXME: Add json support?
if logic['attr'] == "read_csv":
sep = kwargs.get('sep')
reader = spark.read.format("csv").option("header", "true")
if sep:
reader = reader.option("delimiter", sep)
dataframe = reader.load(path)
for col in dataframe.columns:
pcol = _pythonize_name(col)
if col != pcol:
dataframe = dataframe.withColumnRenamed(col, pcol)
else:
dataframe = _file_to_data_frame(ext, path, target, handler_args)
return dataframe
def _file_to_data_frame(ext, path, target, handler_args):
_ = target # TODO: why is this unused?
logic = PARSERS.get(ext)
the_module = importlib.import_module(logic['module'])
if not isinstance(the_module, ModuleType):
raise BuildException("Missing required module: %s." % logic['module'])
# allow user to specify handler kwargs and override default kwargs
kwargs = logic['kwargs'].copy()
kwargs.update(handler_args)
failover = logic.get('failover', None)
handler = getattr(the_module, logic['attr'], None)
if handler is None:
raise BuildException("Invalid handler: %r" % logic['attr'])
dataframe = None
try_again = False
try:
size = os.path.getsize(path)
with tqdm(total=size, unit='B', unit_scale=True) as progress:
def _callback(count):
progress.update(count)
with FileWithReadProgress(path, _callback) as fd:
dataframe = handler(fd, **kwargs)
except (UnicodeDecodeError, ParserError) as error:
if failover:
warning = "Warning: failed fast parse on input %s.\n" % path
warning += "Switching to Python engine."
print(warning)
try_again = True
else:
raise error
except ValueError as error:
raise BuildException(str(error))
if try_again:
failover_args = {}
failover_args.update(failover)
failover_args.update(kwargs)
dataframe = handler(path, **failover_args)
# cast object columns to strings
for name, col in dataframe.iteritems():
if col.dtype == 'object':
dataframe[name] = col.astype(str)
return dataframe
def build_package(username, package, yaml_path, checks_path=None, dry_run=False, env='default'):
"""
Builds a package from a given Yaml file and installs it locally.
Returns the name of the package.
"""
def find(key, value):
"""
find matching nodes recursively;
only descend iterables
"""
if isinstance(value, Iterable):
for k, v in iteritems(value):
if k == key:
yield v
elif isinstance(v, dict):
for result in find(key, v):
yield result
elif isinstance(v, list):
for item in v:
for result in find(key, item):
yield result
build_data = load_yaml(yaml_path)
# default to 'checks.yml' if build.yml contents: contains checks, but
# there's no inlined checks: defined by build.yml
if (checks_path is None and list(find('checks', build_data['contents'])) and
'checks' not in build_data):
checks_path = 'checks.yml'
checks_contents = load_yaml(checks_path, optional=True)
elif checks_path is not None:
checks_contents = load_yaml(checks_path)
else:
checks_contents = None
build_package_from_contents(username, package, os.path.dirname(yaml_path), build_data,
checks_contents=checks_contents, dry_run=dry_run, env=env)
def build_package_from_contents(username, package, build_dir, build_data,
checks_contents=None, dry_run=False, env='default'):
contents = build_data.get('contents', {})
if not isinstance(contents, dict):
raise BuildException("'contents' must be a dictionary")
pkgformat = build_data.get('format', PackageFormat.default.value)
if not isinstance(pkgformat, str):
raise BuildException("'format' must be a string")
try:
pkgformat = PackageFormat(pkgformat)
except ValueError:
raise BuildException("Unsupported format: %r" % pkgformat)
# HDF5 no longer supported.
if pkgformat is PackageFormat.HDF5:
raise BuildException("HDF5 format is no longer supported; please use PARQUET instead.")
# inline checks take precedence
checks_contents = {} if checks_contents is None else checks_contents
checks_contents.update(build_data.get('checks', {}))
store = PackageStore()
newpackage = store.create_package(username, package, dry_run=dry_run)
_build_node(build_dir, newpackage, '', contents, pkgformat,
checks_contents=checks_contents, dry_run=dry_run, env=env)
if not dry_run:
newpackage.save_contents()
def splitext_no_dot(filename):
"""
Wrap os.path.splitext to return the name and the extension
without the '.' (e.g., csv instead of .csv)
"""
name, ext = os.path.splitext(filename)
ext = ext.lower()
return name, ext.strip('.')
def generate_contents(startpath, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build file (yaml) based on the contents of a
directory tree.
"""
def _ignored_name(name):
return (
name.startswith('.') or
name == PACKAGE_DIR_NAME or
name.endswith('~') or
name == outfilename
)
def _generate_contents(dir_path):
safename_duplicates = defaultdict(list)
for name in os.listdir(dir_path):
if _ignored_name(name):
continue
path = os.path.join(dir_path, name)
if os.path.isdir(path):
nodename = name
ext = None
elif os.path.isfile(path):
nodename, ext = splitext_no_dot(name)
else:
continue
safename = _pythonize_name(nodename)
safename_duplicates[safename].append((name, nodename, ext))
safename_to_name = {}
for safename, duplicates in iteritems(safename_duplicates):
for name, nodename, ext in duplicates:
if len(duplicates) > 1 and ext:
new_safename = _pythonize_name(name) # Name with ext
else:
new_safename = safename
existing_name = safename_to_name.get(new_safename)
if existing_name is not None:
raise BuildException(
"Duplicate node names. %r was renamed to %r, which overlaps with %r" % (
name, new_safename, existing_name)
)
safename_to_name[new_safename] = name
contents = {}
for safename, name in iteritems(safename_to_name):
path = os.path.join(dir_path, name)
if os.path.isdir(path):
data = _generate_contents(path)
else:
rel_path = os.path.relpath(path, startpath)
data = dict(file=rel_path)
contents[safename] = data
return contents
return dict(
contents=_generate_contents(startpath)
)
def generate_build_file(startpath, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build file (yaml) based on the contents of a
directory tree.
"""
buildfilepath = os.path.join(startpath, outfilename)
if os.path.exists(buildfilepath):
raise BuildException("Build file %s already exists." % buildfilepath)
contents = generate_contents(startpath, outfilename)
with open(buildfilepath, 'w') as outfile:
yaml.dump(contents, outfile, default_flow_style=False)
return buildfilepath
| 1 | 15,492 | Including both functions seems like overkill for this PR since only _is_internal_node is ever used. Keep them if you think they'll both be used in the future, but if not, the code will be easier to read if you collapse the logic into a single function. | quiltdata-quilt | py |
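
A sketch of the single-function option the comment suggests, assuming the `RESERVED['file']` convention from the diff; whether an empty node should count as a leaf is kept as in the patch:

```python
def _is_internal_node(node):
    # A node is a leaf when it is empty or defines a `file` key; anything else is internal.
    return bool(node) and not node.get(RESERVED['file'])
```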
@@ -41,7 +41,7 @@ func GenerateTestCertificate(path string, certFileName string, keyFileName strin
SerialNumber: big.NewInt(1234),
Subject: pkix.Name{
Country: []string{"test"},
- Organization: []string{"testor"},
+ Organization: []string{"tester"},
},
DNSNames: []string{"localhost"},
NotBefore: time.Now(), | 1 | /*
Copyright 2018 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"math/big"
"os"
"time"
certutil "k8s.io/client-go/util/cert"
)
//GenerateTestCertificate generates fake certificates and stores them at the path specified.
//It accepts 3 arguments path, certFileName and keyFileName
// "path" is the directory path at which the directory is to be created,
// "certFileName" & "keyFileName" refers to the name of the file to be created without the extension
func GenerateTestCertificate(path string, certFileName string, keyFileName string) error {
template := &x509.Certificate{
IsCA: true,
BasicConstraintsValid: true,
SubjectKeyId: []byte{1, 2, 3},
SerialNumber: big.NewInt(1234),
Subject: pkix.Name{
Country: []string{"test"},
Organization: []string{"testor"},
},
DNSNames: []string{"localhost"},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(5, 5, 5),
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
}
// generate private key
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return err
}
publicKey := &privateKey.PublicKey
// create a self-signed certificate. template = parent
var parent = template
cert, err := x509.CreateCertificate(rand.Reader, template, parent, publicKey, privateKey)
if err != nil {
return err
}
err = os.MkdirAll(path, 0777)
if err != nil {
return err
}
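// Encode the DER certificate and the PKCS#1 private key as PEM blocks on disk
// (<certFileName>.crt and <keyFileName>.key).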
pKey := x509.MarshalPKCS1PrivateKey(privateKey)
certFilePEM := pem.Block{
Type: certutil.CertificateBlockType,
Bytes: cert}
err = createPEMfile(path+certFileName+".crt", certFilePEM)
if err != nil {
return err
}
keyFilePEM := pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: pKey}
err = createPEMfile(path+keyFileName+".key", keyFilePEM)
if err != nil {
return err
}
return nil
}
//createPEMfile() creates an encoded file at the path given, with PEM Block specified
func createPEMfile(path string, pemBlock pem.Block) error {
// this will create plain text PEM file.
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
err = pem.Encode(file, &pemBlock)
return err
}
| 1 | 23,303 | It seems that the two words mean the same thing. | kubeedge-kubeedge | go |
@@ -271,13 +271,13 @@ void RaftPart::start(std::vector<HostAddr>&& peers, bool asLearner) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
+ term_ = proposedTerm_ = lastLogTerm_;
// Set the quorum number
quorum_ = (peers.size() + 1) / 2;
auto logIdAndTerm = lastCommittedLogId();
committedLogId_ = logIdAndTerm.first;
- term_ = proposedTerm_ = logIdAndTerm.second;
if (lastLogId_ < committedLogId_) {
LOG(INFO) << idStr_ << "Reset lastLogId " << lastLogId_ | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/raftex/RaftPart.h"
#include <folly/io/async/EventBaseManager.h>
#include <folly/executors/IOThreadPoolExecutor.h>
#include <folly/gen/Base.h>
#include "gen-cpp2/RaftexServiceAsyncClient.h"
#include "base/CollectNSucceeded.h"
#include "thrift/ThriftClientManager.h"
#include "network/NetworkUtils.h"
#include "thread/NamedThread.h"
#include "kvstore/wal/FileBasedWal.h"
#include "kvstore/raftex/LogStrListIterator.h"
#include "kvstore/raftex/Host.h"
#include "time/WallClock.h"
#include "base/SlowOpTracker.h"
DEFINE_uint32(raft_heartbeat_interval_secs, 5,
"Seconds between each heartbeat");
DEFINE_uint64(raft_snapshot_timeout, 60 * 5, "Max seconds between two snapshot requests");
DEFINE_uint32(max_batch_size, 256, "The max number of logs in a batch");
DEFINE_int32(wal_ttl, 86400, "Default wal ttl");
DEFINE_int64(wal_file_size, 16 * 1024 * 1024, "Default wal file size");
DEFINE_int32(wal_buffer_size, 8 * 1024 * 1024, "Default wal buffer size");
DEFINE_int32(wal_buffer_num, 2, "Default wal buffer number");
DEFINE_bool(trace_raft, false, "Enable trace one raft request");
namespace nebula {
namespace raftex {
using nebula::network::NetworkUtils;
using nebula::thrift::ThriftClientManager;
using nebula::wal::FileBasedWal;
using nebula::wal::FileBasedWalPolicy;
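/**
 * Iterates over a batch of logs buffered on the leader. Atomic-op logs are
 * evaluated through the supplied callback as they are encountered; the
 * iterator deliberately becomes invalid at atomic-op and COMMAND boundaries
 * so that such logs are replicated in their own batch.
 */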
class AppendLogsIterator final : public LogIterator {
public:
AppendLogsIterator(LogID firstLogId,
TermID termId,
RaftPart::LogCache logs,
folly::Function<std::string(AtomicOp op)> opCB)
: firstLogId_(firstLogId)
, termId_(termId)
, logId_(firstLogId)
, logs_(std::move(logs))
, opCB_(std::move(opCB)) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
AppendLogsIterator(const AppendLogsIterator&) = delete;
AppendLogsIterator(AppendLogsIterator&&) = default;
AppendLogsIterator& operator=(const AppendLogsIterator&) = delete;
AppendLogsIterator& operator=(AppendLogsIterator&&) = default;
bool leadByAtomicOp() const {
return leadByAtomicOp_;
}
bool hasNonAtomicOpLogs() const {
return hasNonAtomicOpLogs_;
}
LogID firstLogId() const {
return firstLogId_;
}
// Return true if the current log is an AtomicOp, otherwise return false
bool processAtomicOp() {
while (idx_ < logs_.size()) {
auto& tup = logs_.at(idx_);
auto logType = std::get<1>(tup);
if (logType != LogType::ATOMIC_OP) {
// Not an AtomicOp
return false;
}
// Process AtomicOp log
CHECK(!!opCB_);
opResult_ = opCB_(std::move(std::get<3>(tup)));
if (opResult_.size() > 0) {
// AtomicOp Succeeded
return true;
} else {
// AtomicOp failed, move to the next log, but do not increment the logId_
++idx_;
}
}
// Reached the end
return false;
}
LogIterator& operator++() override {
++idx_;
++logId_;
if (idx_ < logs_.size()) {
currLogType_ = logType();
valid_ = currLogType_ != LogType::ATOMIC_OP;
if (valid_) {
hasNonAtomicOpLogs_ = true;
}
valid_ = valid_ && lastLogType_ != LogType::COMMAND;
lastLogType_ = currLogType_;
} else {
valid_ = false;
}
return *this;
}
// The iterator becomes invalid when exhausting the logs
// **OR** running into an AtomicOp log
bool valid() const override {
return valid_;
}
LogID logId() const override {
DCHECK(valid());
return logId_;
}
TermID logTerm() const override {
return termId_;
}
ClusterID logSource() const override {
DCHECK(valid());
return std::get<0>(logs_.at(idx_));
}
folly::StringPiece logMsg() const override {
DCHECK(valid());
if (currLogType_ == LogType::ATOMIC_OP) {
return opResult_;
} else {
return std::get<2>(logs_.at(idx_));
}
}
// Return true when there is no more log left for processing
bool empty() const {
return idx_ >= logs_.size();
}
// Resume the iterator so that we can continue to process the remaining logs
void resume() {
CHECK(!valid_);
if (!empty()) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
}
LogType logType() const {
return std::get<1>(logs_.at(idx_));
}
private:
size_t idx_{0};
bool leadByAtomicOp_{false};
bool hasNonAtomicOpLogs_{false};
bool valid_{true};
LogType lastLogType_{LogType::NORMAL};
LogType currLogType_{LogType::NORMAL};
std::string opResult_;
LogID firstLogId_;
TermID termId_;
LogID logId_;
RaftPart::LogCache logs_;
folly::Function<std::string(AtomicOp op)> opCB_;
};
/********************************************************
*
* Implementation of RaftPart
*
*******************************************************/
RaftPart::RaftPart(ClusterID clusterId,
GraphSpaceID spaceId,
PartitionID partId,
HostAddr localAddr,
const folly::StringPiece walRoot,
std::shared_ptr<folly::IOThreadPoolExecutor> pool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> executor,
std::shared_ptr<SnapshotManager> snapshotMan)
: idStr_{folly::stringPrintf("[Port: %d, Space: %d, Part: %d] ",
localAddr.second, spaceId, partId)}
, clusterId_{clusterId}
, spaceId_{spaceId}
, partId_{partId}
, addr_{localAddr}
, status_{Status::STARTING}
, role_{Role::FOLLOWER}
, leader_{0, 0}
, ioThreadPool_{pool}
, bgWorkers_{workers}
, executor_(executor)
, snapshot_(snapshotMan)
, weight_(1) {
FileBasedWalPolicy policy;
policy.ttl = FLAGS_wal_ttl;
policy.fileSize = FLAGS_wal_file_size;
policy.bufferSize = FLAGS_wal_buffer_size;
policy.numBuffers = FLAGS_wal_buffer_num;
wal_ = FileBasedWal::getWal(walRoot,
idStr_,
policy,
[this] (LogID logId,
TermID logTermId,
ClusterID logClusterId,
const std::string& log) {
return this->preProcessLog(logId,
logTermId,
logClusterId,
log);
});
logs_.reserve(FLAGS_max_batch_size);
CHECK(!!executor_) << idStr_ << "Should not be nullptr";
}
RaftPart::~RaftPart() {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition has stopped
CHECK(status_ == Status::STOPPED);
LOG(INFO) << idStr_ << " The part has been destroyed...";
}
const char* RaftPart::roleStr(Role role) const {
switch (role) {
case Role::LEADER:
return "Leader";
case Role::FOLLOWER:
return "Follower";
case Role::CANDIDATE:
return "Candidate";
case Role::LEARNER:
return "Learner";
default:
LOG(FATAL) << idStr_ << "Invalid role";
}
return nullptr;
}
void RaftPart::start(std::vector<HostAddr>&& peers, bool asLearner) {
std::lock_guard<std::mutex> g(raftLock_);
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
// Set the quorum number
quorum_ = (peers.size() + 1) / 2;
auto logIdAndTerm = lastCommittedLogId();
committedLogId_ = logIdAndTerm.first;
term_ = proposedTerm_ = logIdAndTerm.second;
if (lastLogId_ < committedLogId_) {
LOG(INFO) << idStr_ << "Reset lastLogId " << lastLogId_
<< " to be the committedLogId " << committedLogId_;
lastLogId_ = committedLogId_;
lastLogTerm_ = term_;
wal_->reset();
}
LOG(INFO) << idStr_ << "There are "
<< peers.size()
<< " peer hosts, and total "
<< peers.size() + 1
<< " copies. The quorum is " << quorum_ + 1
<< ", as learner " << asLearner
<< ", lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_
<< ", term " << term_;
// Start all peer hosts
for (auto& addr : peers) {
LOG(INFO) << idStr_ << "Add peer " << addr;
auto hostPtr = std::make_shared<Host>(addr, shared_from_this());
hosts_.emplace_back(hostPtr);
}
// Change the status
status_ = Status::RUNNING;
if (asLearner) {
role_ = Role::LEARNER;
}
startTimeMs_ = time::WallClock::fastNowInMilliSec();
// Set up a leader election task
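// The first poll is delayed by a random amount between roughly 100 ms and 1 s
// so that peers do not all start an election at the same moment.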
size_t delayMS = 100 + folly::Random::rand32(900);
bgWorkers_->addDelayTask(delayMS, [self = shared_from_this()] {
self->statusPolling();
});
}
void RaftPart::stop() {
VLOG(2) << idStr_ << "Stopping the partition";
decltype(hosts_) hosts;
{
std::unique_lock<std::mutex> lck(raftLock_);
status_ = Status::STOPPED;
leader_ = {0, 0};
role_ = Role::FOLLOWER;
hosts = std::move(hosts_);
}
for (auto& h : hosts) {
h->stop();
}
VLOG(2) << idStr_ << "Invoked stop() on all peer hosts";
for (auto& h : hosts) {
VLOG(2) << idStr_ << "Waiting " << h->idStr() << " to stop";
h->waitForStop();
VLOG(2) << idStr_ << h->idStr() << "has stopped";
}
hosts.clear();
LOG(INFO) << idStr_ << "Partition has been stopped";
}
AppendLogResult RaftPart::canAppendLogs() {
CHECK(!raftLock_.try_lock());
if (status_ == Status::STARTING) {
LOG(ERROR) << idStr_ << "The partition is still starting";
return AppendLogResult::E_NOT_READY;
}
if (status_ == Status::STOPPED) {
LOG(ERROR) << idStr_ << "The partition is stopped";
return AppendLogResult::E_STOPPED;
}
if (role_ != Role::LEADER) {
PLOG_EVERY_N(ERROR, 100) << idStr_ << "The partition is not a leader";
return AppendLogResult::E_NOT_A_LEADER;
}
return AppendLogResult::SUCCEEDED;
}
void RaftPart::addLearner(const HostAddr& addr) {
CHECK(!raftLock_.try_lock());
if (addr == addr_) {
LOG(INFO) << idStr_ << "I am learner!";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&addr] (const auto& h) {
return h->address() == addr;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(addr, shared_from_this(), true));
LOG(INFO) << idStr_ << "Add learner " << addr;
} else {
LOG(INFO) << idStr_ << "The host " << addr << " has been existed as "
<< ((*it)->isLearner() ? " learner " : " group member");
}
}
void RaftPart::preProcessTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Pre process transfer leader to " << target;
switch (role_) {
case Role::FOLLOWER: {
if (target != addr_ && target != HostAddr(0, 0)) {
LOG(INFO) << idStr_ << "I am follower, just wait for the new leader.";
} else {
LOG(INFO) << idStr_ << "I will be the new leader, trigger leader election now!";
bgWorkers_->addTask([self = shared_from_this()] {
{
std::unique_lock<std::mutex> lck(self->raftLock_);
self->role_ = Role::CANDIDATE;
self->leader_ = HostAddr(0, 0);
}
self->leaderElection();
});
}
break;
}
default: {
LOG(INFO) << idStr_ << "My role is " << roleStr(role_)
<< ", so do nothing when pre process transfer leader";
break;
}
}
}
void RaftPart::commitTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Commit transfer leader to " << target;
switch (role_) {
case Role::LEADER: {
if (target != addr_ && !hosts_.empty()) {
auto iter = std::find_if(hosts_.begin(), hosts_.end(), [] (const auto& h) {
return !h->isLearner();
});
if (iter != hosts_.end()) {
lastMsgRecvDur_.reset();
role_ = Role::FOLLOWER;
leader_ = HostAddr(0, 0);
LOG(INFO) << idStr_ << "Give up my leadership!";
}
} else {
LOG(INFO) << idStr_ << "I am already the leader!";
}
break;
}
case Role::FOLLOWER:
case Role::CANDIDATE: {
LOG(INFO) << idStr_ << "I am " << roleStr(role_) << ", just wait for the new leader!";
break;
}
case Role::LEARNER: {
LOG(INFO) << idStr_ << "I am learner, not in the raft group, skip the log";
break;
}
}
}
void RaftPart::updateQuorum() {
CHECK(!raftLock_.try_lock());
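// quorum_ is the number of peer acknowledgements (in addition to this node itself)
// needed for a majority of the whole group; learners are not counted.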
int32_t total = 0;
for (auto& h : hosts_) {
if (!h->isLearner()) {
total++;
}
}
quorum_ = (total + 1) / 2;
}
void RaftPart::addPeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
if (role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am learner, promote myself to be follower";
role_ = Role::FOLLOWER;
updateQuorum();
} else {
LOG(INFO) << idStr_ << "I am already in the raft group!";
}
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(peer, shared_from_this()));
updateQuorum();
LOG(INFO) << idStr_ << "Add peer " << peer;
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The host " << peer
<< " has been existed as learner, promote it!";
(*it)->setLearner(false);
updateQuorum();
} else {
LOG(INFO) << idStr_ << "The host " << peer << " has been existed as follower!";
}
}
}
void RaftPart::removePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
// The part will be removed in REMOVE_PART_ON_SRC phase
LOG(INFO) << idStr_ << "Remove myself from the raft group.";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
LOG(INFO) << idStr_ << "The peer " << peer << " not exist!";
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The peer is learner, remove it directly!";
hosts_.erase(it);
return;
}
hosts_.erase(it);
updateQuorum();
LOG(INFO) << idStr_ << "Remove peer " << peer;
}
}
void RaftPart::preProcessRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::LEADER) {
LOG(INFO) << idStr_ << "I am leader, skip remove peer in preProcessLog";
return;
}
removePeer(peer);
}
void RaftPart::commitRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am " << roleStr(role_)
<< ", skip remove peer in commit";
return;
}
CHECK(Role::LEADER == role_);
removePeer(peer);
}
folly::Future<AppendLogResult> RaftPart::appendAsync(ClusterID source,
std::string log) {
if (source < 0) {
source = clusterId_;
}
return appendLogAsync(source, LogType::NORMAL, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::atomicOpAsync(AtomicOp op) {
return appendLogAsync(clusterId_, LogType::ATOMIC_OP, "", std::move(op));
}
folly::Future<AppendLogResult> RaftPart::sendCommandAsync(std::string log) {
return appendLogAsync(clusterId_, LogType::COMMAND, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::appendLogAsync(ClusterID source,
LogType logType,
std::string log,
AtomicOp op) {
if (blocking_ && (logType == LogType::NORMAL || logType == LogType::ATOMIC_OP)) {
return AppendLogResult::E_WRITE_BLOCKING;
}
LogCache swappedOutLogs;
auto retFuture = folly::Future<AppendLogResult>::makeEmpty();
if (bufferOverFlow_) {
PLOG_EVERY_N(WARNING, 30) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
{
std::lock_guard<std::mutex> lck(logsLock_);
VLOG(2) << idStr_ << "Checking whether buffer overflow";
if (logs_.size() >= FLAGS_max_batch_size) {
// Buffer is full
LOG(WARNING) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
bufferOverFlow_ = true;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
VLOG(2) << idStr_ << "Appending logs to the buffer";
// Append new logs to the buffer
DCHECK_GE(source, 0);
logs_.emplace_back(source, logType, std::move(log), std::move(op));
switch (logType) {
case LogType::ATOMIC_OP:
retFuture = cachingPromise_.getSingleFuture();
break;
case LogType::COMMAND:
retFuture = cachingPromise_.getAndRollSharedFuture();
break;
case LogType::NORMAL:
retFuture = cachingPromise_.getSharedFuture();
break;
}
bool expected = false;
if (replicatingLogs_.compare_exchange_strong(expected, true)) {
// We need to send logs to all followers
VLOG(2) << idStr_ << "Preparing to send AppendLog request";
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
std::swap(swappedOutLogs, logs_);
bufferOverFlow_ = false;
} else {
VLOG(2) << idStr_
<< "Another AppendLogs request is ongoing,"
" just return";
return retFuture;
}
}
LogID firstId = 0;
TermID termId = 0;
AppendLogResult res;
{
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs();
if (res == AppendLogResult::SUCCEEDED) {
firstId = lastLogId_ + 1;
termId = term_;
}
}
if (!checkAppendLogResult(res)) {
// Most likely failed because the partition is not the leader
PLOG_EVERY_N(ERROR, 100) << idStr_ << "Cannot append logs, clean the buffer";
return res;
}
// Replicate buffered logs to all followers
// Replication will happen on a separate thread and will block
// until majority accept the logs, the leadership changes, or
// the partition stops
VLOG(2) << idStr_ << "Calling appendLogsInternal()";
AppendLogsIterator it(
firstId,
termId,
std::move(swappedOutLogs),
[this] (AtomicOp opCB) -> std::string {
CHECK(opCB != nullptr);
auto opRet = opCB();
if (opRet.empty()) {
// Failed
sendingPromise_.setOneSingleValue(AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
appendLogsInternal(std::move(it), termId);
return retFuture;
}
void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) {
TermID currTerm = 0;
LogID prevLogId = 0;
TermID prevLogTerm = 0;
LogID committed = 0;
LogID lastId = 0;
if (iter.valid()) {
VLOG(2) << idStr_ << "Ready to append logs from id "
<< iter.logId() << " (Current term is "
<< currTerm << ")";
} else {
LOG(ERROR) << idStr_ << "Only happend when Atomic op failed";
replicatingLogs_ = false;
return;
}
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (term_ != termId) {
VLOG(2) << idStr_ << "Term has been updated, origin "
<< termId << ", new " << term_;
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
currTerm = term_;
prevLogId = lastLogId_;
prevLogTerm = lastLogTerm_;
committed = committedLogId_;
// Step 1: Write WAL
SlowOpTracker tracker;
if (!wal_->appendLogs(iter)) {
LOG(ERROR) << idStr_ << "Failed to write into WAL";
res = AppendLogResult::E_WAL_FAILURE;
break;
}
lastId = wal_->lastLogId();
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Write WAL, total %ld",
lastId - prevLogId + 1));
}
VLOG(2) << idStr_ << "Succeeded writing logs ["
<< iter.firstLogId() << ", " << lastId << "] to WAL";
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Failed append logs";
return;
}
// Step 2: Replicate to followers
auto* eb = ioThreadPool_->getEventBase();
replicateLogs(eb,
std::move(iter),
currTerm,
lastId,
committed,
prevLogTerm,
prevLogId);
return;
}
void RaftPart::replicateLogs(folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId) {
using namespace folly; // NOLINT since the fancy overload of | operator
decltype(hosts_) hosts;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
hosts = hosts_;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Replicate logs failed";
return;
}
VLOG(2) << idStr_ << "About to replicate logs to all peer hosts";
SlowOpTracker tracker;
collectNSucceeded(
gen::from(hosts)
| gen::map([self = shared_from_this(),
eb,
currTerm,
lastLogId,
prevLogId,
prevLogTerm,
committedId] (std::shared_ptr<Host> hostPtr) {
VLOG(2) << self->idStr_
<< "Appending logs to "
<< hostPtr->idStr();
return via(eb, [=] () -> Future<cpp2::AppendLogResponse> {
return hostPtr->appendLogs(eb,
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts] (size_t index, cpp2::AppendLogResponse& resp) {
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[index]->isLearner();
})
.via(executor_.get())
.then([self = shared_from_this(),
eb,
it = std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogId,
prevLogTerm,
pHosts = std::move(hosts),
tracker] (folly::Try<AppendLogResponses>&& result) mutable {
VLOG(2) << self->idStr_ << "Received enough response";
CHECK(!result.hasException());
if (tracker.slow()) {
tracker.output(self->idStr_, folly::stringPrintf("Total send logs: %ld",
lastLogId - prevLogId + 1));
}
self->processAppendLogResponses(*result,
eb,
std::move(it),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId,
std::move(pHosts));
return *result;
});
}
void RaftPart::processAppendLogResponses(
const AppendLogResponses& resps,
folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId,
std::vector<std::shared_ptr<Host>> hosts) {
// Make sure majority have succeeded
size_t numSucceeded = 0;
for (auto& res : resps) {
if (!hosts[res.first]->isLearner()
&& res.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
}
if (numSucceeded >= quorum_) {
// Majority have succeeded
VLOG(2) << idStr_ << numSucceeded
<< " hosts have accepted the logs";
LogID firstLogId = 0;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
LOG(INFO) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (currTerm != term_) {
LOG(INFO) << idStr_ << "The leader has changed, ABA problem.";
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
lastLogId_ = lastLogId;
lastLogTerm_ = currTerm;
lastMsgSentDur_.reset();
auto walIt = wal_->iterator(committedId + 1, lastLogId);
SlowOpTracker tracker;
// Step 3: Commit the batch
if (commitLogs(std::move(walIt))) {
committedLogId_ = lastLogId;
firstLogId = lastLogId_ + 1;
} else {
LOG(FATAL) << idStr_ << "Failed to commit logs";
}
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Total commit: %ld",
committedLogId_ - committedId));
}
VLOG(2) << idStr_ << "Leader succeeded in committing the logs "
<< committedId + 1 << " to " << lastLogId;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "processAppendLogResponses failed!";
return;
}
// Step 4: Fulfill the promise
if (iter.hasNonAtomicOpLogs()) {
sendingPromise_.setOneSharedValue(AppendLogResult::SUCCEEDED);
}
if (iter.leadByAtomicOp()) {
sendingPromise_.setOneSingleValue(AppendLogResult::SUCCEEDED);
}
// Step 5: Check whether need to continue
// the log replication
{
std::lock_guard<std::mutex> lck(logsLock_);
CHECK(replicatingLogs_);
// Continue to process the original AppendLogsIterator if necessary
iter.resume();
// If there are no more valid logs to replicate in iter, create a new one if we have new logs
if (iter.empty()) {
VLOG(2) << idStr_ << "logs size " << logs_.size();
if (logs_.size() > 0) {
// continue to replicate the logs
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
iter = AppendLogsIterator(
firstLogId,
currTerm,
std::move(logs_),
[this] (AtomicOp op) -> std::string {
auto opRet = op();
if (opRet.empty()) {
// Failed
sendingPromise_.setOneSingleValue(
AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
logs_.clear();
bufferOverFlow_ = false;
}
// Reset replicatingLogs_ if one of the following is true:
// 1. the old iter is empty && logs_.size() == 0
// 2. the old iter is empty && logs_.size() > 0, but all logs in the new iter are atomic ops,
// and all of them failed, which makes iter empty again
if (iter.empty()) {
replicatingLogs_ = false;
VLOG(2) << idStr_ << "No more log to be replicated";
return;
}
}
}
this->appendLogsInternal(std::move(iter), currTerm);
} else {
// Not enough hosts accepted the log, re-try
LOG(WARNING) << idStr_ << "Only " << numSucceeded
<< " hosts succeeded, Need to try again";
replicateLogs(eb,
std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
}
}
bool RaftPart::needToSendHeartbeat() {
std::lock_guard<std::mutex> g(raftLock_);
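// Send a heartbeat once 2/5 of the heartbeat interval has passed since the last
// message was sent, so followers stay well clear of their election timeout.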
return status_ == Status::RUNNING &&
role_ == Role::LEADER &&
lastMsgSentDur_.elapsedInSec() >= FLAGS_raft_heartbeat_interval_secs * 2 / 5;
}
bool RaftPart::needToStartElection() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING &&
role_ == Role::FOLLOWER &&
(lastMsgRecvDur_.elapsedInMSec() >= weight_ * FLAGS_raft_heartbeat_interval_secs * 1000 ||
term_ == 0)) {
LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur "
<< lastMsgRecvDur_.elapsedInMSec()
<< ", term " << term_;
role_ = Role::CANDIDATE;
leader_ = HostAddr(0, 0);
}
return role_ == Role::CANDIDATE;
}
bool RaftPart::prepareElectionRequest(
cpp2::AskForVoteRequest& req,
std::vector<std::shared_ptr<Host>>& hosts) {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
VLOG(2) << idStr_ << "The partition is not running";
return false;
}
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_
<< "The part has been stopped, skip the request";
return false;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
return false;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
VLOG(2) << idStr_ << "The partition is still waiting snapshot";
return false;
}
// Make sure the role is still CANDIDATE
if (role_ != Role::CANDIDATE) {
VLOG(2) << idStr_ << "A leader has been elected";
return false;
}
req.set_space(spaceId_);
req.set_part(partId_);
req.set_candidate_ip(addr_.first);
req.set_candidate_port(addr_.second);
req.set_term(++proposedTerm_); // Bump up the proposed term
req.set_last_log_id(lastLogId_);
req.set_last_log_term(lastLogTerm_);
hosts = followers();
return true;
}
typename RaftPart::Role RaftPart::processElectionResponses(
const RaftPart::ElectionResponses& results) {
std::lock_guard<std::mutex> g(raftLock_);
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_
<< "The part has been stopped, skip the request";
return role_;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
return role_;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waitiong snapshot";
return role_;
}
if (role_ != Role::CANDIDATE) {
LOG(INFO) << idStr_ << "Partition's role has changed to "
<< roleStr(role_)
<< " during the election, so discard the results";
return role_;
}
size_t numSucceeded = 0;
for (auto& r : results) {
if (r.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
}
CHECK(role_ == Role::CANDIDATE);
if (numSucceeded >= quorum_) {
LOG(INFO) << idStr_
<< "Partition is elected as the new leader for term "
<< proposedTerm_;
term_ = proposedTerm_;
role_ = Role::LEADER;
}
return role_;
}
bool RaftPart::leaderElection() {
VLOG(2) << idStr_ << "Start leader election...";
using namespace folly; // NOLINT since the fancy overload of | operator
cpp2::AskForVoteRequest voteReq;
decltype(hosts_) hosts;
if (!prepareElectionRequest(voteReq, hosts)) {
// Suppose we have three replicas A (leader), B and C. After A crashes,
// B and C begin an election. B wins and sends heartbeats; C has a log gap
// with B and needs a snapshot from B. Meanwhile C starts its own election
// and becomes a Candidate, but because C is in WAITING_SNAPSHOT,
// prepareElectionRequest returns false and the election keeps being retried.
// Since C stays a Candidate, it rejects the snapshot request from B,
// and an infinite loop begins.
// So we need to go back to the follower state to avoid this case.
std::lock_guard<std::mutex> g(raftLock_);
role_ = Role::FOLLOWER;
return false;
}
// Send out the AskForVoteRequest
LOG(INFO) << idStr_ << "Sending out an election request "
<< "(space = " << voteReq.get_space()
<< ", part = " << voteReq.get_part()
<< ", term = " << voteReq.get_term()
<< ", lastLogId = " << voteReq.get_last_log_id()
<< ", lastLogTerm = " << voteReq.get_last_log_term()
<< ", candidateIP = "
<< NetworkUtils::intToIPv4(voteReq.get_candidate_ip())
<< ", candidatePort = " << voteReq.get_candidate_port()
<< ")";
auto resps = ElectionResponses();
if (hosts.empty()) {
VLOG(2) << idStr_ << "No peer found, I will be the leader";
} else {
auto eb = ioThreadPool_->getEventBase();
auto futures = collectNSucceeded(
gen::from(hosts)
| gen::map([eb, self = shared_from_this(), &voteReq] (auto& host) {
VLOG(2) << self->idStr_
<< "Sending AskForVoteRequest to "
<< host->idStr();
return via(
eb,
[&voteReq, &host] ()
-> Future<cpp2::AskForVoteResponse> {
return host->askForVote(voteReq);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts, this](size_t idx, cpp2::AskForVoteResponse& resp) {
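// An E_LOG_STALE reply means this peer holds newer logs than we do; double the
// election weight so our next attempt backs off and the better candidate can win first.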
if (resp.get_error_code() == cpp2::ErrorCode::E_LOG_STALE) {
LOG(INFO) << idStr_ << "My last log id is less than " << hosts[idx]->address()
<< ", double my election interval.";
uint64_t curWeight = weight_.load();
weight_.store(curWeight * 2);
}
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[idx]->isLearner();
});
VLOG(2) << idStr_
<< "AskForVoteRequest has been sent to all peers"
", waiting for responses";
futures.wait();
CHECK(!futures.hasException())
<< "Got exception -- "
<< futures.result().exception().what().toStdString();
VLOG(2) << idStr_ << "Got AskForVote response back";
resps = std::move(futures).get();
}
// Process the responses
switch (processElectionResponses(resps)) {
case Role::LEADER: {
// Elected
LOG(INFO) << idStr_
<< "The partition is elected as the leader";
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING) {
leader_ = addr_;
bgWorkers_->addTask([self = shared_from_this(),
term = voteReq.get_term()] {
self->onElected(term);
});
}
}
weight_ = 1;
sendHeartbeat();
return true;
}
case Role::FOLLOWER: {
// Someone was elected
LOG(INFO) << idStr_ << "Someone else was elected";
return true;
}
case Role::CANDIDATE: {
// No one has been elected
LOG(INFO) << idStr_
<< "No one is elected, continue the election";
return false;
}
case Role::LEARNER: {
LOG(FATAL) << idStr_ << " Impossible! There must be some bugs!";
return false;
}
}
LOG(FATAL) << "Should not reach here";
return false;
}
void RaftPart::statusPolling() {
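// Poll at one third of the heartbeat interval so that both heartbeat sending and
// election timeouts are checked well within a single interval.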
size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3;
if (needToStartElection()) {
if (leaderElection()) {
VLOG(2) << idStr_ << "Stop the election";
} else {
// No leader has been elected, need to continue
// (After sleeping a random period between [500ms, 2s])
VLOG(2) << idStr_ << "Wait for a while and continue the leader election";
delay = (folly::Random::rand32(1500) + 500) * weight_;
}
} else if (needToSendHeartbeat()) {
VLOG(2) << idStr_ << "Need to send heartbeat";
sendHeartbeat();
}
if (needToCleanupSnapshot()) {
LOG(INFO) << idStr_ << "Clean up the snapshot";
cleanupSnapshot();
}
if (needToCleanWal()) {
wal_->cleanWAL(FLAGS_wal_ttl);
}
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING || status_ == Status::WAITING_SNAPSHOT) {
VLOG(3) << idStr_ << "Schedule new task";
bgWorkers_->addDelayTask(
delay,
[self = shared_from_this()] {
self->statusPolling();
});
}
}
}
bool RaftPart::needToCleanupSnapshot() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::WAITING_SNAPSHOT &&
role_ != Role::LEADER &&
lastSnapshotRecvDur_.elapsedInSec() >= FLAGS_raft_snapshot_timeout;
}
void RaftPart::cleanupSnapshot() {
LOG(INFO) << idStr_ << "Clean up the snapshot";
std::lock_guard<std::mutex> g(raftLock_);
reset();
status_ = Status::RUNNING;
}
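// The WAL may only be cleaned when no snapshot is in flight: neither this part
// waiting for one, nor any peer still receiving one from us.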
bool RaftPart::needToCleanWal() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::WAITING_SNAPSHOT) {
return false;
}
for (auto& host : hosts_) {
if (host->sendingSnapshot_) {
return false;
}
}
return true;
}
void RaftPart::processAskForVoteRequest(
const cpp2::AskForVoteRequest& req,
cpp2::AskForVoteResponse& resp) {
LOG(INFO) << idStr_
<< "Recieved a VOTING request"
<< ": space = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", candidateAddr = "
<< NetworkUtils::intToIPv4(req.get_candidate_ip()) << ":"
<< req.get_candidate_port()
<< ", term = " << req.get_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", lastLogTerm = " << req.get_last_log_term();
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waiting snapshot";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
LOG(INFO) << idStr_ << "The partition currently is a "
<< roleStr(role_) << ", lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_
<< ", term " << term_;
if (role_ == Role::LEARNER) {
resp.set_error_code(cpp2::ErrorCode::E_BAD_ROLE);
return;
}
// Check term id
auto term = role_ == Role::CANDIDATE ? proposedTerm_ : term_;
if (req.get_term() <= term) {
LOG(INFO) << idStr_
<< (role_ == Role::CANDIDATE
? "The partition is currently proposing term "
: "The partition currently is on term ")
<< term
<< ". The term proposed by the candidate is"
" no greater, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
// Check the last term to receive a log
if (req.get_last_log_term() < lastLogTerm_) {
LOG(INFO) << idStr_
<< "The partition's last term to receive a log is "
<< lastLogTerm_
<< ", which is newer than the candidate's log "
<< req.get_last_log_term()
<< ". So the candidate will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (req.get_last_log_term() == lastLogTerm_) {
// Check last log id
if (req.get_last_log_id() < lastLogId_) {
LOG(INFO) << idStr_
<< "The partition's last log id is " << lastLogId_
<< ". The candidate's last log id " << req.get_last_log_id()
<< " is smaller, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
}
auto candidate = HostAddr(req.get_candidate_ip(), req.get_candidate_port());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate " << candidate << " is not my peers";
resp.set_error_code(cpp2::ErrorCode::E_WRONG_LEADER);
return;
}
// Ok, no reason to refuse, we will vote for the candidate
LOG(INFO) << idStr_ << "The partition will vote for the candidate";
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
Role oldRole = role_;
TermID oldTerm = term_;
role_ = Role::FOLLOWER;
term_ = proposedTerm_ = req.get_term();
leader_ = std::make_pair(req.get_candidate_ip(),
req.get_candidate_port());
// Reset the last message time
lastMsgRecvDur_.reset();
weight_ = 1;
// If the partition used to be a leader, need to fire the callback
if (oldRole == Role::LEADER) {
LOG(INFO) << idStr_ << "Was a leader, need to do some clean-up";
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
// Need to invoke the onLostLeadership callback
bgWorkers_->addTask(
[self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
LOG(INFO) << idStr_ << "I was " << roleStr(oldRole)
<< ", discover the new leader " << leader_;
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return;
}
void RaftPart::processAppendLogRequest(
const cpp2::AppendLogRequest& req,
cpp2::AppendLogResponse& resp) {
if (FLAGS_trace_raft) {
LOG(INFO) << idStr_
<< "Received logAppend "
<< ": GraphSpaceId = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", leaderIp = " << req.get_leader_ip()
<< ", leaderPort = " << req.get_leader_port()
<< ", current_term = " << req.get_current_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", committedLogId = " << req.get_committed_log_id()
<< ", lastLogIdSent = " << req.get_last_log_id_sent()
<< ", lastLogTermSent = " << req.get_last_log_term_sent()
<< folly::stringPrintf(
", num_logs = %ld, logTerm = %ld",
req.get_log_str_list().size(),
req.get_log_term())
<< ", sendingSnapshot = " << req.get_sending_snapshot()
<< ", local lastLogId = " << lastLogId_
<< ", local lastLogTerm = " << lastLogTerm_
<< ", local committedLogId = " << committedLogId_
<< ", local current term = " << term_;
}
std::lock_guard<std::mutex> g(raftLock_);
resp.set_current_term(term_);
resp.set_leader_ip(leader_.first);
resp.set_leader_port(leader_.second);
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_ < committedLogId_ ? committedLogId_ : lastLogId_);
resp.set_last_log_term(lastLogTerm_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
// Check leadership
cpp2::ErrorCode err = verifyLeader(req);
if (err != cpp2::ErrorCode::SUCCEEDED) {
// Wrong leadership
VLOG(2) << idStr_ << "Will not follow the leader";
resp.set_error_code(err);
return;
}
// Reset the timeout timer
lastMsgRecvDur_.reset();
if (req.get_sending_snapshot() && status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to wait for the snapshot"
<< " " << req.get_committed_log_id();
reset();
status_ = Status::WAITING_SNAPSHOT;
resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
VLOG(2) << idStr_
<< "The part is receiving snapshot,"
<< "so just accept the new wals, but don't commit them."
<< "last_log_id_sent " << req.get_last_log_id_sent()
<< ", total log number " << req.get_log_str_list().size();
if (lastLogId_ > 0 && req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
}
// TODO(heng): if we have 3 nodes, one is the leader, one is waiting for a snapshot and returns success,
// and the other is a follower, but the leader fails to replicate the log to that follower.
// How do we deal with a leader crash? At this time, no leader can be elected.
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
// When the leader is already sending a snapshot, it sometimes sends a request
// with an empty log list, and lastLogId in the wal may be 0 because of reset.
if (numLogs != 0) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
}
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
}
return;
}
if (req.get_last_log_id_sent() < committedLogId_ && req.get_last_log_term_sent() <= term_) {
LOG(INFO) << idStr_ << "Stale log! The log " << req.get_last_log_id_sent()
<< ", term " << req.get_last_log_term_sent()
<< " i had committed yet. My committedLogId is "
<< committedLogId_ << ", term is " << term_;
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
} else if (req.get_last_log_id_sent() < committedLogId_) {
LOG(INFO) << idStr_ << "What?? How it happens! The log id is "
<< req.get_last_log_id_sent()
<< ", the log term is " << req.get_last_log_term_sent()
<< ", but my committedLogId is " << committedLogId_
<< ", my term is " << term_
<< ", to make the cluster stable i will follow the high term"
<< " candidate and clenaup my data";
reset();
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
}
// req.get_last_log_id_sent() >= committedLogId_
if (lastLogTerm_ > 0 && req.get_last_log_term_sent() != lastLogTerm_) {
LOG(INFO) << idStr_ << "The local last log term is " << lastLogTerm_
<< ", which is different from the leader's prevLogTerm "
<< req.get_last_log_term_sent()
<< ", the prevLogId is " << req.get_last_log_id_sent()
<< ". So need to rollback to last committedLogId_ " << committedLogId_;
if (wal_->rollbackToLog(committedLogId_)) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
LOG(INFO) << idStr_ << "Rollback succeeded! lastLogId is " << lastLogId_
<< ", logLogTerm is " << lastLogTerm_
<< ", committedLogId is " << committedLogId_
<< ", term is " << term_;
}
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() < lastLogId_) {
LOG(INFO) << idStr_ << "Stale log! Local lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", lastLogIdSent " << req.get_last_log_id_sent()
<< ", lastLogTermSent " << req.get_last_log_term_sent();
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
// Append new logs
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
if (req.get_committed_log_id() > committedLogId_) {
// Commit some logs
// We can only commit logs from firstId to min(lastLogId_, leader's commit log id),
// follower can't always commit to leader's commit id because of lack of log
LogID lastLogIdCanCommit = std::min(lastLogId_, req.get_committed_log_id());
CHECK(committedLogId_ + 1 <= lastLogIdCanCommit);
if (commitLogs(wal_->iterator(committedLogId_ + 1, lastLogIdCanCommit))) {
VLOG(1) << idStr_ << "Follower succeeded committing log "
<< committedLogId_ + 1 << " to "
<< lastLogIdCanCommit;
committedLogId_ = lastLogIdCanCommit;
resp.set_committed_log_id(lastLogIdCanCommit);
} else {
LOG(ERROR) << idStr_ << "Failed to commit log "
<< committedLogId_ + 1 << " to "
<< req.get_committed_log_id();
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
}
cpp2::ErrorCode RaftPart::verifyLeader(
const cpp2::AppendLogRequest& req) {
CHECK(!raftLock_.try_lock());
auto candidate = HostAddr(req.get_leader_ip(), req.get_leader_port());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
VLOG(2) << idStr_ << "The candidate leader " << candidate << " is not my peers";
return cpp2::ErrorCode::E_WRONG_LEADER;
}
VLOG(2) << idStr_ << "The current role is " << roleStr(role_);
switch (role_) {
case Role::LEARNER:
case Role::FOLLOWER: {
if (req.get_current_term() == term_ &&
req.get_leader_ip() == leader_.first &&
req.get_leader_port() == leader_.second) {
VLOG(3) << idStr_ << "Same leader";
return cpp2::ErrorCode::SUCCEEDED;
}
break;
}
case Role::LEADER: {
// In this case, the remote term has to be newer
// TODO optimize the case that the current partition is
// isolated and the term keeps going up
break;
}
case Role::CANDIDATE: {
// Since the current partition is a candidate, the remote
// term has to be newer so that it can be accepted
break;
}
}
// Make sure the remote term is greater than local's
if (req.get_current_term() < term_) {
PLOG_EVERY_N(ERROR, 100) << idStr_
<< "The current role is " << roleStr(role_)
<< ". The local term is " << term_
<< ". The remote term is not newer";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
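// A follower/learner that already recognises a leader for this term must reject
// another host claiming leadership with the same term.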
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
if (req.get_current_term() == term_ && leader_ != std::make_pair(0, 0)) {
LOG(ERROR) << idStr_ << "The local term is same as remote term " << term_
<< ". But I believe leader exists.";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
}
Role oldRole = role_;
TermID oldTerm = term_;
// Ok, no reason to refuse, just follow the leader
LOG(INFO) << idStr_ << "The current role is " << roleStr(role_)
<< ". Will follow the new leader "
<< network::NetworkUtils::intToIPv4(req.get_leader_ip())
<< ":" << req.get_leader_port()
<< " [Term: " << req.get_current_term() << "]";
if (role_ != Role::LEARNER) {
role_ = Role::FOLLOWER;
}
leader_ = std::make_pair(req.get_leader_ip(),
req.get_leader_port());
term_ = proposedTerm_ = req.get_current_term();
weight_ = 1;
if (oldRole == Role::LEADER) {
VLOG(2) << idStr_ << "Was a leader, need to do some clean-up";
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
// Need to invoke onLostLeadership callback
bgWorkers_->addTask([self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return cpp2::ErrorCode::SUCCEEDED;
}
void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req,
cpp2::SendSnapshotResponse& resp) {
VLOG(1) << idStr_ << "Receive snapshot, total rows " << req.get_rows().size()
<< ", total count received " << req.get_total_count()
<< ", total size received " << req.get_total_size()
<< ", finished " << req.get_done();
std::lock_guard<std::mutex> g(raftLock_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(ERROR) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(ERROR) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(role_ != Role::FOLLOWER && role_ != Role::LEARNER)) {
LOG(ERROR) << idStr_ << "Bad role " << roleStr(role_);
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(leader_ != HostAddr(req.get_leader_ip(), req.get_leader_port())
|| term_ != req.get_term())) {
LOG(ERROR) << idStr_ << "Term out of date, current term " << term_
<< ", received term " << req.get_term();
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to receive the snapshot";
reset();
status_ = Status::WAITING_SNAPSHOT;
}
lastSnapshotRecvDur_.reset();
// TODO(heng): Maybe we should save them into one sst firstly?
auto ret = commitSnapshot(req.get_rows(),
req.get_committed_log_id(),
req.get_committed_log_term(),
req.get_done());
lastTotalCount_ += ret.first;
lastTotalSize_ += ret.second;
if (lastTotalCount_ != req.get_total_count()
|| lastTotalSize_ != req.get_total_size()) {
LOG(ERROR) << idStr_ << "Bad snapshot, total rows received " << lastTotalCount_
<< ", total rows sended " << req.get_total_count()
<< ", total size received " << lastTotalSize_
<< ", total size sended " << req.get_total_size();
resp.set_error_code(cpp2::ErrorCode::E_PERSIST_SNAPSHOT_FAILED);
return;
}
if (req.get_done()) {
committedLogId_ = req.get_committed_log_id();
if (lastLogId_ < committedLogId_) {
lastLogId_ = committedLogId_;
lastLogTerm_ = req.get_committed_log_term();
}
if (wal_->lastLogId() <= committedLogId_) {
LOG(INFO) << "Reset invalid wal after snapshot received";
wal_->reset();
}
status_ = Status::RUNNING;
LOG(INFO) << idStr_ << "Receive all snapshot, committedLogId_ " << committedLogId_
<< ", lastLodId " << lastLogId_ << ", lastLogTermId " << lastLogTerm_;
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
return;
}
folly::Future<AppendLogResult> RaftPart::sendHeartbeat() {
VLOG(2) << idStr_ << "Send heartbeat";
std::string log = "";
return appendLogAsync(clusterId_, LogType::NORMAL, std::move(log));
}
std::vector<std::shared_ptr<Host>> RaftPart::followers() const {
CHECK(!raftLock_.try_lock());
decltype(hosts_) hosts;
for (auto& h : hosts_) {
if (!h->isLearner()) {
hosts.emplace_back(h);
}
}
return hosts;
}
bool RaftPart::checkAppendLogResult(AppendLogResult res) {
if (res != AppendLogResult::SUCCEEDED) {
{
std::lock_guard<std::mutex> lck(logsLock_);
logs_.clear();
cachingPromise_.setValue(res);
cachingPromise_.reset();
bufferOverFlow_ = false;
}
sendingPromise_.setValue(res);
replicatingLogs_ = false;
return false;
}
return true;
}
void RaftPart::reset() {
CHECK(!raftLock_.try_lock());
wal_->reset();
cleanup();
lastLogId_ = committedLogId_ = 0;
lastLogTerm_ = 0;
lastTotalCount_ = 0;
lastTotalSize_ = 0;
}
AppendLogResult RaftPart::isCatchedUp(const HostAddr& peer) {
std::lock_guard<std::mutex> lck(raftLock_);
LOG(INFO) << idStr_ << "Check whether I catch up";
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "I am not the leader";
return AppendLogResult::E_NOT_A_LEADER;
}
if (peer == addr_) {
LOG(INFO) << idStr_ << "I am the leader";
return AppendLogResult::SUCCEEDED;
}
for (auto& host : hosts_) {
if (host->addr_ == peer) {
if (host->followerCommittedLogId_ < wal_->firstLogId()) {
LOG(INFO) << idStr_ << "The committed log id of peer is "
<< host->followerCommittedLogId_
<< ", which is invalid or less than my first wal log id";
return AppendLogResult::E_SENDING_SNAPSHOT;
}
return host->sendingSnapshot_ ? AppendLogResult::E_SENDING_SNAPSHOT
: AppendLogResult::SUCCEEDED;
}
}
return AppendLogResult::E_INVALID_PEER;
}
bool RaftPart::linkCurrentWAL(const char* newPath) {
CHECK_NOTNULL(newPath);
std::lock_guard<std::mutex> g(raftLock_);
return wal_->linkCurrentWAL(newPath);
}
void RaftPart::checkAndResetPeers(const std::vector<HostAddr>& peers) {
std::lock_guard<std::mutex> lck(raftLock_);
// To avoid invalidating the iterator, we iterate over a copy of the container.
decltype(hosts_) hosts = hosts_;
for (auto& h : hosts) {
LOG(INFO) << idStr_ << "Check host " << h->addr_;
auto it = std::find(peers.begin(), peers.end(), h->addr_);
if (it == peers.end()) {
LOG(INFO) << idStr_ << "The peer " << h->addr_ << " should not exist in my peers";
removePeer(h->addr_);
}
}
for (auto& p : peers) {
LOG(INFO) << idStr_ << "Add peer " << p << " if not exist!";
addPeer(p);
}
}
} // namespace raftex
} // namespace nebula
| 1 | 28,204 | Do we need to set lastLogTerm_ to committedLogTerm when `lastLogId_ < committedLogId_`, on line 286 | vesoft-inc-nebula | cpp |
@@ -160,7 +160,8 @@ public class TestAllDictionaries extends LuceneTestCase {
try {
Dictionary dic = loadDictionary(aff);
totalMemory.addAndGet(RamUsageTester.sizeOf(dic));
- totalWords.addAndGet(RamUsageTester.sizeOf(dic.words));
+ totalWords.addAndGet(
+ RamUsageTester.sizeOf(dic.words) + RamUsageTester.sizeOf(dic.wordHashes));
System.out.println(aff + "\t" + memoryUsageSummary(dic));
} catch (Throwable e) {
failures.add(aff); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.hunspell;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.RamUsageTester;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Ignore;
/**
* Loads all dictionaries from the directory specified in {@code hunspell.dictionaries} system
* property and prints their memory usage. All *.aff files are traversed recursively inside the
* given directory. Each *.aff file must have a same-named sibling *.dic file. For examples of such
* directories, refer to the {@link org.apache.lucene.analysis.hunspell package documentation}.
*/
@SuppressSysoutChecks(bugUrl = "prints important memory utilization stats per dictionary")
public class TestAllDictionaries extends LuceneTestCase {
static Stream<Path> findAllAffixFiles() throws IOException {
String dicDir = System.getProperty("hunspell.dictionaries");
Assume.assumeFalse(
"Requires Hunspell dictionaries at -Dhunspell.dictionaries=...", dicDir == null);
Path dicPath = Paths.get(dicDir);
return Files.walk(dicPath).filter(f -> f.toString().endsWith(".aff")).sorted();
}
static Dictionary loadDictionary(Path aff) throws IOException, ParseException {
String affPath = aff.toString();
Path dic = Path.of(affPath.substring(0, affPath.length() - 4) + ".dic");
assert Files.exists(dic) : dic;
try (InputStream dictionary = Files.newInputStream(dic);
InputStream affix = Files.newInputStream(aff);
BaseDirectoryWrapper tempDir = newDirectory()) {
return new Dictionary(tempDir, "dictionary", affix, dictionary);
}
}
/** Hack ByteArrayInputStream (bais) to expose the current read position. */
private static class ExposePosition extends ByteArrayInputStream {
public ExposePosition(byte[] buf) {
super(buf);
}
public long position() {
return super.pos;
}
}
@Ignore
public void testMaxPrologueNeeded() throws Exception {
AtomicBoolean failTest = new AtomicBoolean();
Map<String, List<Long>> global = new LinkedHashMap<>();
for (Path aff : findAllAffixFiles().collect(Collectors.toList())) {
Map<String, List<Long>> local = new LinkedHashMap<>();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (ExposePosition is = new ExposePosition(Files.readAllBytes(aff))) {
int chr;
while ((chr = is.read()) >= 0) {
baos.write(chr);
if (chr == '\n') {
String line = baos.toString(StandardCharsets.ISO_8859_1);
if (!line.isBlank()) {
String firstWord = line.split("\\s")[0];
switch (firstWord) {
case "SET":
case "FLAG":
local.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
global.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
break;
}
}
baos.reset();
}
}
}
local.forEach(
(flag, positions) -> {
if (positions.size() > 1) {
System.out.format(
Locale.ROOT,
"Flag %s at more than one position in %s: %s%n",
flag,
aff,
positions);
failTest.set(true);
}
});
}
global.forEach(
(flag, positions) -> {
long max = positions.stream().mapToLong(v -> v).max().orElse(0);
System.out.printf(Locale.ROOT, "Flag %s at maximum offset %s%n", flag, max);
Assert.assertTrue(
"Flags beyond max prologue scan window: " + max,
max < Dictionary.MAX_PROLOGUE_SCAN_WINDOW);
});
if (failTest.get()) {
throw new AssertionError("Duplicate flags were present in at least one .aff file.");
}
}
public void testDictionariesLoadSuccessfully() throws Exception {
AtomicLong totalMemory = new AtomicLong();
AtomicLong totalWords = new AtomicLong();
int threads = Runtime.getRuntime().availableProcessors();
ExecutorService executor =
Executors.newFixedThreadPool(threads, new NamedThreadFactory("dictCheck-"));
List<Path> failures = Collections.synchronizedList(new ArrayList<>());
Function<Path, Void> process =
(Path aff) -> {
try {
Dictionary dic = loadDictionary(aff);
totalMemory.addAndGet(RamUsageTester.sizeOf(dic));
totalWords.addAndGet(RamUsageTester.sizeOf(dic.words));
System.out.println(aff + "\t" + memoryUsageSummary(dic));
} catch (Throwable e) {
failures.add(aff);
System.err.println("While checking " + aff + ":");
e.printStackTrace();
}
return null;
};
List<Callable<Void>> tasks =
findAllAffixFiles()
.map(aff -> (Callable<Void>) () -> process.apply(aff))
.collect(Collectors.toList());
try {
for (Future<?> future : executor.invokeAll(tasks)) {
future.get();
}
if (!failures.isEmpty()) {
throw new AssertionError(
"Certain dictionaries failed to parse:\n - "
+ failures.stream()
.map(path -> path.toAbsolutePath().toString())
.collect(Collectors.joining("\n - ")));
}
} finally {
executor.shutdown();
assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
}
System.out.println("Total dictionaries loaded: " + tasks.size());
System.out.println("Total memory: " + RamUsageEstimator.humanReadableUnits(totalMemory.get()));
System.out.println(
"Total memory for word storage: " + RamUsageEstimator.humanReadableUnits(totalWords.get()));
}
private static String memoryUsageSummary(Dictionary dic) {
return RamUsageTester.humanSizeOf(dic)
+ "\t("
+ ("words=" + RamUsageTester.humanSizeOf(dic.words) + ", ")
+ ("flags=" + RamUsageTester.humanSizeOf(dic.flagLookup) + ", ")
+ ("strips=" + RamUsageTester.humanSizeOf(dic.stripData) + ", ")
+ ("conditions=" + RamUsageTester.humanSizeOf(dic.patterns) + ", ")
+ ("affixData=" + RamUsageTester.humanSizeOf(dic.affixData) + ", ")
+ ("morphData=" + RamUsageTester.humanSizeOf(dic.morphData) + ", ")
+ ("prefixes=" + RamUsageTester.humanSizeOf(dic.prefixes) + ", ")
+ ("suffixes=" + RamUsageTester.humanSizeOf(dic.suffixes) + ")");
}
}
| 1 | 40,567 | About ~7% memory usage increase on average, at most 512KB | apache-lucene-solr | java |
@@ -32,6 +32,11 @@ type Response struct {
type ResponseWriter interface {
io.Writer
+ // Options for the request/response that this ResponseWriter is attached to.
+ Options() Options
+ // TODO(abg) Options should not be attached to the ResponseWriter. Instead
+ // they should be part of the Handle interface.
+
// AddHeaders adds the given headers to the response. If called, this MUST
// be called before any invocation of Write().
// | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
import "io"
// Response is the low level response representation.
type Response struct {
Headers Headers
Body io.ReadCloser
}
// ResponseWriter allows Handlers to write responses in a streaming fashion.
type ResponseWriter interface {
io.Writer
// AddHeaders adds the given headers to the response. If called, this MUST
// be called before any invocation of Write().
//
// This MUST NOT panic if Headers is nil.
AddHeaders(Headers)
// TODO(abg): Ability to set individual headers instead?
// SetApplicationError specifies that this response contains an
// application error. If called, this MUST be called before any invocation
// of Write().
SetApplicationError()
}
| 1 | 10,212 | Was this too big of a pain to do now? | yarpc-yarpc-go | go |
@@ -41,10 +41,10 @@ static train_result call_daal_kernel(const context_cpu& ctx,
const int64_t component_count = desc.get_component_count();
auto arr_data = row_accessor<const Float>{ data }.pull();
- array<Float> arr_eigvec{ column_count * component_count };
- array<Float> arr_eigval{ 1 * component_count };
- array<Float> arr_means{ 1 * component_count };
- array<Float> arr_vars{ 1 * component_count };
+ auto arr_eigvec = array<Float>::zeros(column_count * component_count);
+ auto arr_eigval = array<Float>::zeros(1 * component_count);
+ auto arr_means = array<Float>::zeros(1 * component_count);
+ auto arr_vars = array<Float>::zeros(1 * component_count);
// TODO: read-only access performed with deep copy of data since daal numeric tables are mutable.
// Need to create special immutable homogen table on daal interop side | 1 | /*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <daal/src/algorithms/pca/pca_dense_correlation_batch_kernel.h>
#include "oneapi/dal/algo/pca/backend/cpu/train_kernel.hpp"
#include "oneapi/dal/backend/interop/common.hpp"
#include "oneapi/dal/backend/interop/table_conversion.hpp"
namespace oneapi::dal::pca::backend {
using std::int64_t;
using dal::backend::context_cpu;
namespace daal_pca = daal::algorithms::pca;
namespace daal_cov = daal::algorithms::covariance;
namespace interop = dal::backend::interop;
template <typename Float, daal::CpuType Cpu>
using daal_pca_cor_kernel_t = daal_pca::internal::PCACorrelationKernel<daal::batch, Float, Cpu>;
template <typename Float>
static train_result call_daal_kernel(const context_cpu& ctx,
const descriptor_base& desc,
const table& data) {
const int64_t row_count = data.get_row_count();
const int64_t column_count = data.get_column_count();
const int64_t component_count = desc.get_component_count();
auto arr_data = row_accessor<const Float>{ data }.pull();
array<Float> arr_eigvec{ column_count * component_count };
array<Float> arr_eigval{ 1 * component_count };
array<Float> arr_means{ 1 * component_count };
array<Float> arr_vars{ 1 * component_count };
// TODO: read-only access performed with deep copy of data since daal numeric tables are mutable.
// Need to create special immutable homogen table on daal interop side
// TODO: data is table, not a homogen_table. Think better about accessor - is it enough to have just a row_accessor?
const auto daal_data =
interop::convert_to_daal_homogen_table(arr_data, row_count, column_count);
const auto daal_eigenvectors =
interop::convert_to_daal_homogen_table(arr_eigvec, column_count, component_count);
const auto daal_eigenvalues =
interop::convert_to_daal_homogen_table(arr_eigval, 1, component_count);
const auto daal_means = interop::convert_to_daal_homogen_table(arr_means, 1, component_count);
const auto daal_variances =
interop::convert_to_daal_homogen_table(arr_vars, 1, component_count);
daal_cov::Batch<Float, daal_cov::defaultDense> covariance_alg;
covariance_alg.input.set(daal_cov::data, daal_data);
constexpr bool is_correlation = false;
constexpr uint64_t results_to_compute =
int64_t(daal_pca::mean | daal_pca::variance | daal_pca::eigenvalue);
interop::call_daal_kernel<Float, daal_pca_cor_kernel_t>(ctx,
is_correlation,
desc.get_is_deterministic(),
*daal_data,
&covariance_alg,
results_to_compute,
*daal_eigenvectors,
*daal_eigenvalues,
*daal_means,
*daal_variances);
return train_result()
.set_model(
model().set_eigenvectors(homogen_table_builder{ component_count, arr_eigvec }.build()))
.set_eigenvalues(homogen_table_builder{ component_count, arr_eigval }.build());
}
template <typename Float>
static train_result train(const context_cpu& ctx,
const descriptor_base& desc,
const train_input& input) {
return call_daal_kernel<Float>(ctx, desc, input.get_data());
}
template <typename Float>
struct train_kernel_cpu<Float, method::cov> {
train_result operator()(const context_cpu& ctx,
const descriptor_base& desc,
const train_input& input) const {
return train<Float>(ctx, desc, input);
}
};
template struct train_kernel_cpu<float, method::cov>;
template struct train_kernel_cpu<double, method::cov>;
} // namespace oneapi::dal::pca::backend
| 1 | 22,609 | Why do we need to spend time initializing the array when we will rewrite its contents anyway? This can take a lot of time in some algorithms, especially if the filling is done in sequential mode. | oneapi-src-oneDAL | cpp
@@ -0,0 +1,5 @@
+class AddResourcesToWorkshops < ActiveRecord::Migration
+ def change
+ add_column :workshops, :resources, :text, null: false, default: ''
+ end
+end | 1 | 1 | 6,835 | Can this be `null:false, default: ''` to avoid the nil vs blank issue? | thoughtbot-upcase | rb |
|
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+test_name "Set root/Administrator password to a known value" do
+ extend Beaker::HostPrebuiltSteps
+
+ hosts.each do |host|
+ case host['platform']
+ when /windows/
+ on host, "passwd #{ENV['WINRM_USER']}", stdin: ENV['WINRM_PASSWORD']
+ when /osx/
+ # Our VMs default to PermitRootLogin prohibit-password
+ on host, 'echo "PermitRootLogin yes" >> /etc/ssh/sshd_config' if ENV['SSH_USER'] == 'root'
+ on host, "dscl . -passwd /Users/#{ENV['SSH_USER']}", stdin: ENV['SSH_PASSWORD']
+ else
+ # Some platforms support --stdin, but repeating it seems to work everywhere
+ on host, "passwd #{ENV['SSH_USER']}", stdin: "#{ENV['SSH_PASSWORD']}\n#{ENV['SSH_PASSWORD']}"
+ end
+ end
+end | 1 | 1 | 8,508 | Had to add this to get macOS to work. | puppetlabs-bolt | rb |
|
@@ -0,0 +1,13 @@
+package azkaban.container;
+
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
+
+
+public class FlowContainerTestBase {
+
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+
+} | 1 | 1 | 21,098 | What is the usage of this class? | azkaban-azkaban | java |
|
@@ -144,7 +144,7 @@ public class DataFilesTable extends BaseMetadataTable {
@Override
public CloseableIterable<StructLike> rows() {
return CloseableIterable.transform(
- ManifestFiles.read(manifest, io).project(schema),
+ ManifestFiles.read(manifest, io, null, null).project(schema),
file -> (GenericDataFile) file);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.ManifestEvaluator;
import org.apache.iceberg.expressions.Projections;
import org.apache.iceberg.expressions.ResidualEvaluator;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types.StructType;
/**
* A {@link Table} implementation that exposes a table's data files as rows.
*/
public class DataFilesTable extends BaseMetadataTable {
DataFilesTable(TableOperations ops, Table table) {
this(ops, table, table.name() + ".files");
}
DataFilesTable(TableOperations ops, Table table, String name) {
super(ops, table, name);
}
@Override
public TableScan newScan() {
return new FilesTableScan(operations(), table(), schema());
}
@Override
public Schema schema() {
StructType partitionType = Partitioning.partitionType(table());
Schema schema = new Schema(DataFile.getType(partitionType).fields());
if (partitionType.fields().size() < 1) {
// avoid returning an empty struct, which is not always supported. instead, drop the partition field
return TypeUtil.selectNot(schema, Sets.newHashSet(DataFile.PARTITION_ID));
} else {
return schema;
}
}
@Override
MetadataTableType metadataTableType() {
return MetadataTableType.FILES;
}
public static class FilesTableScan extends BaseMetadataTableScan {
private final Schema fileSchema;
FilesTableScan(TableOperations ops, Table table, Schema fileSchema) {
super(ops, table, fileSchema);
this.fileSchema = fileSchema;
}
private FilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
TableScanContext context) {
super(ops, table, schema, context);
this.fileSchema = fileSchema;
}
@Override
public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) {
throw new UnsupportedOperationException(
String.format("Cannot incrementally scan table of type %s", MetadataTableType.FILES.name()));
}
@Override
public TableScan appendsAfter(long fromSnapshotId) {
throw new UnsupportedOperationException(
String.format("Cannot incrementally scan table of type %s", MetadataTableType.FILES.name()));
}
@Override
protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
return new FilesTableScan(ops, table, schema, fileSchema, context);
}
@Override
protected CloseableIterable<FileScanTask> planFiles(
TableOperations ops, Snapshot snapshot, Expression rowFilter,
boolean ignoreResiduals, boolean caseSensitive, boolean colStats) {
CloseableIterable<ManifestFile> manifests = CloseableIterable.withNoopClose(snapshot.dataManifests());
String schemaString = SchemaParser.toJson(schema());
String specString = PartitionSpecParser.toJson(PartitionSpec.unpartitioned());
Expression filter = ignoreResiduals ? Expressions.alwaysTrue() : rowFilter;
ResidualEvaluator residuals = ResidualEvaluator.unpartitioned(filter);
// use an inclusive projection to remove the partition name prefix and filter out any non-partition expressions
Expression partitionFilter = Projections
.inclusive(
transformSpec(fileSchema, table().spec(), PARTITION_FIELD_PREFIX),
caseSensitive)
.project(rowFilter);
ManifestEvaluator manifestEval = ManifestEvaluator.forPartitionFilter(
partitionFilter, table().spec(), caseSensitive);
CloseableIterable<ManifestFile> filtered = CloseableIterable.filter(manifests, manifestEval::eval);
// Data tasks produce the table schema, not the projection schema and projection is done by processing engines.
// This data task needs to use the table schema, which may not include a partition schema to avoid having an
// empty struct in the schema for unpartitioned tables. Some engines, like Spark, can't handle empty structs in
// all cases.
return CloseableIterable.transform(filtered, manifest ->
new ManifestReadTask(ops.io(), manifest, schema(), schemaString, specString, residuals));
}
}
static class ManifestReadTask extends BaseFileScanTask implements DataTask {
private final FileIO io;
private final ManifestFile manifest;
private final Schema schema;
ManifestReadTask(FileIO io, ManifestFile manifest, Schema schema, String schemaString,
String specString, ResidualEvaluator residuals) {
super(DataFiles.fromManifest(manifest), null, schemaString, specString, residuals);
this.io = io;
this.manifest = manifest;
this.schema = schema;
}
@Override
public CloseableIterable<StructLike> rows() {
return CloseableIterable.transform(
ManifestFiles.read(manifest, io).project(schema),
file -> (GenericDataFile) file);
}
@Override
public Iterable<FileScanTask> split(long splitSize) {
return ImmutableList.of(this); // don't split
}
@VisibleForTesting
ManifestFile manifest() {
return manifest;
}
}
}
| 1 | 37,970 | Could we keep the original signatures as well, so we do not have to rewrite the method calls everywhere and add `null, null`? I think this could greatly reduce the changes needed in this patch | apache-iceberg | java |
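The reviewer's suggestion above is the classic delegating-overload pattern: keep `ManifestFiles.read(manifest, io)` and have it forward to the new, longer signature, so call sites such as `ManifestReadTask.rows()` never have to spell out `null, null`. A minimal, self-contained sketch of that pattern follows; the class name and parameter types below are stand-ins for illustration, not the actual Iceberg signatures from this patch.
// Hypothetical stand-in types: read "String manifest, String io" as
// "ManifestFile manifest, FileIO io" in the real code base.
public final class DelegatingOverloadSketch {
  static String read(String manifest, String io) {
    // Old two-argument entry point: existing callers compile unchanged and the
    // defaults for the new arguments live in exactly one place.
    return read(manifest, io, null, null);
  }
  static String read(String manifest, String io, Object extraA, Object extraB) {
    // Extended entry point added by the patch; new callers pass the extra
    // arguments explicitly when they need them.
    return manifest + " / " + io + " / " + extraA + " / " + extraB;
  }
  public static void main(String[] args) {
    System.out.println(read("manifest-001.avro", "hadoop-io"));
  }
}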
@@ -706,6 +706,7 @@ func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
if len(v) > 0 {
//fmt.Printf("Update storage trie addrHash %x, keyHash %x: %x\n", addrHash, keyHash, v)
if forward {
+ _ = ClearTombstonesForNewStorage(tds.db, cKey)
tds.t.Update(cKey, v)
} else {
// If rewinding, it might not be possible to execute storage item update. | 1 | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//nolint:scopelint
package state
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"runtime"
"sort"
"sync"
"sync/atomic"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/trie"
)
// MaxTrieCacheSize is the trie cache size limit after which to evict trie nodes from memory.
var MaxTrieCacheSize = uint64(1024 * 1024)
const (
//FirstContractIncarnation - first incarnation for contract accounts. After 1 it increases by 1.
FirstContractIncarnation = 1
//NonContractIncarnation incarnation for non contracts
NonContractIncarnation = 0
)
type StateReader interface {
ReadAccountData(address common.Address) (*accounts.Account, error)
ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error)
ReadAccountCode(address common.Address, codeHash common.Hash) ([]byte, error)
ReadAccountCodeSize(address common.Address, codeHash common.Hash) (int, error)
}
type StateWriter interface {
UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error
UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error
DeleteAccount(ctx context.Context, address common.Address, original *accounts.Account) error
WriteAccountStorage(ctx context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error
CreateContract(address common.Address) error
}
type NoopWriter struct {
}
func NewNoopWriter() *NoopWriter {
return &NoopWriter{}
}
func (nw *NoopWriter) UpdateAccountData(_ context.Context, address common.Address, original, account *accounts.Account) error {
return nil
}
func (nw *NoopWriter) DeleteAccount(_ context.Context, address common.Address, original *accounts.Account) error {
return nil
}
func (nw *NoopWriter) UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error {
return nil
}
func (nw *NoopWriter) WriteAccountStorage(_ context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error {
return nil
}
func (nw *NoopWriter) CreateContract(address common.Address) error {
return nil
}
// Structure holding updates, deletes, and reads registered within one change period
// A change period can be transaction within a block, or a block within group of blocks
type Buffer struct {
codeReads map[common.Hash]common.Hash
codeUpdates map[common.Hash][]byte
storageUpdates map[common.Hash]map[common.Hash][]byte
storageReads map[common.Hash]map[common.Hash]struct{}
accountUpdates map[common.Hash]*accounts.Account
accountReads map[common.Hash]struct{}
deleted map[common.Hash]struct{}
created map[common.Hash]struct{}
}
// Prepares buffer for work or clears previous data
func (b *Buffer) initialise() {
b.codeReads = make(map[common.Hash]common.Hash)
b.codeUpdates = make(map[common.Hash][]byte)
b.storageUpdates = make(map[common.Hash]map[common.Hash][]byte)
b.storageReads = make(map[common.Hash]map[common.Hash]struct{})
b.accountUpdates = make(map[common.Hash]*accounts.Account)
b.accountReads = make(map[common.Hash]struct{})
b.deleted = make(map[common.Hash]struct{})
b.created = make(map[common.Hash]struct{})
}
// Replaces account pointer with pointers to the copies
func (b *Buffer) detachAccounts() {
for addrHash, account := range b.accountUpdates {
if account != nil {
b.accountUpdates[addrHash] = account.SelfCopy()
}
}
}
// Merges the content of another buffer into this one
func (b *Buffer) merge(other *Buffer) {
for addrHash, codeHash := range other.codeReads {
b.codeReads[addrHash] = codeHash
}
for addrHash, code := range other.codeUpdates {
b.codeUpdates[addrHash] = code
}
for addrHash, om := range other.storageUpdates {
m, ok := b.storageUpdates[addrHash]
if !ok {
m = make(map[common.Hash][]byte)
b.storageUpdates[addrHash] = m
}
for keyHash, v := range om {
m[keyHash] = v
}
}
for addrHash, om := range other.storageReads {
m, ok := b.storageReads[addrHash]
if !ok {
m = make(map[common.Hash]struct{})
b.storageReads[addrHash] = m
}
for keyHash := range om {
m[keyHash] = struct{}{}
}
}
for addrHash, account := range other.accountUpdates {
b.accountUpdates[addrHash] = account
}
for addrHash := range other.accountReads {
b.accountReads[addrHash] = struct{}{}
}
for addrHash := range other.deleted {
b.deleted[addrHash] = struct{}{}
}
for addrHash := range other.created {
b.created[addrHash] = struct{}{}
}
}
// TrieDbState implements StateReader by wrapping a trie and a database, where trie acts as a cache for the database
type TrieDbState struct {
t *trie.Trie
tMu *sync.Mutex
db ethdb.Database
blockNr uint64
buffers []*Buffer
aggregateBuffer *Buffer // Merge of all buffers
currentBuffer *Buffer
historical bool
noHistory bool
resolveReads bool
savePreimages bool
resolveSetBuilder *trie.ResolveSetBuilder
tp *trie.Eviction
newStream trie.Stream
hashBuilder *trie.HashBuilder
resolver *trie.Resolver
incarnationMap map[common.Hash]uint64 // Temporary map of incarnation in case we cannot figure out from the database
}
func NewTrieDbState(root common.Hash, db ethdb.Database, blockNr uint64) *TrieDbState {
t := trie.New(root)
tp := trie.NewEviction()
tds := &TrieDbState{
t: t,
tMu: new(sync.Mutex),
db: db,
blockNr: blockNr,
resolveSetBuilder: trie.NewResolveSetBuilder(),
tp: tp,
savePreimages: true,
hashBuilder: trie.NewHashBuilder(false),
incarnationMap: make(map[common.Hash]uint64),
}
tp.SetBlockNumber(blockNr)
t.AddObserver(tp)
t.AddObserver(NewIntermediateHashes(tds.db, tds.db))
return tds
}
func (tds *TrieDbState) EnablePreimages(ep bool) {
tds.savePreimages = ep
}
func (tds *TrieDbState) SetHistorical(h bool) {
tds.historical = h
}
func (tds *TrieDbState) SetResolveReads(rr bool) {
tds.resolveReads = rr
}
func (tds *TrieDbState) SetNoHistory(nh bool) {
tds.noHistory = nh
}
func (tds *TrieDbState) Copy() *TrieDbState {
tds.tMu.Lock()
tcopy := *tds.t
tds.tMu.Unlock()
n := tds.getBlockNr()
tp := trie.NewEviction()
tp.SetBlockNumber(n)
cpy := TrieDbState{
t: &tcopy,
tMu: new(sync.Mutex),
db: tds.db,
blockNr: n,
tp: tp,
hashBuilder: trie.NewHashBuilder(false),
incarnationMap: make(map[common.Hash]uint64),
}
cpy.t.AddObserver(tp)
cpy.t.AddObserver(NewIntermediateHashes(cpy.db, cpy.db))
return &cpy
}
func (tds *TrieDbState) Database() ethdb.Database {
return tds.db
}
func (tds *TrieDbState) Trie() *trie.Trie {
return tds.t
}
func (tds *TrieDbState) StartNewBuffer() {
if tds.currentBuffer != nil {
if tds.aggregateBuffer == nil {
tds.aggregateBuffer = &Buffer{}
tds.aggregateBuffer.initialise()
}
tds.aggregateBuffer.merge(tds.currentBuffer)
tds.currentBuffer.detachAccounts()
}
tds.currentBuffer = &Buffer{}
tds.currentBuffer.initialise()
tds.buffers = append(tds.buffers, tds.currentBuffer)
}
func (tds *TrieDbState) WithNewBuffer() *TrieDbState {
aggregateBuffer := &Buffer{}
aggregateBuffer.initialise()
currentBuffer := &Buffer{}
currentBuffer.initialise()
buffers := []*Buffer{currentBuffer}
tds.tMu.Lock()
t := &TrieDbState{
t: tds.t,
tMu: tds.tMu,
db: tds.db,
blockNr: tds.getBlockNr(),
buffers: buffers,
aggregateBuffer: aggregateBuffer,
currentBuffer: currentBuffer,
historical: tds.historical,
noHistory: tds.noHistory,
resolveReads: tds.resolveReads,
resolveSetBuilder: tds.resolveSetBuilder,
tp: tds.tp,
hashBuilder: trie.NewHashBuilder(false),
}
tds.tMu.Unlock()
return t
}
func (tds *TrieDbState) LastRoot() common.Hash {
if tds == nil || tds.tMu == nil {
return common.Hash{}
}
tds.tMu.Lock()
defer tds.tMu.Unlock()
return tds.t.Hash()
}
// ComputeTrieRoots is a combination of `ResolveStateTrie` and `UpdateStateTrie`
// DESCRIBED: docs/programmers_guide/guide.md#organising-ethereum-state-into-a-merkle-tree
func (tds *TrieDbState) ComputeTrieRoots() ([]common.Hash, error) {
if _, err := tds.ResolveStateTrie(false, false); err != nil {
return nil, err
}
return tds.UpdateStateTrie()
}
// UpdateStateTrie assumes that the state trie is already fully resolved, i.e. any operations
// will find necessary data inside the trie.
func (tds *TrieDbState) UpdateStateTrie() ([]common.Hash, error) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
roots, err := tds.updateTrieRoots(true)
tds.clearUpdates()
return roots, err
}
func (tds *TrieDbState) PrintTrie(w io.Writer) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
tds.t.Print(w)
fmt.Fprintln(w, "") //nolint
}
// Builds a map where for each address (of a smart contract) there is
// a sorted list of all key hashes that were touched within the
// period for which we are aggregating updates
func (tds *TrieDbState) buildStorageTouches(withReads bool, withValues bool) (common.StorageKeys, [][]byte) {
storageTouches := common.StorageKeys{}
var values [][]byte
for addrHash, m := range tds.aggregateBuffer.storageUpdates {
if withValues {
if _, ok := tds.aggregateBuffer.deleted[addrHash]; ok {
continue
}
}
for keyHash := range m {
var storageKey common.StorageKey
copy(storageKey[:], addrHash[:])
copy(storageKey[common.HashLength:], keyHash[:])
storageTouches = append(storageTouches, storageKey)
}
}
if withReads {
for addrHash, m := range tds.aggregateBuffer.storageReads {
mWrite := tds.aggregateBuffer.storageUpdates[addrHash]
for keyHash := range m {
if mWrite != nil {
if _, ok := mWrite[keyHash]; ok {
// Avoid repeating the same storage keys if they are both read and updated
continue
}
}
var storageKey common.StorageKey
copy(storageKey[:], addrHash[:])
copy(storageKey[common.HashLength:], keyHash[:])
storageTouches = append(storageTouches, storageKey)
}
}
}
sort.Sort(storageTouches)
if withValues {
// We assume that if withValues == true, then withReads == false
var addrHash common.Hash
var keyHash common.Hash
for _, storageKey := range storageTouches {
copy(addrHash[:], storageKey[:])
copy(keyHash[:], storageKey[common.HashLength:])
values = append(values, tds.aggregateBuffer.storageUpdates[addrHash][keyHash])
}
}
return storageTouches, values
}
// Expands the storage tries (by loading data from the database) if it is required
// for accessing storage slots containing in the storageTouches map
func (tds *TrieDbState) resolveStorageTouches(storageTouches common.StorageKeys, resolveFunc func(*trie.Resolver) error) error {
var firstRequest = true
for _, storageKey := range storageTouches {
if need, req := tds.t.NeedResolution(storageKey[:common.HashLength], storageKey[:]); need {
if tds.resolver == nil {
tds.resolver = trie.NewResolver(0, false, tds.blockNr)
tds.resolver.SetHistorical(tds.historical)
} else if firstRequest {
tds.resolver.Reset(0, false, tds.blockNr)
}
firstRequest = false
tds.resolver.AddRequest(req)
}
}
if !firstRequest {
res := resolveFunc(tds.resolver)
return res
}
return nil
}
// Populate pending block proof so that it will be sufficient for accessing all storage slots in storageTouches
func (tds *TrieDbState) populateStorageBlockProof(storageTouches common.StorageKeys) error { //nolint
for _, storageKey := range storageTouches {
tds.resolveSetBuilder.AddStorageTouch(storageKey[:])
}
return nil
}
func (tds *TrieDbState) buildCodeTouches(withReads bool) map[common.Hash]common.Hash {
return tds.aggregateBuffer.codeReads
}
// Builds a sorted list of all address hashes that were touched within the
// period for which we are aggregating updates
func (tds *TrieDbState) buildAccountTouches(withReads bool, withValues bool) (common.Hashes, []*accounts.Account) {
accountTouches := common.Hashes{}
var aValues []*accounts.Account
for addrHash, aValue := range tds.aggregateBuffer.accountUpdates {
if aValue != nil {
if _, ok := tds.aggregateBuffer.deleted[addrHash]; ok {
accountTouches = append(accountTouches, addrHash)
}
}
accountTouches = append(accountTouches, addrHash)
}
if withReads {
for addrHash := range tds.aggregateBuffer.accountReads {
if _, ok := tds.aggregateBuffer.accountUpdates[addrHash]; !ok {
accountTouches = append(accountTouches, addrHash)
}
}
}
sort.Sort(accountTouches)
if withValues {
// We assume that if withValues == true, then withReads == false
aValues = make([]*accounts.Account, len(accountTouches))
for i, addrHash := range accountTouches {
if i < len(accountTouches)-1 && addrHash == accountTouches[i+1] {
aValues[i] = nil // Entry that would wipe out existing storage
} else {
a := tds.aggregateBuffer.accountUpdates[addrHash]
if a != nil {
if _, ok := tds.aggregateBuffer.storageUpdates[addrHash]; ok {
var ac accounts.Account
ac.Copy(a)
ac.Root = trie.EmptyRoot
a = &ac
}
}
aValues[i] = a
}
}
}
return accountTouches, aValues
}
func (tds *TrieDbState) resolveCodeTouches(codeTouches map[common.Hash]common.Hash, resolveFunc trie.ResolveFunc) error {
firstRequest := true
for address, codeHash := range codeTouches {
if need, req := tds.t.NeedResolutonForCode(address, codeHash); need {
if tds.resolver == nil {
tds.resolver = trie.NewResolver(0, true, tds.blockNr)
tds.resolver.SetHistorical(tds.historical)
} else if firstRequest {
tds.resolver.Reset(0, true, tds.blockNr)
}
firstRequest = false
tds.resolver.AddCodeRequest(req)
}
}
if !firstRequest {
return resolveFunc(tds.resolver)
}
return nil
}
// Expands the accounts trie (by loading data from the database) if it is required
// for accessing accounts whose addresses are contained in the accountTouches
func (tds *TrieDbState) resolveAccountTouches(accountTouches common.Hashes, resolveFunc trie.ResolveFunc) error {
var firstRequest = true
for _, addrHash := range accountTouches {
if need, req := tds.t.NeedResolution(nil, addrHash[:]); need {
if tds.resolver == nil {
tds.resolver = trie.NewResolver(0, true, tds.blockNr)
tds.resolver.SetHistorical(tds.historical)
} else if firstRequest {
tds.resolver.Reset(0, true, tds.blockNr)
}
firstRequest = false
tds.resolver.AddRequest(req)
}
}
if !firstRequest {
return resolveFunc(tds.resolver)
}
return nil
}
func (tds *TrieDbState) populateAccountBlockProof(accountTouches common.Hashes) {
for _, addrHash := range accountTouches {
a := addrHash
tds.resolveSetBuilder.AddTouch(a[:])
}
}
// ExtractTouches returns two lists of keys - for accounts and storage items correspondingly
// Each list is the collection of keys that have been "touched" (inserted, updated, or simply accessed)
// since the last invocation of `ExtractTouches`.
func (tds *TrieDbState) ExtractTouches() (accountTouches [][]byte, storageTouches [][]byte) {
return tds.resolveSetBuilder.ExtractTouches()
}
func (tds *TrieDbState) resolveStateTrieWithFunc(resolveFunc trie.ResolveFunc) error {
// Aggregating the current buffer, if any
if tds.currentBuffer != nil {
if tds.aggregateBuffer == nil {
tds.aggregateBuffer = &Buffer{}
tds.aggregateBuffer.initialise()
}
tds.aggregateBuffer.merge(tds.currentBuffer)
}
if tds.aggregateBuffer == nil {
return nil
}
tds.tMu.Lock()
defer tds.tMu.Unlock()
// Prepare (resolve) storage tries so that actual modifications can proceed without database access
storageTouches, _ := tds.buildStorageTouches(tds.resolveReads, false)
// Prepare (resolve) accounts trie so that actual modifications can proceed without database access
accountTouches, _ := tds.buildAccountTouches(tds.resolveReads, false)
// Prepare (resolve) contract code reads so that actual modifications can proceed without database access
codeTouches := tds.buildCodeTouches(tds.resolveReads)
var err error
if err = tds.resolveAccountTouches(accountTouches, resolveFunc); err != nil {
return err
}
if err = tds.resolveCodeTouches(codeTouches, resolveFunc); err != nil {
return err
}
if tds.resolveReads {
tds.populateAccountBlockProof(accountTouches)
}
if err = tds.resolveStorageTouches(storageTouches, resolveFunc); err != nil {
return err
}
if tds.resolveReads {
if err := tds.populateStorageBlockProof(storageTouches); err != nil {
return err
}
}
return nil
}
// ResolveStateTrie resolves parts of the state trie that would be necessary for any updates
// (and reads, if `resolveReads` is set).
func (tds *TrieDbState) ResolveStateTrie(extractWitnesses bool, trace bool) ([]*trie.Witness, error) {
var witnesses []*trie.Witness
resolveFunc := func(resolver *trie.Resolver) error {
if resolver == nil {
return nil
}
resolver.CollectWitnesses(extractWitnesses)
if err := resolver.ResolveWithDb(tds.db, tds.blockNr, trace); err != nil {
return err
}
if !extractWitnesses {
return nil
}
resolverWitnesses := resolver.PopCollectedWitnesses()
if len(resolverWitnesses) == 0 {
return nil
}
if witnesses == nil {
witnesses = resolverWitnesses
} else {
witnesses = append(witnesses, resolverWitnesses...)
}
return nil
}
if err := tds.resolveStateTrieWithFunc(resolveFunc); err != nil {
return nil, err
}
return witnesses, nil
}
// ResolveStateTrieStateless uses a witness DB to resolve subtries
func (tds *TrieDbState) ResolveStateTrieStateless(database trie.WitnessStorage) error {
var startPos int64
resolveFunc := func(resolver *trie.Resolver) error {
if resolver == nil {
return nil
}
pos, err := resolver.ResolveStateless(database, tds.blockNr, uint32(MaxTrieCacheSize), startPos)
if err != nil {
return err
}
startPos = pos
return nil
}
return tds.resolveStateTrieWithFunc(resolveFunc)
}
// CalcTrieRoots calculates trie roots without modifying the state trie
func (tds *TrieDbState) CalcTrieRoots(trace bool) (common.Hash, error) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
// Retrieve the list of inserted/updated/deleted storage items (keys and values)
storageKeys, sValues := tds.buildStorageTouches(false, true)
if trace {
fmt.Printf("len(storageKeys)=%d, len(sValues)=%d\n", len(storageKeys), len(sValues))
}
// Retrieve the list of inserted/updated/deleted accounts (keys and values)
accountKeys, aValues := tds.buildAccountTouches(false, true)
if trace {
fmt.Printf("len(accountKeys)=%d, len(aValues)=%d\n", len(accountKeys), len(aValues))
}
return trie.HashWithModifications(tds.t, accountKeys, aValues, storageKeys, sValues, common.HashLength, &tds.newStream, tds.hashBuilder, trace)
}
// forward is `true` if the function is used to progress the state forward (by adding blocks)
// forward is `false` if the function is used to rewind the state (for reorgs, for example)
func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
accountUpdates := tds.aggregateBuffer.accountUpdates
// Perform actual updates on the tries, and compute one trie root per buffer
// These roots can be used to populate receipt.PostState on pre-Byzantium
roots := make([]common.Hash, len(tds.buffers))
// The following map is to prevent repeated clearouts of the storage
alreadyCreated := make(map[common.Hash]struct{})
for i, b := range tds.buffers {
// New contracts are being created at these addresses. Therefore, we need to clear the storage items
// that might be remaining in the trie and figure out the next incarnations
for addrHash := range b.created {
// Prevent repeated storage clearouts
if _, ok := alreadyCreated[addrHash]; ok {
continue
}
alreadyCreated[addrHash] = struct{}{}
if account, ok := b.accountUpdates[addrHash]; ok && account != nil {
b.accountUpdates[addrHash].Root = trie.EmptyRoot
}
if account, ok := tds.aggregateBuffer.accountUpdates[addrHash]; ok && account != nil {
tds.aggregateBuffer.accountUpdates[addrHash].Root = trie.EmptyRoot
}
//fmt.Println("updateTrieRoots del subtree", addrHash.String())
// The only difference between Delete and DeleteSubtree is that Delete would delete accountNode too,
// whereas DeleteSubtree will keep the accountNode, but will make the storage sub-trie empty
tds.t.DeleteSubtree(addrHash[:])
}
for addrHash, account := range b.accountUpdates {
if account != nil {
//fmt.Println("updateTrieRoots b.accountUpdates", addrHash.String(), account.Incarnation)
tds.t.UpdateAccount(addrHash[:], account)
} else {
tds.t.Delete(addrHash[:])
delete(b.codeUpdates, addrHash)
}
}
for addrHash, newCode := range b.codeUpdates {
if err := tds.t.UpdateAccountCode(addrHash[:], newCode); err != nil {
return nil, err
}
}
for addrHash, m := range b.storageUpdates {
for keyHash, v := range m {
cKey := dbutils.GenerateCompositeTrieKey(addrHash, keyHash)
if len(v) > 0 {
//fmt.Printf("Update storage trie addrHash %x, keyHash %x: %x\n", addrHash, keyHash, v)
if forward {
tds.t.Update(cKey, v)
} else {
// If rewinding, it might not be possible to execute storage item update.
// If we rewind from the state where a contract does not exist anymore (it was self-destructed)
// to the point where it existed (with storage), then rewinding to the point of existence
// will not bring back the full storage trie. Instead there will be one hashNode.
// So we probe for this situation first
if _, ok := tds.t.Get(cKey); ok {
tds.t.Update(cKey, v)
}
}
} else {
if forward {
tds.t.Delete(cKey)
} else {
// If rewinding, it might not be possible to execute storage item update.
// If we rewind from the state where a contract does not exist anymore (it was self-destructed)
// to the point where it existed (with storage), then rewinding to the point of existence
// will not bring back the full storage trie. Instead there will be one hashNode.
// So we probe for this situation first
if _, ok := tds.t.Get(cKey); ok {
tds.t.Delete(cKey)
}
}
}
}
if forward || debug.IsThinHistory() {
if account, ok := b.accountUpdates[addrHash]; ok && account != nil {
ok, root := tds.t.DeepHash(addrHash[:])
if ok {
account.Root = root
//fmt.Printf("(b)Set %x root for addrHash %x\n", root, addrHash)
} else {
//fmt.Printf("(b)Set empty root for addrHash %x\n", addrHash)
account.Root = trie.EmptyRoot
}
}
if account, ok := accountUpdates[addrHash]; ok && account != nil {
ok, root := tds.t.DeepHash(addrHash[:])
if ok {
account.Root = root
//fmt.Printf("Set %x root for addrHash %x\n", root, addrHash)
} else {
//fmt.Printf("Set empty root for addrHash %x\n", addrHash)
account.Root = trie.EmptyRoot
}
}
} else {
// Simply comparing the correctness of the storageRoot computations
if account, ok := b.accountUpdates[addrHash]; ok && account != nil {
ok, h := tds.t.DeepHash(addrHash[:])
if !ok {
h = trie.EmptyRoot
}
if account.Root != h {
return nil, fmt.Errorf("mismatched storage root for %x: expected %x, got %x", addrHash, account.Root, h)
}
}
if account, ok := accountUpdates[addrHash]; ok && account != nil {
ok, h := tds.t.DeepHash(addrHash[:])
if !ok {
h = trie.EmptyRoot
}
if account.Root != h {
return nil, fmt.Errorf("mismatched storage root for %x: expected %x, got %x", addrHash, account.Root, h)
}
}
}
}
// For the contracts that got deleted
for addrHash := range b.deleted {
if _, ok := b.created[addrHash]; ok {
// In some rather artificial circumstances, an account can be recreated after having been self-destructed
// in the same block. It can only happen when contract is introduced in the genesis state with nonce 0
// rather than created by a transaction (in that case, its starting nonce is 1). The self-destructed
// contract actually gets removed from the state only at the end of the block, so if its nonce is not 0,
// it will prevent any re-creation within the same block. However, if the contract is introduced in
// the genesis state, its nonce is 0, and that means it can be self-destructed, and then re-created,
// all in the same block. In such cases, we must preserve storage modifications happening after the
// self-destruction
continue
}
if account, ok := b.accountUpdates[addrHash]; ok && account != nil {
//fmt.Printf("(b)Set empty root for addrHash %x due to deleted\n", addrHash)
account.Root = trie.EmptyRoot
}
if account, ok := accountUpdates[addrHash]; ok && account != nil {
//fmt.Printf("Set empty root for addrHash %x due to deleted\n", addrHash)
account.Root = trie.EmptyRoot
}
tds.t.DeleteSubtree(addrHash[:])
}
roots[i] = tds.t.Hash()
}
return roots, nil
}
func (tds *TrieDbState) clearUpdates() {
tds.buffers = nil
tds.currentBuffer = nil
tds.aggregateBuffer = nil
}
func (tds *TrieDbState) SetBlockNr(blockNr uint64) {
tds.setBlockNr(blockNr)
tds.tp.SetBlockNumber(blockNr)
}
func (tds *TrieDbState) GetBlockNr() uint64 {
return tds.getBlockNr()
}
func (tds *TrieDbState) UnwindTo(blockNr uint64) error {
tds.StartNewBuffer()
b := tds.currentBuffer
if err := tds.db.RewindData(tds.blockNr, blockNr, func(bucket, key, value []byte) error {
//fmt.Printf("bucket: %x, key: %x, value: %x\n", bucket, key, value)
if bytes.Equal(bucket, dbutils.AccountsHistoryBucket) {
var addrHash common.Hash
copy(addrHash[:], key)
if len(value) > 0 {
var acc accounts.Account
if err := acc.DecodeForStorage(value); err != nil {
return err
}
// Fetch the code hash
if acc.Incarnation > 0 && debug.IsThinHistory() && acc.IsEmptyCodeHash() {
if codeHash, err := tds.db.Get(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation)); err == nil {
copy(acc.CodeHash[:], codeHash)
}
}
b.accountUpdates[addrHash] = &acc
value = make([]byte, acc.EncodingLengthForStorage())
acc.EncodeForStorage(value)
if err := tds.db.Put(dbutils.AccountsBucket, addrHash[:], value); err != nil {
return err
}
} else {
b.accountUpdates[addrHash] = nil
if err := tds.db.Delete(dbutils.AccountsBucket, addrHash[:]); err != nil {
return err
}
}
} else if bytes.Equal(bucket, dbutils.StorageHistoryBucket) {
var addrHash common.Hash
copy(addrHash[:], key[:common.HashLength])
var keyHash common.Hash
copy(keyHash[:], key[common.HashLength+common.IncarnationLength:])
m, ok := b.storageUpdates[addrHash]
if !ok {
m = make(map[common.Hash][]byte)
b.storageUpdates[addrHash] = m
}
if len(value) > 0 {
m[keyHash] = value
if err := tds.db.Put(dbutils.StorageBucket, key[:common.HashLength+common.IncarnationLength+common.HashLength], value); err != nil {
return err
}
} else {
m[keyHash] = nil
if err := tds.db.Delete(dbutils.StorageBucket, key[:common.HashLength+common.IncarnationLength+common.HashLength]); err != nil {
return err
}
}
}
return nil
}); err != nil {
return err
}
if _, err := tds.ResolveStateTrie(false, false); err != nil {
return err
}
tds.tMu.Lock()
defer tds.tMu.Unlock()
if _, err := tds.updateTrieRoots(false); err != nil {
return err
}
for i := tds.blockNr; i > blockNr; i-- {
if err := tds.db.DeleteTimestamp(i); err != nil {
return err
}
}
tds.clearUpdates()
tds.setBlockNr(blockNr)
return nil
}
func (tds *TrieDbState) readAccountDataByHash(addrHash common.Hash) (*accounts.Account, error) {
if acc, ok := tds.GetAccount(addrHash); ok {
return acc, nil
}
// Not present in the trie, try the database
var err error
var enc []byte
if tds.historical {
enc, err = tds.db.GetAsOf(dbutils.AccountsBucket, dbutils.AccountsHistoryBucket, addrHash[:], tds.blockNr+1)
if err != nil {
enc = nil
}
} else {
enc, err = tds.db.Get(dbutils.AccountsBucket, addrHash[:])
if err != nil {
enc = nil
}
}
if len(enc) == 0 {
return nil, nil
}
var a accounts.Account
if err := a.DecodeForStorage(enc); err != nil {
return nil, err
}
if tds.historical && debug.IsThinHistory() && a.Incarnation > 0 {
codeHash, err := tds.db.Get(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash, a.Incarnation))
if err == nil {
a.CodeHash = common.BytesToHash(codeHash)
} else {
log.Error("Get code hash is incorrect", "err", err)
}
}
return &a, nil
}
func (tds *TrieDbState) GetAccount(addrHash common.Hash) (*accounts.Account, bool) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
acc, ok := tds.t.GetAccount(addrHash[:])
return acc, ok
}
func (tds *TrieDbState) ReadAccountData(address common.Address) (*accounts.Account, error) {
addrHash, err := common.HashData(address[:])
if err != nil {
return nil, err
}
if tds.resolveReads {
if _, ok := tds.currentBuffer.accountUpdates[addrHash]; !ok {
tds.currentBuffer.accountReads[addrHash] = struct{}{}
}
}
return tds.readAccountDataByHash(addrHash)
}
func (tds *TrieDbState) savePreimage(save bool, hash, preimage []byte) error {
if !save || !tds.savePreimages {
return nil
}
// The following check is to minimise overwriting the same value of the preimage
// in the database, which would cause extra write churn
if p, _ := tds.db.Get(dbutils.PreimagePrefix, hash); p != nil {
return nil
}
return tds.db.Put(dbutils.PreimagePrefix, hash, preimage)
}
func (tds *TrieDbState) HashAddress(address common.Address, save bool) (common.Hash, error) {
addrHash, err := common.HashData(address[:])
if err != nil {
return common.Hash{}, err
}
return addrHash, tds.savePreimage(save, addrHash[:], address[:])
}
func (tds *TrieDbState) HashKey(key *common.Hash, save bool) (common.Hash, error) {
keyHash, err := common.HashData(key[:])
if err != nil {
return common.Hash{}, err
}
return keyHash, tds.savePreimage(save, keyHash[:], key[:])
}
func (tds *TrieDbState) GetKey(shaKey []byte) []byte {
key, _ := tds.db.Get(dbutils.PreimagePrefix, shaKey)
return key
}
func (tds *TrieDbState) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) {
addrHash, err := tds.HashAddress(address, false /*save*/)
if err != nil {
return nil, err
}
if tds.currentBuffer != nil {
if _, ok := tds.currentBuffer.deleted[addrHash]; ok {
return nil, nil
}
}
if tds.aggregateBuffer != nil {
if _, ok := tds.aggregateBuffer.deleted[addrHash]; ok {
return nil, nil
}
}
seckey, err := tds.HashKey(key, false /*save*/)
if err != nil {
return nil, err
}
if tds.resolveReads {
var addReadRecord = false
if mWrite, ok := tds.currentBuffer.storageUpdates[addrHash]; ok {
if _, ok1 := mWrite[seckey]; !ok1 {
addReadRecord = true
}
} else {
addReadRecord = true
}
if addReadRecord {
m, ok := tds.currentBuffer.storageReads[addrHash]
if !ok {
m = make(map[common.Hash]struct{})
tds.currentBuffer.storageReads[addrHash] = m
}
m[seckey] = struct{}{}
}
}
tds.tMu.Lock()
defer tds.tMu.Unlock()
enc, ok := tds.t.Get(dbutils.GenerateCompositeTrieKey(addrHash, seckey))
if !ok {
// Not present in the trie, try database
if tds.historical {
enc, err = tds.db.GetAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey), tds.blockNr)
if err != nil {
enc = nil
}
} else {
enc, err = tds.db.Get(dbutils.StorageBucket, dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey))
if err != nil {
enc = nil
}
}
}
return enc, nil
}
func (tds *TrieDbState) ReadCodeByHash(codeHash common.Hash) (code []byte, err error) {
if bytes.Equal(codeHash[:], emptyCodeHash) {
return nil, nil
}
code, err = tds.db.Get(dbutils.CodeBucket, codeHash[:])
if tds.resolveReads {
// we have to be careful, because the code might change
// during the block execution, so we are always
// storing the latest code hash
tds.resolveSetBuilder.ReadCode(codeHash)
}
return code, err
}
func (tds *TrieDbState) readAccountCodeFromTrie(addrHash []byte) ([]byte, bool) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
return tds.t.GetAccountCode(addrHash)
}
func (tds *TrieDbState) ReadAccountCode(address common.Address, codeHash common.Hash) (code []byte, err error) {
if bytes.Equal(codeHash[:], emptyCodeHash) {
return nil, nil
}
addrHash, err := tds.HashAddress(address, false /*save*/)
if err != nil {
return nil, err
}
if cached, ok := tds.readAccountCodeFromTrie(addrHash[:]); ok {
code, err = cached, nil
} else {
code, err = tds.db.Get(dbutils.CodeBucket, codeHash[:])
}
if tds.resolveReads {
addrHash, err1 := common.HashData(address[:])
if err1 != nil {
return nil, err
}
if _, ok := tds.currentBuffer.accountUpdates[addrHash]; !ok {
tds.currentBuffer.accountReads[addrHash] = struct{}{}
}
// we have to be careful, because the code might change
// during the block execution, so we are always
// storing the latest code hash
tds.currentBuffer.codeReads[addrHash] = codeHash
tds.resolveSetBuilder.ReadCode(codeHash)
}
return code, err
}
func (tds *TrieDbState) ReadAccountCodeSize(address common.Address, codeHash common.Hash) (codeSize int, err error) {
addrHash, err := tds.HashAddress(address, false /*save*/)
if err != nil {
return 0, err
}
if code, ok := tds.readAccountCodeFromTrie(addrHash[:]); ok {
codeSize, err = len(code), nil
} else {
code, err = tds.ReadAccountCode(address, codeHash)
if err != nil {
return 0, err
}
codeSize = len(code)
}
if tds.resolveReads {
addrHash, err1 := common.HashData(address[:])
if err1 != nil {
return 0, err
}
if _, ok := tds.currentBuffer.accountUpdates[addrHash]; !ok {
tds.currentBuffer.accountReads[addrHash] = struct{}{}
}
// we have to be careful, because the code might change
// during the block execution, so we are always
// storing the latest code hash
tds.currentBuffer.codeReads[addrHash] = codeHash
tds.resolveSetBuilder.ReadCode(codeHash)
}
return codeSize, nil
}
// nextIncarnation determines what should be the next incarnation of an account (i.e. how many times it has existed before at this address)
func (tds *TrieDbState) nextIncarnation(addrHash common.Hash) (uint64, error) {
var found bool
var incarnationBytes [common.IncarnationLength]byte
if tds.historical {
// We reserve ethdb.MaxTimestampLength (8) at the end of the key to accommodate any possible timestamp
// (timestamp's encoding may have variable length)
startkey := make([]byte, common.HashLength+common.IncarnationLength+common.HashLength+ethdb.MaxTimestampLength)
var fixedbits uint = 8 * common.HashLength
copy(startkey, addrHash[:])
if err := tds.db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startkey, fixedbits, tds.blockNr, func(k, _ []byte) (bool, error) {
copy(incarnationBytes[:], k[common.HashLength:])
found = true
return false, nil
}); err != nil {
return 0, err
}
} else {
if inc, ok := tds.incarnationMap[addrHash]; ok {
return inc + 1, nil
}
startkey := make([]byte, common.HashLength+common.IncarnationLength+common.HashLength)
var fixedbits uint = 8 * common.HashLength
copy(startkey, addrHash[:])
if err := tds.db.Walk(dbutils.StorageBucket, startkey, fixedbits, func(k, v []byte) (bool, error) {
copy(incarnationBytes[:], k[common.HashLength:])
found = true
return false, nil
}); err != nil {
return 0, err
}
}
if found {
return (^binary.BigEndian.Uint64(incarnationBytes[:])) + 1, nil
}
return FirstContractIncarnation, nil
}
var prevMemStats runtime.MemStats
type TrieStateWriter struct {
tds *TrieDbState
}
func (tds *TrieDbState) EvictTries(print bool) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
strict := print
tds.incarnationMap = make(map[common.Hash]uint64)
if print {
trieSize := tds.t.TrieSize()
fmt.Println("") // newline for better formatting
fmt.Printf("[Before] Actual nodes size: %d, accounted size: %d\n", trieSize, tds.tp.TotalSize())
}
if strict {
actualAccounts := uint64(tds.t.NumberOfAccounts())
fmt.Println("number of leaves: ", actualAccounts)
accountedAccounts := tds.tp.NumberOf()
if actualAccounts != accountedAccounts {
panic(fmt.Errorf("account number mismatch: trie=%v eviction=%v", actualAccounts, accountedAccounts))
}
fmt.Printf("checking number --> ok\n")
actualSize := uint64(tds.t.TrieSize())
accountedSize := tds.tp.TotalSize()
if actualSize != accountedSize {
panic(fmt.Errorf("account size mismatch: trie=%v eviction=%v", actualSize, accountedSize))
}
fmt.Printf("checking size --> ok\n")
}
tds.tp.EvictToFitSize(tds.t, MaxTrieCacheSize)
if strict {
actualAccounts := uint64(tds.t.NumberOfAccounts())
fmt.Println("number of leaves: ", actualAccounts)
accountedAccounts := tds.tp.NumberOf()
if actualAccounts != accountedAccounts {
panic(fmt.Errorf("after eviction account number mismatch: trie=%v eviction=%v", actualAccounts, accountedAccounts))
}
fmt.Printf("checking number --> ok\n")
actualSize := uint64(tds.t.TrieSize())
accountedSize := tds.tp.TotalSize()
if actualSize != accountedSize {
panic(fmt.Errorf("after eviction account size mismatch: trie=%v eviction=%v", actualSize, accountedSize))
}
fmt.Printf("checking size --> ok\n")
}
if print {
trieSize := tds.t.TrieSize()
fmt.Printf("[After] Actual nodes size: %d, accounted size: %d\n", trieSize, tds.tp.TotalSize())
actualAccounts := uint64(tds.t.NumberOfAccounts())
fmt.Println("number of leaves: ", actualAccounts)
}
var m runtime.MemStats
runtime.ReadMemStats(&m)
log.Info("Memory", "nodes size", tds.tp.TotalSize(), "hashes", tds.t.HashMapSize(),
"alloc", int(m.Alloc/1024), "sys", int(m.Sys/1024), "numGC", int(m.NumGC))
if print {
fmt.Printf("Eviction done. Nodes size: %d, alloc: %d, sys: %d, numGC: %d\n", tds.tp.TotalSize(), int(m.Alloc/1024), int(m.Sys/1024), int(m.NumGC))
}
}
func (tds *TrieDbState) TrieStateWriter() *TrieStateWriter {
return &TrieStateWriter{tds: tds}
}
func (tds *TrieDbState) DbStateWriter() *DbStateWriter {
return &DbStateWriter{tds: tds}
}
func accountsEqual(a1, a2 *accounts.Account) bool {
if a1.Nonce != a2.Nonce {
return false
}
if !a1.Initialised {
if a2.Initialised {
return false
}
} else if !a2.Initialised {
return false
} else if a1.Balance.Cmp(&a2.Balance) != 0 {
return false
}
if a1.Root != a2.Root {
return false
}
if a1.CodeHash == (common.Hash{}) {
if a2.CodeHash != (common.Hash{}) {
return false
}
} else if a2.CodeHash == (common.Hash{}) {
return false
} else if a1.CodeHash != a2.CodeHash {
return false
}
return true
}
func (tsw *TrieStateWriter) UpdateAccountData(_ context.Context, address common.Address, original, account *accounts.Account) error {
addrHash, err := tsw.tds.HashAddress(address, false /*save*/)
if err != nil {
return err
}
tsw.tds.currentBuffer.accountUpdates[addrHash] = account
return nil
}
func (tsw *TrieStateWriter) DeleteAccount(_ context.Context, address common.Address, original *accounts.Account) error {
addrHash, err := tsw.tds.HashAddress(address, false /*save*/)
if err != nil {
return err
}
tsw.tds.currentBuffer.accountUpdates[addrHash] = nil
delete(tsw.tds.currentBuffer.storageUpdates, addrHash)
tsw.tds.currentBuffer.deleted[addrHash] = struct{}{}
return nil
}
func (tsw *TrieStateWriter) UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error {
if tsw.tds.resolveReads {
tsw.tds.resolveSetBuilder.CreateCode(codeHash)
}
tsw.tds.currentBuffer.codeUpdates[addrHash] = code
return nil
}
func (tsw *TrieStateWriter) WriteAccountStorage(_ context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error {
addrHash, err := tsw.tds.HashAddress(address, false /*save*/)
if err != nil {
return err
}
v := bytes.TrimLeft(value[:], "\x00")
m, ok := tsw.tds.currentBuffer.storageUpdates[addrHash]
if !ok {
m = make(map[common.Hash][]byte)
tsw.tds.currentBuffer.storageUpdates[addrHash] = m
}
seckey, err := tsw.tds.HashKey(key, false /*save*/)
if err != nil {
return err
}
if len(v) > 0 {
m[seckey] = v
} else {
m[seckey] = nil
}
//fmt.Printf("WriteAccountStorage %x %x: %x, buffer %d\n", addrHash, seckey, value, len(tsw.tds.buffers))
return nil
}
// ExtractWitness produces a block witness for the block that has just been processed, in a serialised form
func (tds *TrieDbState) ExtractWitness(trace bool, isBinary bool) (*trie.Witness, error) {
rs := tds.resolveSetBuilder.Build(isBinary)
return tds.makeBlockWitness(trace, rs, isBinary)
}
// ExtractWitnessForPrefix produces a block witness, limited to the given prefix, for the block that has just been processed, in a serialised form
func (tds *TrieDbState) ExtractWitnessForPrefix(prefix []byte, trace bool, isBinary bool) (*trie.Witness, error) {
rs := tds.resolveSetBuilder.Build(isBinary)
return tds.makeBlockWitnessForPrefix(prefix, trace, rs, isBinary)
}
func (tds *TrieDbState) makeBlockWitnessForPrefix(prefix []byte, trace bool, rs *trie.ResolveSet, isBinary bool) (*trie.Witness, error) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
t := tds.t
if isBinary {
t = trie.HexToBin(tds.t).Trie()
}
return t.ExtractWitnessForPrefix(prefix, tds.blockNr, trace, rs)
}
func (tds *TrieDbState) makeBlockWitness(trace bool, rs *trie.ResolveSet, isBinary bool) (*trie.Witness, error) {
tds.tMu.Lock()
defer tds.tMu.Unlock()
t := tds.t
if isBinary {
t = trie.HexToBin(tds.t).Trie()
}
return t.ExtractWitness(tds.blockNr, trace, rs)
}
func (tsw *TrieStateWriter) CreateContract(address common.Address) error {
addrHash, err := tsw.tds.HashAddress(address, true /*save*/)
if err != nil {
return err
}
tsw.tds.currentBuffer.created[addrHash] = struct{}{}
if account, ok := tsw.tds.currentBuffer.accountUpdates[addrHash]; ok && account != nil {
incarnation, err := tsw.tds.nextIncarnation(addrHash)
if err != nil {
return err
}
account.SetIncarnation(incarnation)
tsw.tds.incarnationMap[addrHash] = incarnation
}
return nil
}
func (tds *TrieDbState) TriePruningDebugDump() string {
return tds.tp.DebugDump()
}
func (tds *TrieDbState) getBlockNr() uint64 {
return atomic.LoadUint64(&tds.blockNr)
}
func (tds *TrieDbState) setBlockNr(n uint64) {
atomic.StoreUint64(&tds.blockNr, n)
}
// GetNodeByHash gets node's RLP by hash.
func (tds *TrieDbState) GetNodeByHash(hash common.Hash) []byte {
tds.tMu.Lock()
defer tds.tMu.Unlock()
return tds.t.GetNodeByHash(hash)
}
| 1 | 21,452 | Ideally this error should not be swallowed (can do in the next PR) | ledgerwatch-erigon | go |
@@ -212,9 +212,10 @@ def config():
global _registry_url
_registry_url = None
-def _update_auth(team, refresh_token):
+def _update_auth(team, refresh_token, timeout=None):
response = requests.post("%s/api/token" % get_registry_url(team), data=dict(
- refresh_token=refresh_token
+ refresh_token=refresh_token,
+ timeout=timeout
))
if response.status_code != requests.codes.ok: | 1 | # -*- coding: utf-8 -*-
"""
Command line parsing and command dispatch
"""
from __future__ import print_function
from builtins import input # pylint:disable=W0622
from collections import namedtuple
from datetime import datetime
from functools import partial
import gzip
import hashlib
import json
import os
import platform
import re
from shutil import copyfileobj, move, rmtree
import socket
import stat
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import yaml
from packaging.version import Version
import pandas as pd
import pkg_resources
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from six import iteritems, itervalues, string_types
from six.moves.urllib.parse import urlparse, urlunparse
from tqdm import tqdm
from .build import (build_package, build_package_from_contents, generate_build_file,
generate_contents, BuildException, exec_yaml_python, load_yaml)
from .const import DEFAULT_BUILDFILE
from .core import (hash_contents, find_object_hashes, PackageFormat, TableNode, FileNode, GroupNode,
decode_node, encode_node, LATEST_TAG)
from .hashing import digest_file
from .store import PackageStore, StoreException
from .util import BASE_DIR, FileWithReadProgress, gzip_compress, is_nodename
from ..imports import _from_core_node
from . import check_functions as qc
from .. import nodes
# pyOpenSSL and S3 don't play well together. pyOpenSSL is completely optional, but gets enabled by requests.
# So... We disable it. That's what boto does.
# https://github.com/boto/botocore/issues/760
# https://github.com/boto/botocore/pull/803
try:
from urllib3.contrib import pyopenssl
pyopenssl.extract_from_urllib3()
except ImportError:
pass
DEFAULT_REGISTRY_URL = 'https://pkg.quiltdata.com'
GIT_URL_RE = re.compile(r'(?P<url>http[s]?://[\w./~_-]+\.git)(?:@(?P<branch>[\w_-]+))?')
EXTENDED_PACKAGE_RE = re.compile(
r'^((?:\w+:)?\w+/[\w/]+)(?::h(?:ash)?:(.+)|:v(?:ersion)?:(.+)|:t(?:ag)?:(.+))?$'
)
CHUNK_SIZE = 4096
PARALLEL_UPLOADS = 20
PARALLEL_DOWNLOADS = 20
S3_CONNECT_TIMEOUT = 30
S3_READ_TIMEOUT = 30
S3_TIMEOUT_RETRIES = 3
CONTENT_RANGE_RE = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$')
LOG_TIMEOUT = 3 # 3 seconds
VERSION = pkg_resources.require('quilt')[0].version
class CommandException(Exception):
"""
Exception class for all command-related failures.
"""
pass
class HTTPResponseException(CommandException):
def __init__(self, message, response):
super(HTTPResponseException, self).__init__(message)
self.response = response
# Return type for parse_package_extended.
PackageInfo = namedtuple("PackageInfo", "full_name, team, user, name, subpath, hash, version, tag")
def parse_package_extended(identifier):
"""
Parses the extended package syntax and returns a PackageInfo namedtuple of (full_name, team, user, name, subpath, hash, version, tag).
"""
match = EXTENDED_PACKAGE_RE.match(identifier)
if match is None:
pkg_format = '[team:]owner/package_name/path[:v:<version> or :t:<tag> or :h:<hash>]'
raise CommandException("Specify package as %s." % pkg_format)
full_name, pkg_hash, version, tag = match.groups()
team, user, name, subpath = parse_package(full_name, allow_subpath=True)
# namedtuple return value
return PackageInfo(full_name, team, user, name, subpath, pkg_hash, version, tag)
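# Illustrative inputs for the extended syntax above (assumed values, not from the
# original source):
#   "user/pkg"                         -> no hash, version, or tag
#   "team:user/pkg/sub/path:h:abc123"  -> subpath ['sub', 'path'], hash 'abc123'
#   "user/pkg:v:1.0.0"                 -> version '1.0.0'
#   "user/pkg:t:latest"                -> tag 'latest'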
def parse_package(name, allow_subpath=False):
try:
values = name.split(':', 1)
team = values[0] if len(values) > 1 else None
values = values[-1].split('/')
# Can't do "owner, pkg, *subpath = ..." in Python2 :(
(owner, pkg), subpath = values[:2], values[2:]
if not owner or not pkg:
# Make sure they're not empty.
raise ValueError
if subpath and not allow_subpath:
raise ValueError
except ValueError:
pkg_format = '[team:]owner/package_name/path' if allow_subpath else '[team:]owner/package_name'
raise CommandException("Specify package as %s." % pkg_format)
try:
PackageStore.check_name(team, owner, pkg, subpath)
except StoreException as ex:
raise CommandException(str(ex))
if allow_subpath:
return team, owner, pkg, subpath
return team, owner, pkg
_registry_url = None
def _load_config():
config_path = os.path.join(BASE_DIR, 'config.json')
if os.path.exists(config_path):
with open(config_path) as fd:
return json.load(fd)
return {}
def _save_config(cfg):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
config_path = os.path.join(BASE_DIR, 'config.json')
with open(config_path, 'w') as fd:
json.dump(cfg, fd)
def _load_auth():
auth_path = os.path.join(BASE_DIR, 'auth.json')
if os.path.exists(auth_path):
with open(auth_path) as fd:
auth = json.load(fd)
if 'access_token' in auth:
# Old format; ignore it.
auth = {}
return auth
return {}
def _save_auth(cfg):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
auth_path = os.path.join(BASE_DIR, 'auth.json')
with open(auth_path, 'w') as fd:
os.chmod(auth_path, stat.S_IRUSR | stat.S_IWUSR)
json.dump(cfg, fd)
def get_registry_url(team):
if team is not None:
return "https://%s-registry.team.quiltdata.com" % team
global _registry_url
if _registry_url is not None:
return _registry_url
# Env variable; overrides the config.
url = os.environ.get('QUILT_PKG_URL')
if url is None:
# Config file (generated by `quilt config`).
cfg = _load_config()
url = cfg.get('registry_url', '')
# '' means default URL.
_registry_url = url or DEFAULT_REGISTRY_URL
return _registry_url
def config():
answer = input("Please enter the URL for your custom Quilt registry (ask your administrator),\n"
"or leave this line blank to use the default registry: ")
if answer:
url = urlparse(answer.rstrip('/'))
if (url.scheme not in ['http', 'https'] or not url.netloc or
url.path or url.params or url.query or url.fragment):
raise CommandException("Invalid URL: %s" % answer)
canonical_url = urlunparse(url)
else:
# When saving the config, store '' instead of the actual URL in case we ever change it.
canonical_url = ''
cfg = _load_config()
cfg['registry_url'] = canonical_url
_save_config(cfg)
# Clear the cached URL.
global _registry_url
_registry_url = None
def _update_auth(team, refresh_token):
response = requests.post("%s/api/token" % get_registry_url(team), data=dict(
refresh_token=refresh_token
))
if response.status_code != requests.codes.ok:
raise CommandException("Authentication error: %s" % response.status_code)
data = response.json()
error = data.get('error')
if error is not None:
raise CommandException("Failed to log in: %s" % error)
return dict(
team=team,
refresh_token=data['refresh_token'],
access_token=data['access_token'],
expires_at=data['expires_at']
)
def _handle_response(team, resp, **kwargs):
_ = kwargs # unused pylint:disable=W0613
if resp.status_code == requests.codes.unauthorized:
raise CommandException(
"Authentication failed. Run `quilt login%s` again." %
(' ' + team if team else '')
)
elif not resp.ok:
try:
data = resp.json()
raise HTTPResponseException(data['message'], resp)
except ValueError:
raise HTTPResponseException("Unexpected failure: error %s" % resp.status_code, resp)
def _create_auth(team):
"""
Reads the credentials, updates the access token if necessary, and returns it.
"""
url = get_registry_url(team)
contents = _load_auth()
auth = contents.get(url)
if auth is not None:
# If the access token expires within a minute, update it.
if auth['expires_at'] < time.time() + 60:
try:
auth = _update_auth(team, auth['refresh_token'])
except CommandException as ex:
raise CommandException(
"Failed to update the access token (%s). Run `quilt login%s` again." %
(ex, ' ' + team if team else '')
)
contents[url] = auth
_save_auth(contents)
return auth
def _create_session(team, auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(dict(
response=partial(_handle_response, team)
))
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "quilt-cli/%s (%s %s) %s/%s" % (
VERSION, platform.system(), platform.release(),
platform.python_implementation(), platform.python_version()
)
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session
_sessions = {} # pylint:disable=C0103
def _get_session(team):
"""
Creates a session or returns an existing session.
"""
global _sessions # pylint:disable=C0103
session = _sessions.get(team)
if session is None:
auth = _create_auth(team)
_sessions[team] = session = _create_session(team, auth)
assert session is not None
return session
def _clear_session(team):
global _sessions # pylint:disable=C0103
session = _sessions.pop(team, None)
if session is not None:
session.close()
def _create_s3_session():
"""
Creates a session with automatic retries on 5xx errors.
"""
sess = requests.Session()
retries = Retry(total=3,
backoff_factor=.5,
status_forcelist=[500, 502, 503, 504])
sess.mount('https://', HTTPAdapter(max_retries=retries))
return sess
def _open_url(url):
try:
if sys.platform == 'win32':
os.startfile(url) # pylint:disable=E1101
elif sys.platform == 'darwin':
with open(os.devnull, 'r+') as null:
subprocess.check_call(['open', url], stdin=null, stdout=null, stderr=null)
else:
with open(os.devnull, 'r+') as null:
subprocess.check_call(['xdg-open', url], stdin=null, stdout=null, stderr=null)
except Exception as ex: # pylint:disable=W0703
print("Failed to launch the browser: %s" % ex)
def _match_hash(package, hash):
team, owner, pkg = parse_package(package)
session = _get_session(team)
hash = hash.lower()
if not (6 <= len(hash) <= 64):
raise CommandException('Invalid hash of length {}: {!r}\n '
'Ensure that the hash is between 6 and 64 characters.'
.format(len(hash), hash))
# short-circuit for exact length
if len(hash) == 64:
return hash
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg
)
)
matches = set(entry['hash'] for entry in response.json()['logs']
if entry['hash'].startswith(hash))
if len(matches) == 1:
return matches.pop()
if len(matches) > 1:
# Sorting for consistency in testing, as well as visual comparison of hashes
ambiguous = '\n'.join(sorted(matches))
raise CommandException(
"Ambiguous hash for package {package}: {hash!r} matches the following hashes:\n\n{ambiguous}"
.format(package=package, hash=hash, ambiguous=ambiguous))
raise CommandException("Invalid hash for package {package}: {hash}".format(package=package, hash=hash))
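# Illustrative behaviour of _match_hash (assumed values, not from the original
# source): a short prefix such as 'abc123' is expanded against the package log;
# exactly one matching entry yields the full 64-character hash, more than one is
# reported as ambiguous, and none raises "Invalid hash".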
def _find_logged_in_team():
"""
Find a team name in the auth credentials.
There should be at most one, since we don't allow multiple team logins.
"""
contents = _load_auth()
auth = next(itervalues(contents), {})
return auth.get('team')
def _check_team_login(team):
"""
Disallow simultaneous public cloud and team logins.
"""
contents = _load_auth()
for auth in itervalues(contents):
existing_team = auth.get('team')
if team and team != existing_team:
raise CommandException(
"Can't log in as team %r; log out first." % team
)
elif not team and existing_team:
raise CommandException(
"Can't log in as a public user; log out from team %r first." % existing_team
)
team_regex = re.compile('^[a-z]+$')
def _check_team_id(team):
if team is not None and team_regex.match(team) is None:
raise CommandException(
"Invalid team name: {team}. Lowercase letters only.".format(team=team)
)
def _check_team_exists(team):
"""
Check that the team registry actually exists.
"""
if team is None:
return
hostname = '%s-registry.team.quiltdata.com' % team
try:
socket.gethostbyname(hostname)
except IOError:
try:
# Do we have internet?
socket.gethostbyname('quiltdata.com')
except IOError:
message = "Can't find quiltdata.com. Check your internet connection."
else:
message = "Unable to connect to registry. Is the team name %r correct?" % team
raise CommandException(message)
def login(team=None):
"""
Authenticate.
Launches a web browser and asks the user for a token.
"""
_check_team_id(team)
_check_team_exists(team)
_check_team_login(team)
login_url = "%s/login" % get_registry_url(team)
print("Launching a web browser...")
print("If that didn't work, please visit the following URL: %s" % login_url)
_open_url(login_url)
print()
refresh_token = input("Enter the code from the webpage: ")
login_with_token(refresh_token, team)
def login_with_token(refresh_token, team=None):
"""
Authenticate using an existing token.
"""
# Get an access token and a new refresh token.
_check_team_id(team)
auth = _update_auth(team, refresh_token)
url = get_registry_url(team)
contents = _load_auth()
contents[url] = auth
_save_auth(contents)
_clear_session(team)
def logout():
"""
Become anonymous. Useful for testing.
"""
# TODO revoke refresh token (without logging out of web sessions)
if _load_auth():
_save_auth({})
else:
print("Already logged out.")
global _sessions # pylint:disable=C0103
_sessions = {}
def generate(directory, outfilename=DEFAULT_BUILDFILE):
"""
Generate a build-file for quilt build from a directory of
source files.
"""
try:
buildfilepath = generate_build_file(directory, outfilename=outfilename)
except BuildException as builderror:
raise CommandException(str(builderror))
print("Generated build-file %s." % (buildfilepath))
def check(path=None, env='default'):
"""
Execute the checks: rules for a given build.yml file.
"""
# TODO: add files=<list of files> to check only a subset...
# also useful for 'quilt build' to exclude certain files?
# (if not, then require dry_run=True if files!=None/all)
build("dry_run/dry_run", path=path, dry_run=True, env=env)
def _clone_git_repo(url, branch, dest):
cmd = ['git', 'clone', '-q', '--depth=1']
if branch:
cmd += ['-b', branch]
cmd += [url, dest]
subprocess.check_call(cmd)
def _log(team, **kwargs):
# TODO(dima): Save logs to a file, then send them when we get a chance.
cfg = _load_config()
if cfg.get('disable_analytics'):
return
session = _get_session(team)
# Disable error handling.
orig_response_hooks = session.hooks.pop('response')
try:
session.post(
"{url}/api/log".format(
url=get_registry_url(team),
),
data=json.dumps([kwargs]),
timeout=LOG_TIMEOUT,
)
except requests.exceptions.RequestException:
# Ignore logging errors.
pass
finally:
# restore disabled error-handling
session.hooks['response'] = orig_response_hooks
def build(package, path=None, dry_run=False, env='default', force=False):
"""
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
"""
# TODO: rename 'path' param to 'target'?
team, _, _ = parse_package(package)
_check_team_id(team)
logged_in_team = _find_logged_in_team()
if logged_in_team is not None and team is None and force is False:
answer = input("You're logged in as a team member, but you aren't specifying "
"a team for the package you're currently building. Maybe you meant:\n"
"quilt build {team}:{package}\n"
"Are you sure you want to continue? (y/N) ".format(
team=logged_in_team, package=package))
if answer.lower() != 'y':
return
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env)
except Exception as ex:
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env)
def _build_internal(package, path, dry_run, env):
# we may have a path, git URL, PackageNode, or None
if isinstance(path, string_types):
# is this a git url?
is_git_url = GIT_URL_RE.match(path)
if is_git_url:
tmpdir = tempfile.mkdtemp()
url = is_git_url.group('url')
branch = is_git_url.group('branch')
try:
_clone_git_repo(url, branch, tmpdir)
build_from_path(package, tmpdir, dry_run=dry_run, env=env)
except Exception as exc:
msg = "attempting git clone raised exception: {exc}"
raise CommandException(msg.format(exc=exc))
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
else:
build_from_path(package, path, dry_run=dry_run, env=env)
elif isinstance(path, nodes.PackageNode):
assert not dry_run # TODO?
build_from_node(package, path)
elif path is None:
assert not dry_run # TODO?
_build_empty(package)
else:
raise ValueError("Expected a PackageNode, path or git URL, but got %r" % path)
def _build_empty(package):
"""
Create an empty package for convenient editing of de novo packages
"""
team, owner, pkg = parse_package(package)
store = PackageStore()
new = store.create_package(team, owner, pkg)
new.save_contents()
def build_from_node(package, node):
"""
Compile a Quilt data package from an existing package node.
"""
team, owner, pkg = parse_package(package)
_check_team_id(team)
# deliberate access of protected member
store = node._package.get_store()
package_obj = store.create_package(team, owner, pkg)
def _process_node(node, path=''):
if isinstance(node, nodes.GroupNode):
for key, child in node._items():
_process_node(child, (path + '/' + key if path else key))
elif isinstance(node, nodes.DataNode):
core_node = node._node
metadata = core_node.metadata or {}
if isinstance(core_node, TableNode):
dataframe = node._data()
package_obj.save_df(dataframe, path, metadata.get('q_path'), metadata.get('q_ext'),
'pandas', PackageFormat.default)
elif isinstance(core_node, FileNode):
src_path = node._data()
package_obj.save_file(src_path, path, metadata.get('q_path'))
else:
assert False, "Unexpected core node type: %r" % core_node
else:
assert False, "Unexpected node type: %r" % node
_process_node(node)
package_obj.save_contents()
def build_from_path(package, path, dry_run=False, env='default', outfilename=DEFAULT_BUILDFILE):
"""
Compile a Quilt data package from a build file.
Path can be a directory, in which case the build file will be generated automatically.
"""
team, owner, pkg = parse_package(package)
if not os.path.exists(path):
raise CommandException("%s does not exist." % path)
try:
if os.path.isdir(path):
buildpath = os.path.join(path, outfilename)
if os.path.exists(buildpath):
raise CommandException(
"Build file already exists. Run `quilt build %r` instead." % buildpath
)
contents = generate_contents(path, outfilename)
build_package_from_contents(team, owner, pkg, path, contents, dry_run=dry_run, env=env)
else:
build_package(team, owner, pkg, path, dry_run=dry_run, env=env)
if not dry_run:
print("Built %s%s/%s successfully." % (team + ':' if team else '', owner, pkg))
except BuildException as ex:
raise CommandException("Failed to build the package: %s" % ex)
def log(package):
"""
List all of the changes to a package on the server.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg
)
)
format_str = "%-64s %-19s %s"
print(format_str % ("Hash", "Pushed", "Author"))
for entry in reversed(response.json()['logs']):
ugly = datetime.fromtimestamp(entry['created'])
nice = ugly.strftime("%Y-%m-%d %H:%M:%S")
print(format_str % (entry['hash'], nice, entry['author']))
def push(package, is_public=False, is_team=False, reupload=False):
"""
Push a Quilt data package to the server
"""
team, owner, pkg = parse_package(package)
_check_team_id(team)
session = _get_session(team)
pkgobj = PackageStore.find_package(team, owner, pkg)
if pkgobj is None:
raise CommandException("Package {package} not found.".format(package=package))
pkghash = pkgobj.get_hash()
def _push_package(dry_run=False, sizes=dict()):
data = json.dumps(dict(
dry_run=dry_run,
is_public=is_public,
is_team=is_team,
contents=pkgobj.get_contents(),
description="", # TODO
sizes=sizes
), default=encode_node)
compressed_data = gzip_compress(data.encode('utf-8'))
return session.put(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
hash=pkghash
),
data=compressed_data,
headers={
'Content-Encoding': 'gzip'
}
)
print("Fetching upload URLs from the registry...")
resp = _push_package(dry_run=True)
upload_urls = resp.json()['upload_urls']
obj_queue = sorted(set(find_object_hashes(pkgobj.get_contents())), reverse=True)
total = len(obj_queue)
obj_sizes = {
obj_hash: os.path.getsize(pkgobj.get_store().object_path(obj_hash)) for obj_hash in obj_queue
}
total_bytes = sum(itervalues(obj_sizes))
uploaded = []
lock = Lock()
headers = {
'Content-Encoding': 'gzip'
}
print("Uploading %d fragments (%d bytes before compression)..." % (total, total_bytes))
with tqdm(total=total_bytes, unit='B', unit_scale=True) as progress:
def _worker_thread():
with _create_s3_session() as s3_session:
while True:
with lock:
if not obj_queue:
break
obj_hash = obj_queue.pop()
try:
obj_urls = upload_urls[obj_hash]
original_size = os.path.getsize(pkgobj.get_store().object_path(obj_hash))
if reupload or not s3_session.head(obj_urls['head']).ok:
# Create a temporary gzip'ed file.
with pkgobj.tempfile(obj_hash) as temp_file:
temp_file.seek(0, 2)
compressed_size = temp_file.tell()
temp_file.seek(0)
# Workaround for non-local variables in Python 2.7
class Context:
compressed_read = 0
original_last_update = 0
def _progress_cb(count):
Context.compressed_read += count
original_read = Context.compressed_read * original_size // compressed_size
with lock:
progress.update(original_read - Context.original_last_update)
Context.original_last_update = original_read
with FileWithReadProgress(temp_file, _progress_cb) as fd:
url = obj_urls['put']
response = s3_session.put(url, data=fd, headers=headers)
response.raise_for_status()
else:
with lock:
tqdm.write("Fragment %s already uploaded; skipping." % obj_hash)
progress.update(original_size)
with lock:
uploaded.append(obj_hash)
except requests.exceptions.RequestException as ex:
message = "Upload failed for %s:\n" % obj_hash
if ex.response is not None:
message += "URL: %s\nStatus code: %s\nResponse: %r\n" % (
ex.request.url, ex.response.status_code, ex.response.text
)
else:
message += "%s\n" % ex
with lock:
tqdm.write(message)
threads = [
Thread(target=_worker_thread, name="upload-worker-%d" % i)
for i in range(PARALLEL_UPLOADS)
]
for thread in threads:
thread.daemon = True
thread.start()
for thread in threads:
thread.join()
if len(uploaded) != total:
raise CommandException("Failed to upload fragments")
print("Uploading package metadata...")
resp = _push_package(sizes=obj_sizes)
package_url = resp.json()['package_url']
print("Updating the 'latest' tag...")
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
tag=LATEST_TAG
),
data=json.dumps(dict(
hash=pkghash
))
)
print("Push complete. %s is live:\n%s" % (package, package_url))
def version_list(package):
"""
List the versions of a package.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
response = session.get(
"{url}/api/version/{owner}/{pkg}/".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg
)
)
for version in response.json()['versions']:
print("%s: %s" % (version['version'], version['hash']))
def version_add(package, version, pkghash, force=False):
"""
Add a new version for a given package hash.
Version format needs to follow PEP 440.
Versions are permanent - once created, they cannot be modified or deleted.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
try:
Version(version)
except ValueError:
url = "https://www.python.org/dev/peps/pep-0440/#examples-of-compliant-version-schemes"
raise CommandException(
"Invalid version format; see %s" % url
)
if not force:
answer = input("Versions cannot be modified or deleted; are you sure? (y/n) ")
if answer.lower() != 'y':
return
session.put(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
version=version
),
data=json.dumps(dict(
hash=_match_hash(package, pkghash)
))
)
def tag_list(package):
"""
List the tags of a package.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
response = session.get(
"{url}/api/tag/{owner}/{pkg}/".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg
)
)
for tag in response.json()['tags']:
print("%s: %s" % (tag['tag'], tag['hash']))
def tag_add(package, tag, pkghash):
"""
Add a new tag for a given package hash.
Unlike versions, tags can have an arbitrary format, and can be modified
and deleted.
When a package is pushed, it gets the "latest" tag.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
tag=tag
),
data=json.dumps(dict(
hash=_match_hash(package, pkghash)
))
)
def tag_remove(package, tag):
"""
Delete a tag.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
session.delete(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
tag=tag
)
)
def install_via_requirements(requirements_str, force=False):
"""
Download multiple Quilt data packages via quilt.xml requirements file.
"""
if requirements_str[0] == '@':
path = requirements_str[1:]
if os.path.isfile(path):
yaml_data = load_yaml(path)
else:
raise CommandException("Requirements file not found: {filename}".format(filename=path))
else:
yaml_data = yaml.load(requirements_str)
for pkginfo in yaml_data['packages']:
info = parse_package_extended(pkginfo)
install(info.full_name, info.hash, info.version, info.tag, force=force)
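# Illustrative requirements input (assumed contents, not from the original source),
# passed either inline as YAML or via '@path/to/quilt.yml':
#   packages:
#     - user/pkg1
#     - team:user/pkg2:t:stable
#     - user/pkg3:h:abc123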
def install(package, hash=None, version=None, tag=None, force=False):
"""
Download a Quilt data package from the server and install locally.
At most one of `hash`, `version`, or `tag` can be given. If none are
given, `tag` defaults to "latest".
"""
if hash is version is tag is None:
tag = LATEST_TAG
# @filename ==> read from file
# newline = multiple lines ==> multiple requirements
package = package.strip()
if len(package) == 0:
raise CommandException("package name is empty.")
if package[0] == '@' or '\n' in package:
return install_via_requirements(package, force=force)
assert [hash, version, tag].count(None) == 2
team, owner, pkg, subpath = parse_package(package, allow_subpath=True)
_check_team_id(team)
session = _get_session(team)
store = PackageStore()
existing_pkg = store.get_package(team, owner, pkg)
print("Downloading package metadata...")
try:
if version is not None:
response = session.get(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
version=version
)
)
pkghash = response.json()['hash']
elif tag is not None:
response = session.get(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
tag=tag
)
)
pkghash = response.json()['hash']
else:
pkghash = _match_hash(package, hash)
except HTTPResponseException as e:
logged_in_team = _find_logged_in_team()
if (team is None and logged_in_team is not None
and e.response.status_code == requests.codes.not_found):
raise CommandException("Package {owner}/{pkg} does not exist. "
"Maybe you meant {team}:{owner}/{pkg}?".format(
owner=owner, pkg=pkg, team=logged_in_team))
else:
raise
assert pkghash is not None
response = session.get(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg,
hash=pkghash
),
params=dict(
subpath='/'.join(subpath)
)
)
assert response.ok # other responses handled by _handle_response
if existing_pkg is not None and not force:
print("{package} already installed.".format(package=package))
overwrite = input("Overwrite? (y/n) ")
if overwrite.lower() != 'y':
return
dataset = response.json(object_hook=decode_node)
response_urls = dataset['urls']
response_contents = dataset['contents']
obj_sizes = dataset['sizes']
# Verify contents hash
if pkghash != hash_contents(response_contents):
raise CommandException("Mismatched hash. Try again.")
pkgobj = store.install_package(team, owner, pkg, response_contents)
obj_queue = sorted(iteritems(response_urls), reverse=True)
total = len(obj_queue)
# Some objects might be missing a size; ignore those for now.
total_bytes = sum(size or 0 for size in itervalues(obj_sizes))
downloaded = []
lock = Lock()
print("Downloading %d fragments (%d bytes before compression)..." % (total, total_bytes))
with tqdm(total=total_bytes, unit='B', unit_scale=True) as progress:
def _worker_thread():
with _create_s3_session() as s3_session:
while True:
with lock:
if not obj_queue:
break
obj_hash, url = obj_queue.pop()
original_size = obj_sizes[obj_hash] or 0 # If the size is unknown, just treat it as 0.
local_filename = store.object_path(obj_hash)
if os.path.exists(local_filename):
with lock:
progress.update(original_size)
downloaded.append(obj_hash)
continue
success = False
temp_path_gz = store.temporary_object_path(obj_hash + '.gz')
with open(temp_path_gz, 'ab') as output_file:
for attempt in range(S3_TIMEOUT_RETRIES):
try:
starting_length = output_file.tell()
response = s3_session.get(
url,
headers={
'Range': 'bytes=%d-' % starting_length
},
stream=True,
timeout=(S3_CONNECT_TIMEOUT, S3_READ_TIMEOUT)
)
# RANGE_NOT_SATISFIABLE means we already have the whole file.
if response.status_code == requests.codes.RANGE_NOT_SATISFIABLE:
with lock:
progress.update(original_size)
else:
if not response.ok:
message = "Download failed for %s:\nURL: %s\nStatus code: %s\nResponse: %r\n" % (
obj_hash, response.request.url, response.status_code, response.text
)
with lock:
tqdm.write(message)
break
# Fragments have the 'Content-Encoding: gzip' header set to make requests ungzip
# them automatically - but that turned out to be a bad idea because it makes
# resuming downloads impossible.
# HACK: For now, just delete the header. Eventually, update the data in S3.
response.raw.headers.pop('Content-Encoding', None)
# Make sure we're getting the expected range.
content_range = response.headers.get('Content-Range', '')
match = CONTENT_RANGE_RE.match(content_range)
if not match or not int(match.group(1)) == starting_length:
with lock:
tqdm.write("Unexpected Content-Range: %s" % content_range)
break
compressed_size = int(match.group(3))
# We may have started with a partially-downloaded file, so update the progress bar.
compressed_read = starting_length
original_read = compressed_read * original_size // compressed_size
with lock:
progress.update(original_read)
original_last_update = original_read
# Do the actual download.
for chunk in response.iter_content(CHUNK_SIZE):
output_file.write(chunk)
compressed_read += len(chunk)
original_read = compressed_read * original_size // compressed_size
with lock:
progress.update(original_read - original_last_update)
original_last_update = original_read
success = True
break # Done!
except requests.exceptions.ConnectionError as ex:
if attempt < S3_TIMEOUT_RETRIES - 1:
with lock:
tqdm.write("Download for %s timed out; retrying..." % obj_hash)
else:
with lock:
tqdm.write("Download failed for %s: %s" % (obj_hash, ex))
break
if not success:
# We've already printed an error, so not much to do - just move on to the next object.
continue
# Ungzip the downloaded fragment.
temp_path = store.temporary_object_path(obj_hash)
try:
with gzip.open(temp_path_gz, 'rb') as f_in, open(temp_path, 'wb') as f_out:
copyfileobj(f_in, f_out)
finally:
# Delete the file unconditionally - in case it's corrupted and cannot be ungzipped.
os.remove(temp_path_gz)
# Check the hash of the result.
file_hash = digest_file(temp_path)
if file_hash != obj_hash:
os.remove(temp_path)
with lock:
tqdm.write("Fragment hashes do not match: expected %s, got %s." %
(obj_hash, file_hash))
continue
move(temp_path, local_filename)
# Success.
with lock:
downloaded.append(obj_hash)
threads = [
Thread(target=_worker_thread, name="download-worker-%d" % i)
for i in range(PARALLEL_DOWNLOADS)
]
for thread in threads:
thread.daemon = True
thread.start()
for thread in threads:
thread.join()
if len(downloaded) != total:
raise CommandException("Failed to download fragments")
pkgobj.save_contents()
def _setup_env(env, files):
""" process data distribution. """
# TODO: build.yml is not saved in the package system, so re-load it here
with open('build.yml') as fd:
buildfile = next(yaml.load_all(fd), None)
environments = buildfile.get('environments', {})
if env != 'default' and (env not in environments):
raise CommandException(
"environment %s not found in environments: section of build.yml" % env)
if len(environments) == 0:
return files
if env == 'default' and 'default' not in environments:
return files
# TODO: this should be done during quilt push, not during install/import
# (requires server support)
# TODO: add a way to dry-run dataset checking
print('processing environment %s: checking data...' % (env))
environment = environments[env]
dataset = environment.get('dataset')
for key, val in files.items():
# TODO: debug mode, where we can see which files were skipped
if isinstance(val, pd.DataFrame):
before_len = len(val)
res = exec_yaml_python(dataset, val, key, '('+key+')')
if not res and res is not None:
raise BuildException("error creating dataset for environment: %s on file %s" % (
env, key))
print('%s: %s=>%s recs' % (key, before_len, len(qc.data)))
files[key] = qc.data
# TODO: should be done on the server during quilt install
# (requires server support)
print('processing environment %s: slicing data...' % (env))
instance_data = environment.get('instance_data')
for key, val in files.items():
# TODO: debug mode, where we can see which files were skipped
if isinstance(val, pd.DataFrame):
before_len = len(val)
# TODO: pass instance identifier, e.g. instance number N of M
val['.qchash'] = val.apply(lambda x: abs(hash(tuple(x))), axis = 1)
res = exec_yaml_python(instance_data, val, key, '('+key+')')
if res == False:
raise BuildException("error assigning data to instance in environment: %s on file %s" % (
env, key))
print('%s: %s=>%s recs' % (key, before_len, len(qc.data)))
files[key] = qc.data
return files
def access_list(package):
"""
Print list of users who can access a package.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
lookup_url = "{url}/api/access/{owner}/{pkg}/".format(url=get_registry_url(team), owner=owner, pkg=pkg)
response = session.get(lookup_url)
data = response.json()
users = data['users']
print('\n'.join(users))
def access_add(package, user):
"""
Add access
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
session.put("%s/api/access/%s/%s/%s" % (get_registry_url(team), owner, pkg, user))
print(u'Access added for %s' % user)
def access_remove(package, user):
"""
Remove access
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
session.delete("%s/api/access/%s/%s/%s" % (get_registry_url(team), owner, pkg, user))
print(u'Access removed for %s' % user)
def delete(package):
"""
Delete a package from the server.
Irreversibly deletes the package along with its history, tags, versions, etc.
"""
team, owner, pkg = parse_package(package)
answer = input(
"Are you sure you want to delete this package and its entire history? "
"Type '%s' to confirm: " % package
)
if answer != package:
print("Not deleting.")
return 1
session = _get_session(team)
session.delete("%s/api/package/%s/%s/" % (get_registry_url(team), owner, pkg))
print("Deleted.")
def search(query, team=None):
"""
Search for packages
"""
if team is None:
team = _find_logged_in_team()
if team is not None:
session = _get_session(team)
response = session.get("%s/api/search/" % get_registry_url(team), params=dict(q=query))
print("* Packages in team %s" % team)
packages = response.json()['packages']
for pkg in packages:
print(("%s:" % team) + ("%(owner)s/%(name)s" % pkg))
if len(packages) == 0:
print("(No results)")
print("* Packages in public cloud")
public_session = _get_session(None)
response = public_session.get("%s/api/search/" % get_registry_url(None), params=dict(q=query))
packages = response.json()['packages']
for pkg in packages:
print("%(owner)s/%(name)s" % pkg)
if len(packages) == 0:
print("(No results)")
def ls(): # pylint:disable=C0103
"""
List all installed Quilt data packages
"""
for pkg_dir in PackageStore.find_store_dirs():
print("%s" % pkg_dir)
packages = PackageStore(pkg_dir).ls_packages()
for package, tag, pkghash in packages:
print("{0:30} {1:20} {2}".format(package, tag, pkghash))
def inspect(package):
"""
Inspect package details
"""
team, owner, pkg = parse_package(package)
pkgobj = PackageStore.find_package(team, owner, pkg)
if pkgobj is None:
raise CommandException("Package {package} not found.".format(package=package))
def _print_children(children, prefix, path):
for idx, (name, child) in enumerate(children):
if idx == len(children) - 1:
new_prefix = u"└─"
new_child_prefix = u" "
else:
new_prefix = u"├─"
new_child_prefix = u"│ "
_print_node(child, prefix + new_prefix, prefix + new_child_prefix, name, path)
def _print_node(node, prefix, child_prefix, name, path):
name_prefix = u"─ "
if isinstance(node, GroupNode):
children = list(node.children.items())
if children:
name_prefix = u"┬ "
print(prefix + name_prefix + name)
_print_children(children, child_prefix, path + name)
elif isinstance(node, TableNode):
df = pkgobj.get_obj(node)
assert isinstance(df, pd.DataFrame)
info = "shape %s, type \"%s\"" % (df.shape, df.dtypes)
print(prefix + name_prefix + name + ": " + info)
elif isinstance(node, FileNode):
print(prefix + name_prefix + name)
else:
assert False, "node=%s type=%s" % (node, type(node))
print(pkgobj.get_path())
_print_children(children=pkgobj.get_contents().children.items(), prefix='', path='')
def rm(package, force=False):
"""
Remove a package (all instances) from the local store.
"""
team, owner, pkg = parse_package(package)
if not force:
confirmed = input("Remove {0}? (y/n) ".format(package))
if confirmed.lower() != 'y':
return
store = PackageStore()
deleted = store.remove_package(team, owner, pkg)
for obj in deleted:
print("Removed: {0}".format(obj))
def list_users(team=None):
# get team from disk if not specified
if team is None:
team = _find_logged_in_team()
session = _get_session(team)
url = get_registry_url(team)
resp = session.get('%s/api/users/list' % url)
return resp.json()
def list_users_detailed(team=None):
# get team from disk if not specified
if team is None:
team = _find_logged_in_team()
session = _get_session(team)
url = get_registry_url(team)
resp = session.get('%s/api/users/list_detailed' % url)
return resp.json()
def create_user(username, email, team):
_check_team_id(team)
session = _get_session(team)
url = get_registry_url(team)
session.post('%s/api/users/create' % url,
data=json.dumps({'username':username, 'email':email}))
def list_packages(username, team=None):
# get team from disk if not specified
if team is None:
team = _find_logged_in_team()
session = _get_session(team)
url = get_registry_url(team)
resp = session.get('%s/api/admin/package_list/%s' % (url, username))
return resp.json()
def disable_user(username, team):
_check_team_id(team)
session = _get_session(team)
url = get_registry_url(team)
session.post('%s/api/users/disable' % url,
data=json.dumps({'username':username}))
def enable_user(username, team):
_check_team_id(team)
session = _get_session(team)
url = get_registry_url(team)
session.post('%s/api/users/enable' % url,
data=json.dumps({'username':username}))
def delete_user(username, team, force=False):
_check_team_id(team)
if not force:
confirmed = input("Really delete user '{0}'? (y/n)".format(username))
if confirmed.lower() != 'y':
return
session = _get_session(team)
url = get_registry_url(team)
session.post('%s/api/users/delete' % url, data=json.dumps({'username':username}))
def audit(user_or_package):
parts = user_or_package.split('/')
if len(parts) > 2 or not all(is_nodename(part) for part in parts):
raise CommandException("Need either a user or a user/package")
team = _find_logged_in_team()
if not team:
raise CommandException("Not logged in as a team user")
session = _get_session(team)
response = session.get(
"{url}/api/audit/{user_or_package}/".format(
url=get_registry_url(team),
user_or_package=user_or_package
)
)
print(json.dumps(response.json(), indent=2))
def reset_password(team, username):
_check_team_id(team)
session = _get_session(team)
session.post(
"{url}/api/users/reset_password".format(
url=get_registry_url(team),
), data=json.dumps({'username':username})
)
def _load(package):
info = parse_package_extended(package)
team, user, name = info.team, info.user, info.name
pkgobj = PackageStore.find_package(team, user, name)
if pkgobj is None:
raise CommandException("Package {package} not found.".format(package=package))
node = _from_core_node(pkgobj, pkgobj.get_contents())
return node, pkgobj, info
def load(pkginfo):
"""functional interface to "from quilt.data.USER import PKG"""
# TODO: support hashes/versions/etc.
return _load(pkginfo)[0]
| 1 | 16,268 | Wait a minute... You're passing it as a POST parameter. There's no way that can work. | quiltdata-quilt | py |
@@ -259,6 +259,7 @@ func (w *Workflow) cleanup() {
w.LogWorkflowInfo("Error returned from cleanup hook: %s", err)
}
}
+ w.LogWorkflowInfo("Workflow %q finished clean up.", w.Name)
}
func (w *Workflow) genName(n string) string { | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package daisy describes a daisy workflow.
package daisy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/logging"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
const defaultTimeout = "10m"
func daisyBkt(ctx context.Context, client *storage.Client, project string) (string, dErr) {
dBkt := strings.Replace(project, ":", "-", -1) + "-daisy-bkt"
it := client.Buckets(ctx, project)
for bucketAttrs, err := it.Next(); err != iterator.Done; bucketAttrs, err = it.Next() {
if err != nil {
return "", typedErr(apiError, err)
}
if bucketAttrs.Name == dBkt {
return dBkt, nil
}
}
if err := client.Bucket(dBkt).Create(ctx, project, nil); err != nil {
return "", typedErr(apiError, err)
}
return dBkt, nil
}
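// Illustrative result of daisyBkt (assumed project ID, not from the original
// source): for project "my:proj" the scratch bucket name becomes
// "my-proj-daisy-bkt", created in that project if it does not already exist.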
// Var is a type with a flexible JSON representation. A Var can be represented
// by either a string, or by this struct definition. A Var that is represented
// by a string will unmarshal into the struct: {Value: <string>, Required: false, Description: ""}.
type Var struct {
Value string
Required bool `json:",omitempty"`
Description string `json:",omitempty"`
}
// UnmarshalJSON unmarshals a Var.
func (v *Var) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err == nil {
v.Value = s
return nil
}
// We can't unmarshal into Var directly as it would create an infinite loop.
type aVar Var
return json.Unmarshal(b, &struct{ *aVar }{aVar: (*aVar)(v)})
}
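// Illustrative JSON forms accepted by Var (assumed values, not from the original
// source):
//   "us-central1-a"                                         -> Var{Value: "us-central1-a"}
//   {"Value": "", "Required": true, "Description": "zone"}  -> Var{Required: true, Description: "zone"}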
// Workflow is a single Daisy workflow.
type Workflow struct {
// Populated on New() construction.
Cancel chan struct{} `json:"-"`
// Workflow template fields.
// Workflow name.
Name string `json:",omitempty"`
// Project to run in.
Project string `json:",omitempty"`
// Zone to run in.
Zone string `json:",omitempty"`
// GCS Path to use for scratch data and write logs/results to.
GCSPath string `json:",omitempty"`
// Path to OAuth credentials file.
OAuthPath string `json:",omitempty"`
// Sources used by this workflow, map of destination to source.
Sources map[string]string `json:",omitempty"`
// Vars defines workflow variables, substitution is done at Workflow run time.
Vars map[string]Var `json:",omitempty"`
Steps map[string]*Step `json:",omitempty"`
// Map of steps to their dependencies.
Dependencies map[string][]string `json:",omitempty"`
// Default timeout for each step, defaults to 10m.
// Must be parsable by https://golang.org/pkg/time/#ParseDuration.
DefaultTimeout string `json:",omitempty"`
defaultTimeout time.Duration
// Working fields.
autovars map[string]string
workflowDir string
parent *Workflow
bucket string
scratchPath string
sourcesPath string
logsPath string
outsPath string
username string
externalLogging bool
gcsLoggingDisabled bool
cloudLoggingDisabled bool
stdoutLoggingDisabled bool
id string
Logger Logger `json:"-"`
cleanupHooks []func() dErr
cleanupHooksMx sync.Mutex
logWait sync.WaitGroup
// Optional compute endpoint override.
ComputeEndpoint string `json:",omitempty"`
ComputeClient compute.Client `json:"-"`
StorageClient *storage.Client `json:"-"`
cloudLoggingClient *logging.Client
// Resource registries.
disks *diskRegistry
forwardingRules *forwardingRuleRegistry
firewallRules *firewallRuleRegistry
images *imageRegistry
instances *instanceRegistry
networks *networkRegistry
subnetworks *subnetworkRegistry
targetInstances *targetInstanceRegistry
objects *objectRegistry
}
// DisableCloudLogging disables logging to Cloud Logging for this workflow.
func (w *Workflow) DisableCloudLogging() {
w.cloudLoggingDisabled = true
}
// DisableGCSLogging disables logging to GCS for this workflow.
func (w *Workflow) DisableGCSLogging() {
w.gcsLoggingDisabled = true
}
// DisableStdoutLogging disables logging to stdout for this workflow.
func (w *Workflow) DisableStdoutLogging() {
w.stdoutLoggingDisabled = true
}
// AddVar adds a variable set to the Workflow.
func (w *Workflow) AddVar(k, v string) {
if w.Vars == nil {
w.Vars = map[string]Var{}
}
w.Vars[k] = Var{Value: v}
}
func (w *Workflow) addCleanupHook(hook func() dErr) {
w.cleanupHooksMx.Lock()
w.cleanupHooks = append(w.cleanupHooks, hook)
w.cleanupHooksMx.Unlock()
}
// Validate runs validation on the workflow.
func (w *Workflow) Validate(ctx context.Context) error {
if err := w.PopulateClients(ctx); err != nil {
close(w.Cancel)
return errf("error populating workflow: %v", err)
}
if err := w.validateRequiredFields(); err != nil {
close(w.Cancel)
return errf("error validating workflow: %v", err)
}
if err := w.populate(ctx); err != nil {
close(w.Cancel)
return errf("error populating workflow: %v", err)
}
w.LogWorkflowInfo("Validating workflow")
if err := w.validate(ctx); err != nil {
w.LogWorkflowInfo("Error validating workflow: %v", err)
close(w.Cancel)
return err
}
w.LogWorkflowInfo("Validation Complete")
return nil
}
// WorkflowModifier is a function type for functions that can modify a Workflow object.
type WorkflowModifier func(*Workflow)
// Run runs a workflow.
func (w *Workflow) Run(ctx context.Context) error {
return w.RunWithModifiers(ctx, nil, nil)
}
// RunWithModifiers runs a workflow with the ability to modify it before and/or after validation.
func (w *Workflow) RunWithModifiers(
ctx context.Context,
preValidateWorkflowModifier WorkflowModifier,
postValidateWorkflowModifier WorkflowModifier) error {
w.externalLogging = true
if preValidateWorkflowModifier != nil {
preValidateWorkflowModifier(w)
}
if err := w.Validate(ctx); err != nil {
return err
}
if postValidateWorkflowModifier != nil {
postValidateWorkflowModifier(w)
}
defer w.cleanup()
w.LogWorkflowInfo("Workflow Project: %s", w.Project)
w.LogWorkflowInfo("Workflow Zone: %s", w.Zone)
w.LogWorkflowInfo("Workflow GCSPath: %s", w.GCSPath)
w.LogWorkflowInfo("Daisy scratch path: https://console.cloud.google.com/storage/browser/%s", path.Join(w.bucket, w.scratchPath))
w.LogWorkflowInfo("Uploading sources")
if err := w.uploadSources(ctx); err != nil {
w.LogWorkflowInfo("Error uploading sources: %v", err)
close(w.Cancel)
return err
}
w.LogWorkflowInfo("Running workflow")
if err := w.run(ctx); err != nil {
w.LogWorkflowInfo("Error running workflow: %v", err)
return err
}
return nil
}
func (w *Workflow) cleanup() {
w.LogWorkflowInfo("Workflow %q cleaning up (this may take up to 2 minutes).", w.Name)
select {
case <-w.Cancel:
default:
close(w.Cancel)
}
for _, hook := range w.cleanupHooks {
if err := hook(); err != nil {
w.LogWorkflowInfo("Error returned from cleanup hook: %s", err)
}
}
}
func (w *Workflow) genName(n string) string {
name := w.Name
for parent := w.parent; parent != nil; parent = parent.parent {
name = parent.Name + "-" + name
}
prefix := name
if n != "" {
prefix = fmt.Sprintf("%s-%s", n, name)
}
if len(prefix) > 57 {
prefix = prefix[0:56]
}
result := fmt.Sprintf("%s-%s", prefix, w.id)
if len(result) > 64 {
result = result[0:63]
}
return strings.ToLower(result)
}
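// Illustrative naming (assumed values, not from the original source): for
// n == "disk" in workflow "build" nested under parent "parent" with id "abc123",
// genName yields "disk-parent-build-abc123", truncating over-long prefixes and
// results before lowercasing.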
func (w *Workflow) getSourceGCSAPIPath(s string) string {
return fmt.Sprintf("%s/%s", gcsAPIBase, path.Join(w.bucket, w.sourcesPath, s))
}
// PopulateClients populates the compute and storage clients for the workflow.
func (w *Workflow) PopulateClients(ctx context.Context) error {
// API clients instantiation.
var err error
computeOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)}
if w.ComputeEndpoint != "" {
computeOptions = append(computeOptions, option.WithEndpoint(w.ComputeEndpoint))
}
if w.ComputeClient == nil {
w.ComputeClient, err = compute.NewClient(ctx, computeOptions...)
if err != nil {
return typedErr(apiError, err)
}
}
storageOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)}
if w.StorageClient == nil {
w.StorageClient, err = storage.NewClient(ctx, storageOptions...)
if err != nil {
return err
}
}
loggingOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)}
if w.externalLogging && w.cloudLoggingClient == nil {
w.cloudLoggingClient, err = logging.NewClient(ctx, w.Project, loggingOptions...)
if err != nil {
return err
}
}
return nil
}
func (w *Workflow) populateStep(ctx context.Context, s *Step) dErr {
if s.Timeout == "" {
s.Timeout = w.DefaultTimeout
}
timeout, err := time.ParseDuration(s.Timeout)
if err != nil {
return newErr(err)
}
s.timeout = timeout
var derr dErr
var step stepImpl
if step, derr = s.stepImpl(); derr != nil {
return derr
}
return step.populate(ctx, s)
}
// populate does the following:
// - checks that all required Vars are set.
// - instantiates API clients, if needed.
// - sets generic autovars and does the first round of var substitution.
// - sets GCS path information.
// - generates autovars from workflow fields (Name, Zone, etc.) and runs the second round of var substitution.
// - sets up logger.
// - runs populate on each step.
func (w *Workflow) populate(ctx context.Context) dErr {
for k, v := range w.Vars {
if v.Required && v.Value == "" {
return errf("cannot populate workflow, required var %q is unset", k)
}
}
// Set some generic autovars and run first round of var substitution.
cwd, _ := os.Getwd()
now := time.Now().UTC()
w.username = getUser()
w.autovars = map[string]string{
"ID": w.id,
"DATE": now.Format("20060102"),
"DATETIME": now.Format("20060102150405"),
"TIMESTAMP": strconv.FormatInt(now.Unix(), 10),
"USERNAME": w.username,
"WFDIR": w.workflowDir,
"CWD": cwd,
}
var replacements []string
for k, v := range w.autovars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
for k, v := range w.Vars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value)
}
substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...))
// Parse timeout.
timeout, err := time.ParseDuration(w.DefaultTimeout)
if err != nil {
return newErr(err)
}
w.defaultTimeout = timeout
// Set up GCS paths.
if w.GCSPath == "" {
dBkt, err := daisyBkt(ctx, w.StorageClient, w.Project)
if err != nil {
return err
}
w.GCSPath = "gs://" + dBkt
}
bkt, p, err := splitGCSPath(w.GCSPath)
if err != nil {
return newErr(err)
}
w.bucket = bkt
w.scratchPath = path.Join(p, fmt.Sprintf("daisy-%s-%s-%s", w.Name, now.Format("20060102-15:04:05"), w.id))
w.sourcesPath = path.Join(w.scratchPath, "sources")
w.logsPath = path.Join(w.scratchPath, "logs")
w.outsPath = path.Join(w.scratchPath, "outs")
// Generate more autovars from workflow fields. Run second round of var substitution.
w.autovars["NAME"] = w.Name
w.autovars["FULLNAME"] = w.genName("")
w.autovars["ZONE"] = w.Zone
w.autovars["PROJECT"] = w.Project
w.autovars["GCSPATH"] = w.GCSPath
w.autovars["SCRATCHPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath)
w.autovars["SOURCESPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.sourcesPath)
w.autovars["LOGSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.logsPath)
w.autovars["OUTSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.outsPath)
replacements = []string{}
for k, v := range w.autovars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...))
// We do this here, and not in validate, as embedded startup scripts could
// have what we think are daisy variables.
if err := w.validateVarsSubbed(); err != nil {
return err
}
if err := w.substituteSourceVars(ctx, reflect.ValueOf(w).Elem()); err != nil {
return err
}
if w.Logger == nil {
w.createLogger(ctx)
}
// Run populate on each step.
for name, s := range w.Steps {
s.name = name
s.w = w
if err := w.populateStep(ctx, s); err != nil {
return errf("error populating step %q: %v", name, err)
}
}
return nil
}
// AddDependency creates a dependency of dependent on each dependency. Returns an
// error if dependent or any dependency is not a step in this workflow.
func (w *Workflow) AddDependency(dependent *Step, dependencies ...*Step) error {
if _, ok := w.Steps[dependent.name]; !ok {
return fmt.Errorf("can't create dependency: step %q does not exist", dependent.name)
}
if w.Dependencies == nil {
w.Dependencies = map[string][]string{}
}
for _, dependency := range dependencies {
if _, ok := w.Steps[dependency.name]; !ok {
return fmt.Errorf("can't create dependency: step %q does not exist", dependency.name)
}
if !strIn(dependency.name, w.Dependencies[dependent.name]) { // Don't add if dependency already exists.
w.Dependencies[dependent.name] = append(w.Dependencies[dependent.name], dependency.name)
}
}
return nil
}
// NewIncludedWorkflow instantiates a new workflow with the same resources as the parent.
func (w *Workflow) NewIncludedWorkflow() *Workflow {
iw := New()
iw.Cancel = w.Cancel
iw.parent = w
iw.disks = w.disks
iw.forwardingRules = w.forwardingRules
iw.firewallRules = w.firewallRules
iw.images = w.images
iw.instances = w.instances
iw.networks = w.networks
iw.subnetworks = w.subnetworks
iw.targetInstances = w.targetInstances
iw.objects = w.objects
return iw
}
// ID is the unique identifier for this Workflow.
func (w *Workflow) ID() string {
return w.id
}
// NewIncludedWorkflowFromFile reads and unmarshals a workflow with the same resources as the parent.
func (w *Workflow) NewIncludedWorkflowFromFile(file string) (*Workflow, error) {
iw := w.NewIncludedWorkflow()
if !filepath.IsAbs(file) {
file = filepath.Join(w.workflowDir, file)
}
if err := readWorkflow(file, iw); err != nil {
return nil, err
}
return iw, nil
}
// NewStep instantiates a new, typeless step for this workflow.
// The step type must be specified before running this workflow.
func (w *Workflow) NewStep(name string) (*Step, error) {
if _, ok := w.Steps[name]; ok {
return nil, fmt.Errorf("can't create step %q: a step already exists with that name", name)
}
s := &Step{name: name, w: w}
if w.Steps == nil {
w.Steps = map[string]*Step{}
}
w.Steps[name] = s
return s, nil
}
// NewSubWorkflow instantiates a new workflow as a child to this workflow.
func (w *Workflow) NewSubWorkflow() *Workflow {
sw := New()
sw.Cancel = w.Cancel
sw.parent = w
return sw
}
// NewSubWorkflowFromFile reads and unmarshals a workflow as a child to this workflow.
func (w *Workflow) NewSubWorkflowFromFile(file string) (*Workflow, error) {
sw := w.NewSubWorkflow()
if !filepath.IsAbs(file) {
file = filepath.Join(w.workflowDir, file)
}
if err := readWorkflow(file, sw); err != nil {
return nil, err
}
return sw, nil
}
// Print populates then pretty prints the workflow.
func (w *Workflow) Print(ctx context.Context) {
w.externalLogging = false
if err := w.PopulateClients(ctx); err != nil {
fmt.Println("Error running PopulateClients:", err)
}
if err := w.populate(ctx); err != nil {
fmt.Println("Error running populate:", err)
}
b, err := json.MarshalIndent(w, "", " ")
if err != nil {
fmt.Println("Error marshalling workflow for printing:", err)
}
fmt.Println(string(b))
}
func (w *Workflow) run(ctx context.Context) dErr {
return w.traverseDAG(func(s *Step) dErr {
return w.runStep(ctx, s)
})
}
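// runStep executes a single step, racing the step's work against its configured
// timeout; whichever of the two channels in the select below fires first wins.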
func (w *Workflow) runStep(ctx context.Context, s *Step) dErr {
timeout := make(chan struct{})
go func() {
time.Sleep(s.timeout)
close(timeout)
}()
e := make(chan dErr)
go func() {
e <- s.run(ctx)
}()
select {
case err := <-e:
return err
case <-timeout:
return errf("step %q did not complete within the specified timeout of %s", s.name, s.timeout)
}
}
// Concurrently traverse the DAG, running func f on each step.
// Return an error if f returns an error on any step.
func (w *Workflow) traverseDAG(f func(*Step) dErr) dErr {
// waiting = steps and the dependencies they are waiting for.
// running = the currently running steps.
// start = map of steps' start channels/semaphores.
// done = map of steps' done channels for signaling step completion.
waiting := map[string][]string{}
var running []string
start := map[string]chan dErr{}
done := map[string]chan dErr{}
// Setup: channels, copy dependencies.
for name := range w.Steps {
waiting[name] = w.Dependencies[name]
start[name] = make(chan dErr)
done[name] = make(chan dErr)
}
// Setup: goroutine for each step. Each waits to be notified to start.
for name, s := range w.Steps {
go func(name string, s *Step) {
// Wait for signal, then run the function. Return any errs.
if err := <-start[name]; err != nil {
done[name] <- err
} else if err := f(s); err != nil {
done[name] <- err
}
close(done[name])
}(name, s)
}
// Main signaling logic.
for len(waiting) != 0 || len(running) != 0 {
// If we got a Cancel signal, kill all waiting steps.
// Let running steps finish.
select {
case <-w.Cancel:
waiting = map[string][]string{}
default:
}
// Kick off all steps that aren't waiting for anything.
for name, deps := range waiting {
if len(deps) == 0 {
delete(waiting, name)
running = append(running, name)
close(start[name])
}
}
// Sanity check. There should be at least one running step,
// but loop back through if there isn't.
if len(running) == 0 {
continue
}
// Get next finished step. Return the step error if it erred.
finished, err := stepsListen(running, done)
if err != nil {
return err
}
// Remove finished step from other steps' waiting lists.
for name, deps := range waiting {
waiting[name] = filter(deps, finished)
}
// Remove finished from currently running list.
running = filter(running, finished)
}
return nil
}
// New instantiates a new workflow.
func New() *Workflow {
// We can't use context.WithCancel as we use the context even after cancel for cleanup.
w := &Workflow{Cancel: make(chan struct{})}
// Init nil'ed fields
w.Sources = map[string]string{}
w.Vars = map[string]Var{}
w.Steps = map[string]*Step{}
w.Dependencies = map[string][]string{}
w.DefaultTimeout = defaultTimeout
w.autovars = map[string]string{}
// Resource registries and cleanup.
w.disks = newDiskRegistry(w)
w.forwardingRules = newForwardingRuleRegistry(w)
w.firewallRules = newFirewallRuleRegistry(w)
w.images = newImageRegistry(w)
w.instances = newInstanceRegistry(w)
w.networks = newNetworkRegistry(w)
w.subnetworks = newSubnetworkRegistry(w)
w.objects = newObjectRegistry(w)
w.targetInstances = newTargetInstanceRegistry(w)
w.addCleanupHook(func() dErr {
w.instances.cleanup() // instances need to be done before disks/networks
w.images.cleanup()
w.disks.cleanup()
w.forwardingRules.cleanup()
w.targetInstances.cleanup()
w.firewallRules.cleanup()
w.subnetworks.cleanup()
w.networks.cleanup()
return nil
})
w.id = randString(5)
return w
}
// NewFromFile reads and unmarshals a workflow file.
// Recursively reads subworkflow steps as well.
func NewFromFile(file string) (*Workflow, error) {
w := New()
if err := readWorkflow(file, w); err != nil {
return nil, err
}
return w, nil
}
// JSONError takes an error from json.Unmarshal and returns a more
// user-friendly error.
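// For example (illustrative), the resulting message looks roughly like:
//   wf.json: JSON syntax error in line 3: invalid character ',' ...
//   "Steps": {,
//             ^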
func JSONError(file string, data []byte, err error) error {
// If this is a syntax error return a useful error.
sErr, ok := err.(*json.SyntaxError)
if !ok {
return err
}
// Byte number where the error line starts.
start := bytes.LastIndex(data[:sErr.Offset], []byte("\n")) + 1
// Assume end byte of error line is EOF unless this isn't the last line.
end := len(data)
if i := bytes.Index(data[start:], []byte("\n")); i >= 0 {
end = start + i
}
// Line number of error.
line := bytes.Count(data[:start], []byte("\n")) + 1
// Position of error in line (where to place the '^').
pos := int(sErr.Offset) - start
if pos != 0 {
pos = pos - 1
}
return fmt.Errorf("%s: JSON syntax error in line %d: %s \n%s\n%s^", file, line, err, data[start:end], strings.Repeat(" ", pos))
}
func readWorkflow(file string, w *Workflow) error {
data, err := ioutil.ReadFile(file)
if err != nil {
return err
}
w.workflowDir, err = filepath.Abs(filepath.Dir(file))
if err != nil {
return err
}
if err := json.Unmarshal(data, &w); err != nil {
return JSONError(file, data, err)
}
if w.OAuthPath != "" && !filepath.IsAbs(w.OAuthPath) {
w.OAuthPath = filepath.Join(w.workflowDir, w.OAuthPath)
}
for name, s := range w.Steps {
s.name = name
s.w = w
}
return nil
}
// stepsListen returns the first step that finishes/errs.
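// It performs a dynamic select (reflect.Select) over the done channels of the
// currently running steps; a received value is the step's error, while a closed
// channel (recvOk == false) means the step finished cleanly.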
func stepsListen(names []string, chans map[string]chan dErr) (string, dErr) {
cases := make([]reflect.SelectCase, len(names))
for i, name := range names {
cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(chans[name])}
}
caseIndex, value, recvOk := reflect.Select(cases)
name := names[caseIndex]
if recvOk {
// recvOk -> a step failed, return the error.
return name, value.Interface().(dErr)
}
return name, nil
}
| 1 | 8,933 | minor: to be consistent with other logs, either use "cleaning up" (preferred) or "cleanup". | GoogleCloudPlatform-compute-image-tools | go |
@@ -23,6 +23,7 @@ PG_ASYNC_LISTEN_COMMIT = False
# Redis
REDIS_HOST = "redis"
REDIS_PORT = 6379
+REDIS_NAMESPACE = "listenbrainz"
# Influx DB (main listen store)
INFLUX_HOST = "influx" | 1 | DEBUG = False # set to False in production mode
SECRET_KEY = "CHANGE_ME"
# DATABASES
# Primary database
SQLALCHEMY_DATABASE_URI = "postgresql://listenbrainz:listenbrainz@db:5432/listenbrainz"
MESSYBRAINZ_SQLALCHEMY_DATABASE_URI = "postgresql://messybrainz:messybrainz@db:5432/messybrainz"
POSTGRES_ADMIN_URI="postgresql://postgres@db/template1"
# Other postgres configuration options
# Oldest listens which can be stored in the database, in days.
MAX_POSTGRES_LISTEN_HISTORY = "-1"
# Log Postgres queries if they exceed this time, in milliseconds.
PG_QUERY_TIMEOUT = "3000"
# Set to True to enable 'synchronous_commit' for Postgres. Default: False
PG_ASYNC_LISTEN_COMMIT = False
# Redis
REDIS_HOST = "redis"
REDIS_PORT = 6379
# Influx DB (main listen store)
INFLUX_HOST = "influx"
INFLUX_PORT = 8086
INFLUX_DB_NAME = "listenbrainz"
# RabbitMQ
RABBITMQ_HOST = "rabbitmq"
RABBITMQ_PORT = 5672
RABBITMQ_USERNAME = "guest"
RABBITMQ_PASSWORD = "guest"
RABBITMQ_VHOST = "/"
# RabbitMQ exchanges and queues
INCOMING_EXCHANGE = "incoming"
INCOMING_QUEUE = "incoming"
UNIQUE_EXCHANGE = "unique"
UNIQUE_QUEUE = "unique"
# MusicBrainz OAuth
MUSICBRAINZ_CLIENT_ID = "CLIENT_ID"
MUSICBRAINZ_CLIENT_SECRET = "CLIENT_SECRET"
# Lastfm API
LASTFM_API_URL = "https://ws.audioscrobbler.com/2.0/"
LASTFM_API_KEY = "USE_LASTFM_API_KEY"
# BigQuery support
# Enable/disable support. If enabled, the Application Credentials must reside in
# bigquery-credentials.json in the top level directory.
WRITE_TO_BIGQUERY = False
BIGQUERY_PROJECT_ID = "listenbrainz"
BIGQUERY_DATASET_ID = "listenbrainz_test"
BIGQUERY_TABLE_ID = "listen"
# Stats
STATS_ENTITY_LIMIT = 100 # the number of entities to calculate at max with BQ
STATS_CALCULATION_LOGIN_TIME = 30 # users must have logged in to LB in the past 30 days for stats to be calculated
STATS_CALCULATION_INTERVAL = 7 # stats are calculated every 7 days
# Max time in seconds after which the playing_now stream will expire.
PLAYING_NOW_MAX_DURATION = 10 * 60
# LOGGING
#LOG_FILE_ENABLED = True
#LOG_FILE = "./listenbrainz.log"
#LOG_EMAIL_ENABLED = True
#LOG_EMAIL_TOPIC = "ListenBrainz Webserver Failure"
#LOG_EMAIL_RECIPIENTS = [] # List of email addresses (strings)
#LOG_SENTRY_ENABLED = True
#SENTRY_DSN = ""
# MISCELLANEOUS
# Set to True if Less should be compiled in browser. Set to False if styling is pre-compiled.
COMPILE_LESS = True
# MAX file size to be allowed for the lastfm-backup import, default is infinite
# Size is in bytes
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16MB
# Specify the upload folder where all the lastfm-backups will be stored
# The path must be an absolute path
UPLOAD_FOLDER = "/tmp/lastfm-backup-upload"
API_URL = 'https://api.listenbrainz.org'
LASTFM_PROXY_URL = 'http://0.0.0.0:8080/'
| 1 | 14,713 | Adding the config changes to the consul config template `consul_config.py.ctmpl` would be helpful too. | metabrainz-listenbrainz-server | py |
@@ -56,7 +56,7 @@ import org.hibernate.validator.constraints.NotEmpty;
public class Dataverse extends DvObjectContainer {
public enum DataverseType {
- RESEARCHERS, RESEARCH_PROJECTS, JOURNALS, ORGANIZATIONS_INSTITUTIONS, TEACHING_COURSES, UNCATEGORIZED
+ RESEARCHERS, RESEARCH_PROJECTS, JOURNALS, ORGANIZATIONS_INSTITUTIONS, TEACHING_COURSES, UNCATEGORIZED, LABORATORY, RESEARCH_GROUP
};
private static final long serialVersionUID = 1L; | 1 | package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.authorization.DataverseRole;
import edu.harvard.iq.dataverse.search.savedsearch.SavedSearch;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.JoinTable;
import javax.persistence.ManyToMany;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToMany;
import javax.persistence.OneToOne;
import javax.persistence.OrderBy;
import javax.persistence.Table;
import javax.persistence.Transient;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Pattern;
import javax.validation.constraints.Size;
import org.hibernate.validator.constraints.NotBlank;
import org.hibernate.validator.constraints.NotEmpty;
/**
*
* @author gdurand
* @author mbarsinai
*/
@NamedQueries({
@NamedQuery(name = "Dataverse.ownedObjectsById", query = "SELECT COUNT(obj) FROM DvObject obj WHERE obj.owner.id=:id"),
@NamedQuery(name = "Dataverse.findByAlias", query="SELECT dv FROM Dataverse dv WHERE LOWER(dv.alias)=:alias")
})
@Entity
@Table(indexes = {@Index(columnList="fk_dataverse_id")
, @Index(columnList="defaultcontributorrole_id")
, @Index(columnList="defaulttemplate_id")
, @Index(columnList="alias")
, @Index(columnList="affiliation")
, @Index(columnList="dataversetype")
, @Index(columnList="facetroot")
, @Index(columnList="guestbookroot")
, @Index(columnList="metadatablockroot")
, @Index(columnList="templateroot")
, @Index(columnList="permissionroot")
, @Index(columnList="themeroot")})
public class Dataverse extends DvObjectContainer {
public enum DataverseType {
RESEARCHERS, RESEARCH_PROJECTS, JOURNALS, ORGANIZATIONS_INSTITUTIONS, TEACHING_COURSES, UNCATEGORIZED
};
private static final long serialVersionUID = 1L;
@NotBlank(message = "Please enter a name.")
@Column( nullable = false )
private String name;
/**
* @todo add @Column(nullable = false) for the database to enforce non-null
*/
@NotBlank(message = "Please enter an alias.")
@Column(nullable = false, unique=true)
@Size(max = 60, message = "Alias must be at most 60 characters.")
@Pattern.List({@Pattern(regexp = "[a-zA-Z0-9\\_\\-]*", message = "Found an illegal character(s). Valid characters are a-Z, 0-9, '_', and '-'."),
@Pattern(regexp=".*\\D.*", message="Alias should not be a number")})
private String alias;
@Column(name = "description", columnDefinition = "TEXT")
private String description;
@Enumerated(EnumType.STRING)
@NotNull(message = "Please select a category for your dataverse.")
@Column( nullable = false )
private DataverseType dataverseType;
/**
* When {@code true}, users are not granted the permissions they got for parent
* dataverses.
*/
protected boolean permissionRoot;
public DataverseType getDataverseType() {
return dataverseType;
}
public void setDataverseType(DataverseType dataverseType) {
this.dataverseType = dataverseType;
}
@Transient
private final String uncategorizedString = "Uncategorized";
/**
* @todo Don't hard code these as English.
*/
public String getFriendlyCategoryName(){
switch (this.dataverseType) {
case RESEARCHERS:
return "Researcher";
case RESEARCH_PROJECTS:
return "Research Project";
case JOURNALS:
return "Journal";
case ORGANIZATIONS_INSTITUTIONS:
return "Organization or Institution";
case TEACHING_COURSES:
return "Teaching Course";
case UNCATEGORIZED:
return uncategorizedString;
default:
return "";
}
}
public String getIndexableCategoryName() {
String friendlyName = getFriendlyCategoryName();
if (friendlyName.equals(uncategorizedString)) {
return null;
} else {
return friendlyName;
}
}
private String affiliation;
// Note: We can't have "Remove" here, as there are role assignments that refer
// to this role. So, adding it would mean violating a foreign key constraint.
@OneToMany(cascade = {CascadeType.MERGE},
fetch = FetchType.LAZY,
mappedBy = "owner")
private Set<DataverseRole> roles;
@ManyToOne
@JoinColumn(nullable = false)
private DataverseRole defaultContributorRole;
public DataverseRole getDefaultContributorRole() {
return defaultContributorRole;
}
public void setDefaultContributorRole(DataverseRole defaultContributorRole) {
this.defaultContributorRole = defaultContributorRole;
}
private boolean metadataBlockRoot;
private boolean facetRoot;
private boolean themeRoot;
private boolean templateRoot;
@OneToOne(mappedBy = "dataverse",cascade={ CascadeType.REMOVE, CascadeType.MERGE,CascadeType.PERSIST}, orphanRemoval=true)
private DataverseTheme dataverseTheme;
@OneToMany(mappedBy = "dataverse",cascade={ CascadeType.REMOVE, CascadeType.MERGE,CascadeType.PERSIST}, orphanRemoval=true)
@OrderBy("displayOrder")
@NotEmpty(message="At least one contact is required.")
private List<DataverseContact> dataverseContacts = new ArrayList();
@ManyToMany(cascade = {CascadeType.MERGE})
private List<MetadataBlock> metadataBlocks = new ArrayList();
@OneToMany(mappedBy = "dataverse",cascade={ CascadeType.REMOVE, CascadeType.MERGE,CascadeType.PERSIST}, orphanRemoval=true)
@OrderBy("displayOrder")
private List<DataverseFacet> dataverseFacets = new ArrayList();
@ManyToMany
@JoinTable(name = "dataversesubjects",
joinColumns = @JoinColumn(name = "dataverse_id"),
inverseJoinColumns = @JoinColumn(name = "controlledvocabularyvalue_id"))
private Set<ControlledVocabularyValue> dataverseSubjects;
@OneToMany(mappedBy="dataverse", cascade={CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
private List<DataverseFeaturedDataverse> dataverseFeaturedDataverses;
public List<DataverseFeaturedDataverse> getDataverseFeaturedDataverses() {
return dataverseFeaturedDataverses;
}
public void setDataverseFeaturedDataverses(List<DataverseFeaturedDataverse> dataverseFeaturedDataverses) {
this.dataverseFeaturedDataverses = dataverseFeaturedDataverses;
}
@OneToMany(mappedBy="featuredDataverse", cascade={CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
private List<DataverseFeaturedDataverse> dataverseFeaturingDataverses;
public List<DataverseFeaturedDataverse> getDataverseFeaturingDataverses() {
return dataverseFeaturingDataverses;
}
public void setDataverseFeaturingDataverses(List<DataverseFeaturedDataverse> dataverseFeaturingDataverses) {
this.dataverseFeaturingDataverses = dataverseFeaturingDataverses;
}
@OneToMany(mappedBy="dataverse", cascade={CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
private List<DataverseLinkingDataverse> dataverseLinkingDataverses;
public List<DataverseLinkingDataverse> getDataverseLinkingDataverses() {
return dataverseLinkingDataverses;
}
public void setDataverseLinkingDataverses(List<DataverseLinkingDataverse> dataverseLinkingDataverses) {
this.dataverseLinkingDataverses = dataverseLinkingDataverses;
}
@OneToMany(mappedBy="linkingDataverse", cascade={CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
private List<DataverseLinkingDataverse> dataverseLinkedDataverses;
public List<DataverseLinkingDataverse> getDataverseLinkedDataverses() {
return dataverseLinkedDataverses;
}
public void setDataverseLinkedDataverses(List<DataverseLinkingDataverse> dataverseLinkedDataverses) {
this.dataverseLinkedDataverses = dataverseLinkedDataverses;
}
@OneToMany(mappedBy="linkingDataverse", cascade={CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
private List<DatasetLinkingDataverse> datasetLinkingDataverses;
public List<DatasetLinkingDataverse> getDatasetLinkingDataverses() {
return datasetLinkingDataverses;
}
public void setDatasetLinkingDataverses(List<DatasetLinkingDataverse> datasetLinkingDataverses) {
this.datasetLinkingDataverses = datasetLinkingDataverses;
}
public Set<ControlledVocabularyValue> getDataverseSubjects() {
return dataverseSubjects;
}
public void setDataverseSubjects(Set<ControlledVocabularyValue> dataverseSubjects) {
this.dataverseSubjects = dataverseSubjects;
}
@OneToMany(mappedBy = "dataverse")
private List<DataverseFieldTypeInputLevel> dataverseFieldTypeInputLevels = new ArrayList();
@ManyToOne
@JoinColumn(nullable = true)
private Template defaultTemplate;
@OneToMany(mappedBy = "definitionPoint", cascade={CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
private List<SavedSearch> savedSearches;
public List<SavedSearch> getSavedSearches() {
return savedSearches;
}
public void setSavedSearches(List<SavedSearch> savedSearches) {
this.savedSearches = savedSearches;
}
@OneToMany(mappedBy="dataverse", cascade = {CascadeType.MERGE, CascadeType.REMOVE})
private List<Template> templates;
@OneToMany(mappedBy="dataverse", cascade = {CascadeType.MERGE, CascadeType.REMOVE})
private List<Guestbook> guestbooks;
public List<Guestbook> getGuestbooks() {
return guestbooks;
}
public void setGuestbooks(List<Guestbook> guestbooks) {
this.guestbooks = guestbooks;
}
@OneToOne (mappedBy="dataverse", cascade={CascadeType.PERSIST, CascadeType.REMOVE})
private HarvestingDataverseConfig harvestingDataverseConfig;
public HarvestingDataverseConfig getHarvestingDataverseConfig() {
return this.harvestingDataverseConfig;
}
public void setHarvestingDataverseConfig(HarvestingDataverseConfig harvestingDataverseConfig) {
this.harvestingDataverseConfig = harvestingDataverseConfig;
}
public boolean isHarvested() {
return harvestingDataverseConfig != null;
}
public List<Guestbook> getParentGuestbooks() {
List<Guestbook> retList = new ArrayList();
Dataverse testDV = this;
while (testDV.getOwner() != null){
retList.addAll(testDV.getOwner().getGuestbooks());
if(testDV.getOwner().guestbookRoot){
break;
}
testDV = testDV.getOwner();
}
return retList;
}
public List<Guestbook> getAvailableGuestbooks() {
//get all guestbooks
List<Guestbook> retList = new ArrayList();
Dataverse testDV = this;
List<Guestbook> allGbs = new ArrayList();
if (!this.guestbookRoot){
while (testDV.getOwner() != null){
allGbs.addAll(testDV.getOwner().getGuestbooks());
if (testDV.getOwner().isGuestbookRoot()) {
break;
}
testDV = testDV.getOwner();
}
}
allGbs.addAll(this.getGuestbooks());
//then only display them if they are enabled
for (Guestbook gbt : allGbs) {
if (gbt.isEnabled()) {
retList.add(gbt);
}
}
return retList;
}
private boolean guestbookRoot;
public boolean isGuestbookRoot() {
return guestbookRoot;
}
public void setGuestbookRoot(boolean guestbookRoot) {
this.guestbookRoot = guestbookRoot;
}
public void setDataverseFieldTypeInputLevels(List<DataverseFieldTypeInputLevel> dataverseFieldTypeInputLevels) {
this.dataverseFieldTypeInputLevels = dataverseFieldTypeInputLevels;
}
public List<DataverseFieldTypeInputLevel> getDataverseFieldTypeInputLevels() {
return dataverseFieldTypeInputLevels;
}
public Template getDefaultTemplate() {
return defaultTemplate;
}
public void setDefaultTemplate(Template defaultTemplate) {
this.defaultTemplate = defaultTemplate;
}
public List<Template> getTemplates() {
return templates;
}
public void setTemplates(List<Template> templates) {
this.templates = templates;
}
public List<Template> getParentTemplates() {
List<Template> retList = new ArrayList();
Dataverse testDV = this;
while (testDV.getOwner() != null){
if (!testDV.getMetadataBlocks().equals(testDV.getOwner().getMetadataBlocks())){
break;
}
retList.addAll(testDV.getOwner().getTemplates());
if(testDV.getOwner().templateRoot){
break;
}
testDV = testDV.getOwner();
}
return retList;
}
public boolean isThemeRoot() {
return themeRoot;
}
public boolean getThemeRoot() {
return themeRoot;
}
public void setThemeRoot(boolean themeRoot) {
this.themeRoot = themeRoot;
}
public boolean isTemplateRoot() {
return templateRoot;
}
public void setTemplateRoot(boolean templateRoot) {
this.templateRoot = templateRoot;
}
public List<MetadataBlock> getMetadataBlocks() {
return getMetadataBlocks(false);
}
public List<MetadataBlock> getMetadataBlocks(boolean returnActualDB) {
if (returnActualDB || metadataBlockRoot || getOwner() == null) {
return metadataBlocks;
} else {
return getOwner().getMetadataBlocks();
}
}
public Long getMetadataRootId(){
if(metadataBlockRoot || getOwner() == null){
return this.getId();
} else {
return getOwner().getMetadataRootId();
}
}
public DataverseTheme getDataverseTheme() {
return getDataverseTheme(false);
}
public DataverseTheme getDataverseTheme(boolean returnActualDB) {
if (returnActualDB || themeRoot || getOwner() == null) {
return dataverseTheme;
} else {
return getOwner().getDataverseTheme();
}
}
public String getGuestbookRootDataverseName() {
Dataverse testDV = this;
String retName = "Parent";
while (testDV.getOwner() != null) {
retName = testDV.getOwner().getDisplayName();
if (testDV.getOwner().guestbookRoot) {
break;
}
testDV = testDV.getOwner();
}
return retName;
}
public String getTemplateRootDataverseName() {
Dataverse testDV = this;
String retName = "Parent";
while (testDV.getOwner() != null) {
retName = testDV.getOwner().getDisplayName();
if (testDV.getOwner().templateRoot) {
break;
}
testDV = testDV.getOwner();
}
return retName;
}
public String getThemeRootDataverseName() {
Dataverse testDV = this;
String retName = "Parent";
while (testDV.getOwner() != null) {
retName = testDV.getOwner().getDisplayName();
if (testDV.getOwner().themeRoot) {
break;
}
testDV = testDV.getOwner();
}
return retName;
}
public String getMetadataRootDataverseName() {
Dataverse testDV = this;
String retName = "Parent";
while (testDV.getOwner() != null) {
retName = testDV.getOwner().getDisplayName();
if (testDV.getOwner().metadataBlockRoot) {
break;
}
testDV = testDV.getOwner();
}
return retName;
}
public String getFacetRootDataverseName() {
Dataverse testDV = this;
String retName = "Parent";
while (testDV.getOwner() != null) {
retName = testDV.getOwner().getDisplayName();
if (testDV.getOwner().facetRoot) {
break;
}
testDV = testDV.getOwner();
}
return retName;
}
public String getLogoOwnerId() {
if (themeRoot || getOwner()==null) {
return this.getId().toString();
} else {
return getOwner().getId().toString();
}
}
public void setDataverseTheme(DataverseTheme dataverseTheme) {
this.dataverseTheme=dataverseTheme;
}
public void setMetadataBlocks(List<MetadataBlock> metadataBlocks) {
this.metadataBlocks = metadataBlocks;
}
public List<DataverseFacet> getDataverseFacets() {
return getDataverseFacets(false);
}
public List<DataverseFacet> getDataverseFacets(boolean returnActualDB) {
if (returnActualDB || facetRoot || getOwner() == null) {
return dataverseFacets;
} else {
return getOwner().getDataverseFacets();
}
}
public Long getFacetRootId(){
if(facetRoot || getOwner() == null){
return this.getId();
} else {
return getOwner().getFacetRootId();
}
}
public void setDataverseFacets(List<DataverseFacet> dataverseFacets) {
this.dataverseFacets = dataverseFacets;
}
public List<DataverseContact> getDataverseContacts() {
return dataverseContacts;
}
/**
* Get the email addresses of the dataverse contacts as a comma-separated
* concatenation.
* @return a comma-separated concatenation of email addresses, or the empty
* string if there are no contacts.
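* (For example, contacts "[email protected]" and "[email protected]" would be returned
* as "[email protected],[email protected]" - comma-separated, with no surrounding spaces.)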
* @author bencomp
*/
public String getContactEmails() {
if (dataverseContacts != null && !dataverseContacts.isEmpty()) {
StringBuilder buf = new StringBuilder();
Iterator<DataverseContact> it = dataverseContacts.iterator();
while (it.hasNext()) {
DataverseContact con = it.next();
buf.append(con.getContactEmail());
if (it.hasNext()) {
buf.append(",");
}
}
return buf.toString();
} else {
return "";
}
}
public void setDataverseContacts(List<DataverseContact> dataverseContacts) {
this.dataverseContacts = dataverseContacts;
}
public void addDataverseContact(int index) {
dataverseContacts.add(index, new DataverseContact(this));
}
public void removeDataverseContact(int index) {
dataverseContacts.remove(index);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getAlias() {
return alias;
}
public void setAlias(String alias) {
this.alias = alias;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getAffiliation() {
return affiliation;
}
public void setAffiliation(String affiliation) {
this.affiliation = affiliation;
}
public boolean isMetadataBlockRoot() {
return metadataBlockRoot;
}
public void setMetadataBlockRoot(boolean metadataBlockRoot) {
this.metadataBlockRoot = metadataBlockRoot;
}
public boolean isFacetRoot() {
return facetRoot;
}
public void setFacetRoot(boolean facetRoot) {
this.facetRoot = facetRoot;
}
public void addRole(DataverseRole role) {
role.setOwner(this);
roles.add(role);
}
public Set<DataverseRole> getRoles() {
return roles;
}
public List<Dataverse> getOwners() {
List owners = new ArrayList();
if (getOwner() != null) {
owners.addAll(getOwner().getOwners());
owners.add(getOwner());
}
return owners;
}
@Override
public boolean equals(Object object) {
// TODO: Warning - this method won't work in the case the id fields are not set
if (!(object instanceof Dataverse)) {
return false;
}
Dataverse other = (Dataverse) object;
return Objects.equals(getId(), other.getId());
}
@Override
protected String toStringExtras() {
return "name:" + getName();
}
@Override
public <T> T accept(Visitor<T> v) {
return v.visit(this);
}
/**
* @todo implement in https://github.com/IQSS/dataverse/issues/551
*/
public String getDepositTermsOfUse() {
return "Dataverse Deposit Terms of Use will be implemented in https://github.com/IQSS/dataverse/issues/551";
}
@Override
public String getDisplayName() {
return getName() + " Dataverse";
}
@Override
public boolean isPermissionRoot() {
return permissionRoot;
}
public void setPermissionRoot(boolean permissionRoot) {
this.permissionRoot = permissionRoot;
}
}
| 1 | 35,004 | While we're at it should we add Department? See Dataverse Category: Add Department #2829 | IQSS-dataverse | java |
@@ -43,7 +43,12 @@ module Selenium
def text
@bridge.getAlertText
end
+
+ def authenticate(username, password)
+ @bridge.setAuthentication username: username, password: password
+ accept
+ end
end # Alert
end # WebDriver
-end # Selenium
+end # Selenium | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
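#
# Wraps interaction with JavaScript alert/confirm/prompt dialogs.
# Example (illustrative):
#
#   alert = driver.switch_to.alert
#   alert.send_keys 'selenium'
#   alert.accept
#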
class Alert
def initialize(bridge)
@bridge = bridge
# fail fast if the alert doesn't exist
bridge.getAlertText
end
def accept
@bridge.acceptAlert
end
def dismiss
@bridge.dismissAlert
end
def send_keys(keys)
@bridge.setAlertValue keys
end
def text
@bridge.getAlertText
end
end # Alert
end # WebDriver
end # Selenium | 1 | 13,040 | Files should have an extra line at the end of them. | SeleniumHQ-selenium | py |
@@ -22,13 +22,14 @@ function detachedClone(vnode) {
if (vnode) {
vnode = assign({}, vnode);
vnode._component = null;
+ vnode._original = vnode;
vnode._children = vnode._children && vnode._children.map(detachedClone);
}
return vnode;
}
// having custom inheritance instead of a class here saves a lot of bytes
-export function Suspense(props) {
+export function Suspense() {
// we do not call super here to golf some bytes...
this._pendingSuspensionCount = 0;
this._suspenders = null; | 1 | import { Component, createElement, options } from 'preact';
import { assign } from './util';
const oldCatchError = options._catchError;
options._catchError = function(error, newVNode, oldVNode) {
if (error.then) {
/** @type {import('./internal').Component} */
let component;
let vnode = newVNode;
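// Walk up the VNode tree to find the nearest ancestor that can handle the
// suspension, i.e. a component implementing _childDidSuspend (such as Suspense).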
for (; (vnode = vnode._parent); ) {
if ((component = vnode._component) && component._childDidSuspend) {
// Don't call oldCatchError if we found a Suspense
return component._childDidSuspend(error, newVNode._component);
}
}
}
oldCatchError(error, newVNode, oldVNode);
};
function detachedClone(vnode) {
if (vnode) {
vnode = assign({}, vnode);
vnode._component = null;
vnode._children = vnode._children && vnode._children.map(detachedClone);
}
return vnode;
}
// having custom inheritance instead of a class here saves a lot of bytes
export function Suspense(props) {
// we do not call super here to golf some bytes...
this._pendingSuspensionCount = 0;
this._suspenders = null;
this._detachOnNextRender = null;
}
// Things we do here to save some bytes but are not proper JS inheritance:
// - call `new Component()` as the prototype
// - do not set `Suspense.prototype.constructor` to `Suspense`
Suspense.prototype = new Component();
/**
* @param {Promise} promise The thrown promise
* @param {Component<any, any>} suspendingComponent The suspending component
*/
Suspense.prototype._childDidSuspend = function(promise, suspendingComponent) {
/** @type {import('./internal').SuspenseComponent} */
const c = this;
if (c._suspenders == null) {
c._suspenders = [];
}
c._suspenders.push(suspendingComponent);
const resolve = suspended(c._vnode);
let resolved = false;
const onResolved = () => {
if (resolved) return;
resolved = true;
if (resolve) {
resolve(onSuspensionComplete);
} else {
onSuspensionComplete();
}
};
suspendingComponent._suspendedComponentWillUnmount =
suspendingComponent.componentWillUnmount;
suspendingComponent.componentWillUnmount = () => {
onResolved();
if (suspendingComponent._suspendedComponentWillUnmount) {
suspendingComponent._suspendedComponentWillUnmount();
}
};
const onSuspensionComplete = () => {
if (!--c._pendingSuspensionCount) {
c._vnode._children[0] = c.state._suspended;
c.setState({ _suspended: (c._detachOnNextRender = null) });
let suspended;
while ((suspended = c._suspenders.pop())) {
suspended.forceUpdate();
}
}
};
if (!c._pendingSuspensionCount++) {
c.setState({ _suspended: (c._detachOnNextRender = c._vnode._children[0]) });
}
promise.then(onResolved, onResolved);
};
Suspense.prototype.render = function(props, state) {
if (this._detachOnNextRender) {
this._vnode._children[0] = detachedClone(this._detachOnNextRender);
this._detachOnNextRender = null;
}
return [
createElement(Component, null, state._suspended ? null : props.children),
state._suspended && props.fallback
];
};
/**
* Checks and calls the parent component's _suspended method, passing in the
* suspended vnode. This is a way for a parent (e.g. SuspenseList) to get notified
* that one of its children/descendants suspended.
*
* The parent MAY return a callback. The callback will get called when the
* suspension resolves, notifying the parent of the fact.
* Moreover, the callback gets function `unsuspend` as a parameter. The resolved
* child descendant will not actually get unsuspended until `unsuspend` gets called.
* This is a way for the parent to delay unsuspending.
*
* If the parent does not return a callback then the resolved vnode
* gets unsuspended immediately when it resolves.
*
* @param {import('../src/internal').VNode} vnode
* @returns {((unsuspend: () => void) => void)?}
*/
export function suspended(vnode) {
let component = vnode._parent._component;
return component && component._suspended && component._suspended(vnode);
}
export function lazy(loader) {
let prom;
let component;
let error;
function Lazy(props) {
if (!prom) {
prom = loader();
prom.then(
exports => {
component = exports.default || exports;
},
e => {
error = e;
}
);
}
if (error) {
throw error;
}
if (!component) {
throw prom;
}
return createElement(component, props);
}
Lazy.displayName = 'Lazy';
Lazy._forwarded = true;
return Lazy;
}
| 1 | 15,372 | TODO: check whether this is needed or not | preactjs-preact | js |
@@ -48,8 +48,12 @@ type Config struct {
Aliases map[string]string `yaml:"aliases"`
}
-// ReadConfig represents the current config read from local
-var ReadConfig Config
+var (
+ // ReadConfig represents the current config read from local
+ ReadConfig Config
+ // IsInsecure represents the connect option of grpc dial
+ IsInsecure bool
+)
func init() {
ConfigDir = os.Getenv("HOME") + "/.config/ioctl/default" | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"fmt"
"io/ioutil"
"os"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/pkg/log"
)
// Directories
var (
// ConfigDir is the directory to store config file
ConfigDir string
// DefaultConfigFile is the default config file name
DefaultConfigFile string
)
// Error strings
var (
// ErrConfigNotMatch indicates error for no config matches
ErrConfigNotMatch = fmt.Errorf("no config matches")
// ErrEmptyEndpoint indicates error for empty endpoint
ErrEmptyEndpoint = fmt.Errorf("no endpoint has been set")
)
// ConfigCmd represents the config command
var ConfigCmd = &cobra.Command{
Use: "config",
Short: "Set or get configuration for ioctl",
ValidArgs: []string{"set", "get"},
Args: cobra.MinimumNArgs(1),
}
// Config defines the config schema
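// For example (illustrative), the YAML config might look like:
//   endpoint: 127.0.0.1:14014
//   wallet: /home/user/.config/ioctl/default
//   aliases:
//     mywallet: <io address>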
type Config struct {
Endpoint string `yaml:"endpoint"`
Wallet string `yaml:"wallet"`
Aliases map[string]string `yaml:"aliases"`
}
// ReadConfig represents the current config read from local
var ReadConfig Config
func init() {
ConfigDir = os.Getenv("HOME") + "/.config/ioctl/default"
if err := os.MkdirAll(ConfigDir, 0700); err != nil {
log.L().Panic(err.Error())
}
DefaultConfigFile = ConfigDir + "/config.default"
var err error
ReadConfig, err = LoadConfig()
if err != nil || ReadConfig.Wallet == "" {
if !os.IsNotExist(err) || ReadConfig.Wallet == "" {
ReadConfig.Wallet = ConfigDir
out, err := yaml.Marshal(&ReadConfig)
if err != nil {
log.L().Panic(err.Error())
}
if err := ioutil.WriteFile(DefaultConfigFile, out, 0600); err != nil {
log.L().Panic(fmt.Sprintf("Failed to write to config file %s.", DefaultConfigFile))
}
} else {
log.L().Panic(err.Error())
}
}
ConfigCmd.AddCommand(configGetCmd)
ConfigCmd.AddCommand(configSetCmd)
}
// LoadConfig loads config file in yaml format
func LoadConfig() (Config, error) {
ReadConfig := Config{
Aliases: make(map[string]string),
}
in, err := ioutil.ReadFile(DefaultConfigFile)
if err == nil {
if err := yaml.Unmarshal(in, &ReadConfig); err != nil {
return ReadConfig, err
}
}
return ReadConfig, err
}
| 1 | 17,367 | `ReadConfig` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
+//go:build !cgo
// +build !cgo
package ptrace | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !cgo
package ptrace
import (
"github.com/go-logr/logr"
"github.com/chaos-mesh/chaos-mesh/pkg/mapreader"
)
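// The functions below are no-cgo stubs: the "!cgo" build constraint above means
// this file is compiled only when cgo is disabled, and every stub simply panics
// because the real ptrace implementation requires cgo.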
// RegisterLogger registers a logger on ptrace pkg
func RegisterLogger(logger logr.Logger) {
panic("unimplemented")
}
// TracedProgram is a program traced by ptrace
type TracedProgram struct {
Entries []mapreader.Entry
}
// Pid return the pid of traced program
func (p *TracedProgram) Pid() int {
panic("unimplemented")
}
// Trace ptrace all threads of a process
func Trace(pid int) (*TracedProgram, error) {
panic("unimplemented")
}
// Detach detaches from all threads of the processes
func (p *TracedProgram) Detach() error {
panic("unimplemented")
}
// Protect will backup regs and rip into fields
func (p *TracedProgram) Protect() error {
panic("unimplemented")
}
// Restore will restore regs and rip from fields
func (p *TracedProgram) Restore() error {
panic("unimplemented")
}
// Wait waits until the process stops
func (p *TracedProgram) Wait() error {
panic("unimplemented")
}
// Step moves one step forward
func (p *TracedProgram) Step() error {
panic("unimplemented")
}
// Syscall runs a syscall at main thread of process
func (p *TracedProgram) Syscall(number uint64, args ...uint64) (uint64, error) {
panic("unimplemented")
}
// Mmap runs mmap syscall
func (p *TracedProgram) Mmap(length uint64, fd uint64) (uint64, error) {
panic("unimplemented")
}
// ReadSlice reads from addr and return a slice
func (p *TracedProgram) ReadSlice(addr uint64, size uint64) (*[]byte, error) {
panic("unimplemented")
}
// WriteSlice writes a buffer into addr
func (p *TracedProgram) WriteSlice(addr uint64, buffer []byte) error {
panic("unimplemented")
}
// PtraceWriteSlice uses ptrace rather than process_vm_write to write a buffer into addr
func (p *TracedProgram) PtraceWriteSlice(addr uint64, buffer []byte) error {
panic("unimplemented")
}
// GetLibBuffer reads an entry
func (p *TracedProgram) GetLibBuffer(entry *mapreader.Entry) (*[]byte, error) {
panic("unimplemented")
}
// MmapSlice mmaps a slice and return it's addr
func (p *TracedProgram) MmapSlice(slice []byte) (*mapreader.Entry, error) {
panic("unimplemented")
}
// FindSymbolInEntry finds symbol in entry through parsing elf
func (p *TracedProgram) FindSymbolInEntry(symbolName string, entry *mapreader.Entry) (uint64, error) {
panic("unimplemented")
}
// WriteUint64ToAddr writes uint64 to addr
func (p *TracedProgram) WriteUint64ToAddr(addr uint64, value uint64) error {
panic("unimplemented")
}
// JumpToFakeFunc writes jmp instruction to jump to fake function
func (p *TracedProgram) JumpToFakeFunc(originAddr uint64, targetAddr uint64) error {
panic("unimplemented")
}
| 1 | 26,519 | This is duplicated with `+build !cgo`? | chaos-mesh-chaos-mesh | go |
@@ -34,7 +34,7 @@ module Selenium
driver.manage.timeouts.implicit_wait = 6
driver.find_element(id: 'adder').click
- driver.find_element(id: 'box0')
+ expect { driver.find_element(id: 'box0') }.not_to raise_error(WebDriver::Error::NoSuchElementError)
end
it 'should still fail to find an element with implicit waits enabled' do | 1 | # frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require_relative 'spec_helper'
module Selenium
module WebDriver
describe Timeouts do
context 'implicit waits' do
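# An implicit wait makes find_element keep polling for up to the configured
# time before raising Error::NoSuchElementError.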
before do
driver.manage.timeouts.implicit_wait = 0
driver.navigate.to url_for('dynamic.html')
end
after { driver.manage.timeouts.implicit_wait = 0 }
it 'should implicitly wait for a single element', except: {browser: :safari_preview} do
driver.manage.timeouts.implicit_wait = 6
driver.find_element(id: 'adder').click
driver.find_element(id: 'box0')
end
it 'should still fail to find an element with implicit waits enabled' do
driver.manage.timeouts.implicit_wait = 0.5
expect { driver.find_element(id: 'box0') }.to raise_error(WebDriver::Error::NoSuchElementError)
end
it 'should return after first attempt to find one after disabling implicit waits' do
driver.manage.timeouts.implicit_wait = 3
driver.manage.timeouts.implicit_wait = 0
expect { driver.find_element(id: 'box0') }.to raise_error(WebDriver::Error::NoSuchElementError)
end
it 'should implicitly wait until at least one element is found when searching for many' do
add = driver.find_element(id: 'adder')
driver.manage.timeouts.implicit_wait = 6
add.click
add.click
expect(driver.find_elements(class_name: 'redbox')).not_to be_empty
end
it 'should still fail to find elements when implicit waits are enabled' do
driver.manage.timeouts.implicit_wait = 0.5
expect(driver.find_elements(class_name: 'redbox')).to be_empty
end
it 'should return after first attempt to find many after disabling implicit waits', except: {browser: :firefox, platform: :windows} do
add = driver.find_element(id: 'adder')
driver.manage.timeouts.implicit_wait = 3
driver.manage.timeouts.implicit_wait = 0
add.click
expect(driver.find_elements(class_name: 'redbox')).to be_empty
end
end
context 'page loads' do
# w3c default is 300,000
after { driver.manage.timeouts.page_load = 300000 }
it 'should be able to set the page load timeout' do
expect { driver.manage.timeouts.page_load = 2 }.not_to raise_exception
end
end
end
end # WebDriver
end # Selenium
| 1 | 16,703 | This should just be `.not_to raise_error` otherwise it potentially hides errors | SeleniumHQ-selenium | py |
@@ -164,13 +164,17 @@ class DataFrame(_Frame):
else:
self._metadata = metadata
- def _reduce_for_stat_function(self, sfun):
+ def _reduce_for_stat_function(self, sfun, numeric_only=False):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
- :param sfun: either an 1-arg function that takes a Column and returns a Column, or
+ Parameters
+ ----------
+ sfun : either an 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
+ numeric_only : boolean, default False
+ If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
exprs = [] | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
import re
import warnings
from functools import partial, reduce
from typing import Any, Optional, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \
is_dict_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Column, DataFrame as SDataFrame
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType, StructField,
StructType, to_arrow_type)
from pyspark.sql.utils import AnalysisException
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.typedef import infer_pd_series_spark_type
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Two patterns basically seek the footer string from Pandas' string representation.
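# For example, REPR_PATTERN matches a plain-text footer such as
# "\n\n[100 rows x 5 columns]", and REPR_HTML_PATTERN matches the trailing
# "<p>100 rows × 5 columns</p>\n</div>" of the HTML representation.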
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
class DataFrame(_Frame):
"""
Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _sdf: Spark DataFrame instance
:type _sdf: SDataFrame
:ivar _metadata: Metadata related to column names and index information.
:type _metadata: Metadata
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, other arguments should not be used.
If `data` is a Spark DataFrame, all other arguments except `index` should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
If `data` is a Spark DataFrame, `index` is expected to be `Metadata`.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
self._init_from_pandas(data)
elif isinstance(data, spark.DataFrame):
assert columns is None
assert dtype is None
assert not copy
self._init_from_spark(data, index)
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
self._init_from_pandas(pdf)
def _init_from_pandas(self, pdf):
metadata = Metadata.from_pandas(pdf)
reset_index = pdf.reset_index()
reset_index.columns = metadata.columns
schema = StructType([StructField(name, infer_pd_series_spark_type(col),
nullable=bool(col.isnull().any()))
for name, col in reset_index.iteritems()])
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema),
metadata)
def _init_from_spark(self, sdf, metadata=None):
self._sdf = sdf
if metadata is None:
self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames())
else:
self._metadata = metadata
def _reduce_for_stat_function(self, sfun):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
:param sfun: either an 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
"""
from inspect import signature
exprs = []
num_args = len(signature(sfun).parameters)
for col in self.columns:
col_sdf = self._sdf[col]
col_type = self._sdf.schema[col].dataType
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
return row # Return first row as a Series
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: unlike pandas, it is required for `func` to specify return type hint.
See https://docs.python.org/3/library/typing.html. For instance, as below:
>>> def function() -> int:
... return 1
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for column in self._metadata.data_columns:
applied.append(self[column].apply(func))
sdf = self._sdf.select(
self._metadata.index_columns + [c._scol for c in applied])
metadata = self._metadata.copy(data_columns=[c.name for c in applied])
return DataFrame(sdf, metadata)
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default ‘NaN’
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns’ elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
default, ‘l’ will be used for all columns except columns of numbers, which default
to ‘r’.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to ‘ascii’ on
Python 2 and ‘utf-8’ on Python 3.
decimal : str, default ‘.’
Character recognized as decimal separator, e.g. ‘,’ in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default ‘l’
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._metadata.index_map) == 0:
return None
elif len(self._metadata.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._metadata.data_columns) == 0 or self._sdf.rdd.isEmpty()
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._metadata.data_columns if column not in keys]
else:
data_columns = self._metadata.data_columns
if append:
index_map = self._metadata.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)
# Sync Spark's columns as well.
sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])
if inplace:
self._metadata = metadata
self._sdf = sdf
else:
kdf = self.copy()
kdf._metadata = metadata
kdf._sdf = sdf
return kdf
def reset_index(self, level=None, drop=False, inplace=False):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
"""
# TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301
if len(self._metadata.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_map) > 1
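# Pick the default column name for a reset index level: 'level_<n>' for a MultiIndex,
# otherwise 'index' (falling back to 'level_<n>' if 'index' is already a data column).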
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._metadata.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._metadata.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._metadata.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._metadata.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._metadata.index_map.copy()
for i in idx:
info = self._metadata.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
if drop:
new_index_map = []
metadata = self._metadata.copy(
data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,
index_map=index_map)
columns = [name for _, name in new_index_map] + self._metadata.data_columns
if inplace:
self._metadata = metadata
self.columns = columns
else:
kdf = self.copy()
kdf._metadata = metadata
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current DataFrame.
Return a boolean same-sized DataFrame indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
DataFrame.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current DataFrame.
This function takes a DataFrame and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
DataFrame.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def nunique(self, axis: int = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
if axis != 0:
raise ValueError("The 'nunique' method only works with axis=0 at the moment")
count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct
if dropna:
res = self._sdf.select([count_fn(Column(c))
.alias(c)
for c in self.columns])
else:
res = self._sdf.select([(count_fn(Column(c))
# If the count of null values in a column is at least 1,
# increase the total count by 1 else 0. This is like adding
# self.isnull().sum().clip(upper=1) but can be computed in a
# single Spark job when pulling it into the select statement.
+ F.when(F.count(F.when(F.col(c).isNull(), 1).otherwise(None))
>= 1, 1).otherwise(0))
.alias(c)
for c in self.columns])
return res.toPandas().T.iloc[:, 0]
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._sdf
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns])
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
# TODO: push to OSS
pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
for field in sdf.schema})
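# Rebuild the pandas index (or MultiIndex) from the index columns recorded in the metadata.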
index_columns = self._metadata.index_columns
if len(index_columns) > 0:
append = False
for index_field in index_columns:
drop = index_field not in self._metadata.data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[self._metadata.data_columns]
index_names = self._metadata.index_names
if len(index_names) > 0:
if isinstance(pdf.index, pd.MultiIndex):
pdf.index.names = index_names
else:
pdf.index.name = index_names[0]
return pdf
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
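# Materialize each assignment as a Spark column: Series contribute their underlying
# column, Spark Columns are used as-is, and plain scalars are wrapped with F.lit.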
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(
data_columns=(data_columns +
[name for name, _ in pairs if name not in data_columns]))
return DataFrame(sdf, metadata)
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._sdf, self._metadata.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._metadata.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
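# Count the non-null values per row across the selected columns, then derive the
# filtering predicate from `thresh` / `how`.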
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. Alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
sdf = self._sdf.fillna(value)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like values are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
sdf = self._sdf
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
numeric_columns = [c for c in self.columns
if isinstance(sdf.schema[c].dataType, numeric_types)]
nonnumeric_columns = [c for c in self.columns
if not isinstance(sdf.schema[c].dataType, numeric_types)]
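# Clamp numeric columns at the given bounds; non-numeric columns pass through unchanged.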
if lower is not None:
sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
if upper is not None:
sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
# Restore initial column order
sdf = sdf.select(list(self.columns))
return ks.DataFrame(sdf)
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._sdf.limit(n), self._metadata.copy())
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._metadata.data_columns)
@columns.setter
def columns(self, names):
old_names = self._metadata.data_columns
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
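# Alias each data column to its new name in the Spark plan, keeping the index columns.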
sdf = self._sdf.select(self._metadata.index_columns +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._sdf = sdf
self._metadata = self._metadata.copy(data_columns=names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._metadata.data_columns],
index=self._metadata.data_columns)
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
metadata = self._metadata.copy(
data_columns=[column for column in self.columns if column not in columns]
)
return DataFrame(sdf, metadata)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())
if inplace:
self._sdf = kdf._sdf
self._metadata = kdf._metadata
return None
else:
return kdf
def sort_index(self, axis: int = 0, level: int = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]]).sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if level is not None:
raise ValueError("The 'level' argument is not supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is not supported at the moment.")
return self.sort_values(by=self._metadata.index_columns, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns as in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
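# Start the projection from the index columns so the result keeps the original index.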
_select_columns = self._metadata.index_columns
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self[col]._scol.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self[col]._scol.isin(list(values)).alias(col) for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(left, right: 'DataFrame', how: str = 'inner',
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
1 baz 3 baz 7
2 foo 1 foo 5
3 foo 1 foo 8
4 foo 5 foo 5
5 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
0 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
0 2.0 x
1 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda o: o if o is None or is_list_like(o) else [o]
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = left._metadata.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._metadata.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(left.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = common
right_keys = common
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = left._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_key_columns = [left_table[col] for col in left_keys] # type: ignore
right_key_columns = [right_table[col] for col in right_keys] # type: ignore
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(left._metadata.data_columns)
& set(right._metadata.data_columns))
left_index_columns = set(left._metadata.index_columns)
right_index_columns = set(right._metadata.index_columns)
# TODO: in some case, we can keep indexes.
exprs = []
for col in left_table.columns:
if col in left_index_columns:
continue
scol = left_table[col]
if col in duplicate_columns:
if col in left_keys and col in right_keys:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
exprs.append(scol)
for col in right_table.columns:
if col in right_index_columns:
continue
scol = right_table[col]
if col in duplicate_columns:
if col in left_keys and col in right_keys:
continue
else:
col = col + right_suffix
scol = scol.alias(col)
exprs.append(scol)
return DataFrame(joined_table.select(*exprs))
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function using a named argument, specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 40% of the elements of the ``Series`` ``df['num_legs']`` at random, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(sdf, self._metadata.copy())
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.iteritems():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._metadata.index_columns + list(map(lambda ser: ser._scol, results)))
return DataFrame(sdf, self._metadata.copy())
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(data_columns=[prefix + name for name in data_columns])
sdf = self._sdf.select(self._metadata.index_columns +
[self[name]._scol.alias(prefix + name)
for name in self._metadata.data_columns])
return DataFrame(sdf, metadata)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(data_columns=[name + suffix for name in data_columns])
sdf = self._sdf.select(self._metadata.index_columns +
[self[name]._scol.alias(name + suffix)
for name in self._metadata.data_columns])
return DataFrame(sdf, metadata)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
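# For float and double columns, nanvl() turns NaN into null so that Spark's summary() skips those values, matching pandas' describe(), which excludes NaN.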
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
return DataFrame(sdf.replace("stddev", "std", subset='summary'),
index=Metadata(data_columns=data_columns,
index_map=[('summary', None)])).astype('float64')
def _pd_getitem(self, key):
from databricks.koalas.series import Series
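# Dispatch on the key type: a string selects a single column as a Series, slices and lists are routed through .loc, and a boolean Series is applied as a row filter.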
if key is None:
raise KeyError("none key")
if isinstance(key, str):
try:
return Series(self._sdf.__getitem__(key), anchor=self,
index=self._metadata.index_map)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, str)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._sdf.filter(bcol), self._metadata.copy())
raise NotImplementedError(key)
def __repr__(self):
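# Fetch one row beyond the display limit; if it comes back, the output is truncated and the trailing pandas row/column summary is replaced with a "Showing only the first ..." footer.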
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_string = repr(pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return repr_string
def _repr_html_(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_html = pdf[:max_display_count]._repr_html_()
if pdf_length > max_display_count:
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return repr_html
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
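# Assigning to several column names at once: each name in `key` is paired with a field of `value` and all pairs are applied in a single assign() call.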
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._sdf = kdf._sdf
self._metadata = kdf._metadata
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map)
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known SQL aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
| 1 | 9,880 | Line 175 needs to be indented so that it's under `sfun`. | databricks-koalas | py |
@@ -21,12 +21,16 @@ namespace Nethermind.Blockchain.Filters.Topics
{
public class AnyTopic : TopicExpression
{
- public static readonly AnyTopic Instance = new();
+ public static readonly AnyTopic Instance = new();
+
+ private AnyTopic() { }
public override bool Accepts(Keccak topic) => true;
public override bool Accepts(ref KeccakStructRef topic) => true;
public override bool Matches(Bloom bloom) => true;
public override bool Matches(ref BloomStructRef bloom) => true;
+
+ public override string ToString() => "null";
}
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Core;
using Nethermind.Core.Crypto;
namespace Nethermind.Blockchain.Filters.Topics
{
public class AnyTopic : TopicExpression
{
public static readonly AnyTopic Instance = new();
public override bool Accepts(Keccak topic) => true;
public override bool Accepts(ref KeccakStructRef topic) => true;
public override bool Matches(Bloom bloom) => true;
public override bool Matches(ref BloomStructRef bloom) => true;
}
}
| 1 | 26,214 | It looks strange a bit. What is the reason? | NethermindEth-nethermind | .cs |
@@ -9,13 +9,17 @@ import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.util.Optional;
-import static com.github.javaparser.StaticJavaParser.parse;
+import static com.github.javaparser.StaticJavaParser.parse;import org.apache.log4j.Logger;
+
/**
* A strategy for discovering the structure of a project.
* Implementations could read a pom.xml, a Gradle build file, a makefile...
*/
-public interface CollectionStrategy {
+public interface CollectionStrategy {
+ protected static Logger LOG = Logger.getLogger(CollectionStrategy.class.getName());
+
+
ProjectRoot collect(Path path);
| 1 | package com.github.javaparser.utils;
import com.github.javaparser.ParseProblemException;
import com.github.javaparser.ast.CompilationUnit;
import java.io.FileNotFoundException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.util.Optional;
import static com.github.javaparser.StaticJavaParser.parse;
/**
* A strategy for discovering the structure of a project.
* Implementations could read a pom.xml, a Gradle build file, a makefile...
*/
public interface CollectionStrategy {
ProjectRoot collect(Path path);
default Optional<Path> getRoot(Path file) throws FileNotFoundException {
try {
return parse(file.toFile()).getStorage()
.map(CompilationUnit.Storage::getSourceRoot);
} catch (ParseProblemException e) {
Log.info("Problem parsing file %s", () -> file);
} catch (RuntimeException e) {
Log.info("Could not parse file %s", () -> file);
}
return Optional.empty();
}
default PathMatcher getPathMatcher(String pattern) {
return FileSystems.getDefault().getPathMatcher(pattern);
}
}
| 1 | 13,482 | Is this one of those `LexicalPreservingPrinter` issues? | javaparser-javaparser | java |
@@ -61,6 +61,12 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ buf.WriteString("<binary>")
+ break
+ }
+
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) | 1 | package awsutil
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
)
// Prettify returns the string representation of a value.
func Prettify(i interface{}) string {
var buf bytes.Buffer
prettify(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
// prettify will recursively walk value v to build a textual
// representation of the value.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
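// time.Time values are printed directly and io.* readers are replaced with a placeholder instead of being dumped field by field.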
strtype := v.Type().String()
if strtype == "time.Time" {
fmt.Fprintf(buf, "%s", v.Interface())
break
} else if strings.HasPrefix(strtype, "io.") {
buf.WriteString("<buffer>")
break
}
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
prettify(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
prettify(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
prettify(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
if !v.IsValid() {
fmt.Fprint(buf, "<invalid value>")
return
}
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
case io.ReadSeeker, io.Reader:
format = "buffer(%p)"
}
fmt.Fprintf(buf, format, v.Interface())
}
}
| 1 | 8,412 | Can we also add the length of the slice here? Some like `<binary> len %d` | aws-aws-sdk-go | go |
@@ -7,6 +7,8 @@
package block
import (
+ "github.com/iotexproject/iotex-core/pkg/log"
+ "go.uber.org/zap"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/iotex-proto/golang/iotextypes" | 1 | // Copyright (c) 2020 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package block
import (
"google.golang.org/protobuf/proto"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
)
type (
// Store defines block storage schema
Store struct {
Block *Block
Receipts []*action.Receipt
}
)
// Serialize returns the serialized byte stream of Store
func (in *Store) Serialize() ([]byte, error) {
return proto.Marshal(in.ToProto())
}
// ToProto converts to proto message
func (in *Store) ToProto() *iotextypes.BlockStore {
receipts := []*iotextypes.Receipt{}
for _, r := range in.Receipts {
receipts = append(receipts, r.ConvertToReceiptPb())
}
return &iotextypes.BlockStore{
Block: in.Block.ConvertToBlockPb(),
Receipts: receipts,
}
}
// FromProto converts from proto message
func (in *Store) FromProto(pb *iotextypes.BlockStore) error {
in.Block = &Block{}
if err := in.Block.ConvertFromBlockPb(pb.Block); err != nil {
return err
}
// verify merkle root can match after deserialize
if err := in.Block.VerifyTxRoot(in.Block.CalculateTxRoot()); err != nil {
return err
}
in.Receipts = []*action.Receipt{}
for _, receiptPb := range pb.Receipts {
receipt := &action.Receipt{}
receipt.ConvertFromReceiptPb(receiptPb)
in.Receipts = append(in.Receipts, receipt)
}
return nil
}
// Deserialize parses the byte stream into Store
func (in *Store) Deserialize(buf []byte) error {
pbStore := &iotextypes.BlockStore{}
if err := proto.Unmarshal(buf, pbStore); err != nil {
return err
}
return in.FromProto(pbStore)
}
// DeserializeBlockStoresPb decode byte stream into BlockStores pb message
func DeserializeBlockStoresPb(buf []byte) (*iotextypes.BlockStores, error) {
pbStores := &iotextypes.BlockStores{}
if err := proto.Unmarshal(buf, pbStores); err != nil {
return nil, err
}
return pbStores, nil
}
| 1 | 23,495 | move this line together to after line 16, group internal packages together | iotexproject-iotex-core | go |
@@ -0,0 +1,12 @@
+class CreateFeedbacks < ActiveRecord::Migration
+ def change
+ create_table :feedbacks do |t|
+ t.integer :rating
+ t.integer :more_info
+ t.string :uuid
+ t.string :project_name
+
+ t.timestamps null: false
+ end
+ end
+end | 1 | 1 | 8,391 | How about using a reference(project_id) instead of project name | blackducksoftware-ohloh-ui | rb |
|
@@ -133,12 +133,14 @@ var (
Explorer: Explorer{
Enabled: false,
IsTest: false,
+ UseRDS: false,
Port: 14004,
TpsWindow: 10,
MaxTransferPayloadBytes: 1024,
},
Indexer: Indexer{
Enabled: false,
+ NodeAddr: "",
},
System: System{
HeartbeatInterval: 10 * time.Second, | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"flag"
"os"
"time"
"github.com/pkg/errors"
uconfig "go.uber.org/config"
"google.golang.org/grpc/keepalive"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/pkg/keypair"
)
// IMPORTANT: to define a config, add a field or a new config type to the existing config types. In addition, provide
// the default value in Default var.
func init() {
flag.StringVar(&_overwritePath, "config-path", "", "Config path")
flag.StringVar(&_secretPath, "secret-path", "", "Secret path")
flag.StringVar(&_subChainPath, "sub-config-path", "", "Sub chain Config path")
}
var (
// overwritePath is the path to the config file which overwrite default values
_overwritePath string
// secretPath is the path to the config file store secret values
_secretPath string
_subChainPath string
)
const (
// DelegateType represents the delegate node type
DelegateType = "delegate"
// FullNodeType represents the full node type
FullNodeType = "full_node"
// LightweightType represents the lightweight type
LightweightType = "lightweight"
// RollDPoSScheme means randomized delegated proof of stake
RollDPoSScheme = "ROLLDPOS"
// StandaloneScheme means that the node creates a block periodically regardless of others (if there is any)
StandaloneScheme = "STANDALONE"
// NOOPScheme means that the node does not create any block
NOOPScheme = "NOOP"
)
var (
// Default is the default config
Default = Config{
NodeType: FullNodeType,
Network: Network{
Host: "127.0.0.1",
Port: 4689,
MsgLogsCleaningInterval: 2 * time.Second,
MsgLogRetention: 5 * time.Second,
HealthCheckInterval: time.Second,
SilentInterval: 5 * time.Second,
PeerMaintainerInterval: time.Second,
PeerForceDisconnectionRoundInterval: 0,
AllowMultiConnsPerHost: false,
NumPeersLowerBound: 5,
NumPeersUpperBound: 5,
PingInterval: time.Second,
RateLimitEnabled: false,
RateLimitPerSec: 10000,
RateLimitWindowSize: 60 * time.Second,
BootstrapNodes: make([]string, 0),
TLSEnabled: false,
CACrtPath: "",
PeerCrtPath: "",
PeerKeyPath: "",
KLClientParams: keepalive.ClientParameters{},
KLServerParams: keepalive.ServerParameters{},
KLPolicy: keepalive.EnforcementPolicy{},
MaxMsgSize: 10485760,
PeerDiscovery: true,
TopologyPath: "",
TTL: 3,
},
Chain: Chain{
ChainDBPath: "/tmp/chain.db",
TrieDBPath: "/tmp/trie.db",
ID: 1,
ProducerPubKey: keypair.EncodePublicKey(keypair.ZeroPublicKey),
ProducerPrivKey: keypair.EncodePrivateKey(keypair.ZeroPrivateKey),
InMemTest: false,
GenesisActionsPath: "",
NumCandidates: 101,
EnableFallBackToFreshDB: false,
},
ActPool: ActPool{
MaxNumActsPerPool: 32000,
MaxNumActsPerAcct: 2000,
MaxNumActsToPick: 0,
},
Consensus: Consensus{
Scheme: NOOPScheme,
RollDPoS: RollDPoS{
DelegateInterval: 10 * time.Second,
ProposerInterval: 10 * time.Second,
UnmatchedEventTTL: 3 * time.Second,
UnmatchedEventInterval: 100 * time.Millisecond,
RoundStartTTL: 10 * time.Second,
AcceptProposeTTL: time.Second,
AcceptProposalEndorseTTL: time.Second,
AcceptCommitEndorseTTL: time.Second,
Delay: 5 * time.Second,
NumSubEpochs: 1,
EventChanSize: 10000,
NumDelegates: 21,
EnableDummyBlock: true,
TimeBasedRotation: false,
EnableDKG: false,
},
BlockCreationInterval: 10 * time.Second,
},
BlockSync: BlockSync{
Interval: 10 * time.Second,
BufferSize: 16,
},
Dispatcher: Dispatcher{
EventChanSize: 10000,
},
Explorer: Explorer{
Enabled: false,
IsTest: false,
Port: 14004,
TpsWindow: 10,
MaxTransferPayloadBytes: 1024,
},
Indexer: Indexer{
Enabled: false,
},
System: System{
HeartbeatInterval: 10 * time.Second,
HTTPProfilingPort: 0,
HTTPMetricsPort: 8080,
},
DB: DB{
NumRetries: 3,
},
}
// ErrInvalidCfg indicates the invalid config value
ErrInvalidCfg = errors.New("invalid config value")
// Validates is the collection of config validation functions
Validates = []Validate{
ValidateKeyPair,
ValidateConsensusScheme,
ValidateRollDPoS,
ValidateDispatcher,
ValidateExplorer,
ValidateNetwork,
ValidateActPool,
ValidateChain,
}
)
// Network is the config struct for network package
type (
Network struct {
Host string `yaml:"host"`
Port int `yaml:"port"`
MsgLogsCleaningInterval time.Duration `yaml:"msgLogsCleaningInterval"`
MsgLogRetention time.Duration `yaml:"msgLogRetention"`
HealthCheckInterval time.Duration `yaml:"healthCheckInterval"`
SilentInterval time.Duration `yaml:"silentInterval"`
PeerMaintainerInterval time.Duration `yaml:"peerMaintainerInterval"`
// Force disconnecting a random peer every given number of peer maintenance rounds
PeerForceDisconnectionRoundInterval int `yaml:"peerForceDisconnectionRoundInterval"`
AllowMultiConnsPerHost bool `yaml:"allowMultiConnsPerHost"`
NumPeersLowerBound uint `yaml:"numPeersLowerBound"`
NumPeersUpperBound uint `yaml:"numPeersUpperBound"`
PingInterval time.Duration `yaml:"pingInterval"`
RateLimitEnabled bool `yaml:"rateLimitEnabled"`
RateLimitPerSec uint64 `yaml:"rateLimitPerSec"`
RateLimitWindowSize time.Duration `yaml:"rateLimitWindowSize"`
BootstrapNodes []string `yaml:"bootstrapNodes"`
TLSEnabled bool `yaml:"tlsEnabled"`
CACrtPath string `yaml:"caCrtPath"`
PeerCrtPath string `yaml:"peerCrtPath"`
PeerKeyPath string `yaml:"peerKeyPath"`
KLClientParams keepalive.ClientParameters `yaml:"klClientParams"`
KLServerParams keepalive.ServerParameters `yaml:"klServerParams"`
KLPolicy keepalive.EnforcementPolicy `yaml:"klPolicy"`
MaxMsgSize int `yaml:"maxMsgSize"`
PeerDiscovery bool `yaml:"peerDiscovery"`
TopologyPath string `yaml:"topologyPath"`
TTL int32 `yaml:"ttl"`
}
// Chain is the config struct for blockchain package
Chain struct {
ChainDBPath string `yaml:"chainDBPath"`
TrieDBPath string `yaml:"trieDBPath"`
ID uint32 `yaml:"id"`
ProducerPubKey string `yaml:"producerPubKey"`
ProducerPrivKey string `yaml:"producerPrivKey"`
// InMemTest creates in-memory DB file for local testing
InMemTest bool `yaml:"inMemTest"`
GenesisActionsPath string `yaml:"genesisActionsPath"`
NumCandidates uint `yaml:"numCandidates"`
EnableFallBackToFreshDB bool `yaml:"enablefallbacktofreshdb"`
}
// Consensus is the config struct for consensus package
Consensus struct {
// There are three schemes that are supported
Scheme string `yaml:"scheme"`
RollDPoS RollDPoS `yaml:"rollDPoS"`
BlockCreationInterval time.Duration `yaml:"blockCreationInterval"`
}
// BlockSync is the config struct for the BlockSync
BlockSync struct {
Interval time.Duration `yaml:"interval"` // update duration
BufferSize uint64 `yaml:"bufferSize"`
}
// RollDPoS is the config struct for RollDPoS consensus package
RollDPoS struct {
DelegateInterval time.Duration `yaml:"delegateInterval"`
ProposerInterval time.Duration `yaml:"proposerInterval"`
UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"`
UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"`
RoundStartTTL time.Duration `yaml:"roundStartTTL"`
AcceptProposeTTL time.Duration `yaml:"acceptProposeTTL"`
AcceptProposalEndorseTTL time.Duration `yaml:"acceptProposalEndorseTTL"`
AcceptCommitEndorseTTL time.Duration `yaml:"acceptCommitEndorseTTL"`
Delay time.Duration `yaml:"delay"`
NumSubEpochs uint `yaml:"numSubEpochs"`
EventChanSize uint `yaml:"eventChanSize"`
NumDelegates uint `yaml:"numDelegates"`
EnableDummyBlock bool `yaml:"enableDummyBlock"`
TimeBasedRotation bool `yaml:"timeBasedRotation"`
EnableDKG bool `yaml:"enableDKG"`
}
// Dispatcher is the dispatcher config
Dispatcher struct {
EventChanSize uint `yaml:"eventChanSize"`
}
// Explorer is the explorer service config
Explorer struct {
Enabled bool `yaml:"enabled"`
IsTest bool `yaml:"isTest"`
Port int `yaml:"addr"`
TpsWindow int `yaml:"tpsWindow"`
// MaxTransferPayloadBytes limits how many bytes a payload can contain at most
MaxTransferPayloadBytes uint64 `yaml:"maxTransferPayloadBytes"`
}
// Indexer is the index service config
Indexer struct {
Enabled bool `yaml:"enabled"`
}
// System is the system config
System struct {
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`
// HTTPProfilingPort is the port number to access golang performance profiling data of a blockchain node. It is
// 0 by default, meaning performance profiling has been disabled
HTTPProfilingPort int `yaml:"httpProfilingPort"`
HTTPMetricsPort int `yaml:"httpMetricsPort"`
}
// ActPool is the actpool config
ActPool struct {
// MaxNumActsPerPool indicates maximum number of actions the whole actpool can hold
MaxNumActsPerPool uint64 `yaml:"maxNumActsPerPool"`
// MaxNumActsPerAcct indicates maximum number of actions an account queue can hold
MaxNumActsPerAcct uint64 `yaml:"maxNumActsPerAcct"`
// MaxNumActsToPick indicates maximum number of actions to pick to mint a block. Default is 0, which means no
// limit on the number of actions to pick.
MaxNumActsToPick uint64 `yaml:"maxNumActsToPick"`
}
// DB is the boltDB config
DB struct {
// NumRetries is the number of retries
NumRetries uint8 `yaml:"numRetries"`
// RDS is the config for rds
RDS RDS `yaml:"RDS"`
}
// RDS is the cloud rds config
RDS struct {
// AwsRDSEndpoint is the endpoint of aws rds
AwsRDSEndpoint string `yaml:"awsRDSEndpoint"`
// AwsRDSPort is the port of aws rds
AwsRDSPort uint64 `yaml:"awsRDSPort"`
// AwsRDSUser is the user to access aws rds
AwsRDSUser string `yaml:"awsRDSUser"`
// AwsPass is the pass to access aws rds
// AwsPass is the password to access aws rds
// AwsDBName is the db name of aws rds
AwsDBName string `yaml:"awsDBName"`
}
// Config is the root config struct, each package's config should be put as its sub struct
Config struct {
NodeType string `yaml:"nodeType"`
Network Network `yaml:"network"`
Chain Chain `yaml:"chain"`
ActPool ActPool `yaml:"actPool"`
Consensus Consensus `yaml:"consensus"`
BlockSync BlockSync `yaml:"blockSync"`
Dispatcher Dispatcher `yaml:"dispatcher"`
Explorer Explorer `yaml:"explorer"`
Indexer Indexer `yaml:"indexer"`
System System `yaml:"system"`
DB DB `yaml:"db"`
}
// Validate is the interface of validating the config
Validate func(*Config) error
)
// New creates a config instance. It first loads the default configs. If the config path is not empty, it will read from
// the file and override the default configs. By default, it will apply all validation functions. To bypass validation,
// use DoNotValidate instead.
func New(validates ...Validate) (*Config, error) {
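// Sources are merged in order, so the overwrite file takes precedence over the static defaults and the secret file overrides both.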
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
if _overwritePath != "" {
opts = append(opts, uconfig.File(_overwritePath))
}
if _secretPath != "" {
opts = append(opts, uconfig.File(_secretPath))
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(&cfg); err != nil {
return nil, errors.Wrap(err, "failed to validate config")
}
}
return &cfg, nil
}
// NewSub creates the config for a sub chain.
func NewSub(validates ...Validate) (*Config, error) {
if _subChainPath == "" {
return nil, nil
}
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
opts = append(opts, uconfig.File(_subChainPath))
if _secretPath != "" {
opts = append(opts, uconfig.File(_secretPath))
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(&cfg); err != nil {
return nil, errors.Wrap(err, "failed to validate config")
}
}
return &cfg, nil
}
// IsDelegate returns true if the node type is Delegate
func (cfg *Config) IsDelegate() bool {
return cfg.NodeType == DelegateType
}
// IsFullnode returns true if the node type is Fullnode
func (cfg *Config) IsFullnode() bool {
return cfg.NodeType == FullNodeType
}
// IsLightweight returns true if the node type is Lightweight
func (cfg *Config) IsLightweight() bool {
return cfg.NodeType == LightweightType
}
// BlockchainAddress returns the address derived from the configured chain ID and public key
func (cfg *Config) BlockchainAddress() (address.Address, error) {
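// The address is derived by hashing the producer public key and scoping it to the configured chain ID.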
pk, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey)
if err != nil {
return nil, errors.Wrapf(err, "error when decoding public key %s", cfg.Chain.ProducerPubKey)
}
pkHash := keypair.HashPubKey(pk)
return address.New(cfg.Chain.ID, pkHash[:]), nil
}
// KeyPair returns the decoded public and private key pair
func (cfg *Config) KeyPair() (keypair.PublicKey, keypair.PrivateKey, error) {
pk, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey)
if err != nil {
return keypair.ZeroPublicKey,
keypair.ZeroPrivateKey,
errors.Wrapf(err, "error when decoding public key %s", cfg.Chain.ProducerPubKey)
}
sk, err := keypair.DecodePrivateKey(cfg.Chain.ProducerPrivKey)
if err != nil {
return keypair.ZeroPublicKey,
keypair.ZeroPrivateKey,
errors.Wrapf(err, "error when decoding private key %s", cfg.Chain.ProducerPrivKey)
}
return pk, sk, nil
}
// ValidateKeyPair validates the block producer address
func ValidateKeyPair(cfg *Config) error {
priKey, err := keypair.DecodePrivateKey(cfg.Chain.ProducerPrivKey)
if err != nil {
return err
}
pubKey, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey)
if err != nil {
return err
}
// Validate producer pubkey and prikey by signing a dummy message and verifying it
validationMsg := "connecting the physical world block by block"
sig := crypto.EC283.Sign(priKey, []byte(validationMsg))
if !crypto.EC283.Verify(pubKey, []byte(validationMsg), sig) {
return errors.Wrap(ErrInvalidCfg, "block producer has unmatched pubkey and prikey")
}
return nil
}
// ValidateChain validates the chain configuration
func ValidateChain(cfg *Config) error {
if cfg.Chain.NumCandidates <= 0 {
return errors.Wrapf(ErrInvalidCfg, "candidate number should be greater than 0")
}
if cfg.Consensus.Scheme == RollDPoSScheme && cfg.Chain.NumCandidates < cfg.Consensus.RollDPoS.NumDelegates {
return errors.Wrapf(ErrInvalidCfg, "candidate number should be greater than or equal to delegate number")
}
return nil
}
// ValidateConsensusScheme validates if the scheme and node type match
func ValidateConsensusScheme(cfg *Config) error {
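// Delegate nodes may run any scheme, full and lightweight nodes are restricted to NOOP, and unknown node types are rejected.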
switch cfg.NodeType {
case DelegateType:
case FullNodeType:
if cfg.Consensus.Scheme != NOOPScheme {
return errors.Wrap(ErrInvalidCfg, "consensus scheme of fullnode should be NOOP")
}
case LightweightType:
if cfg.Consensus.Scheme != NOOPScheme {
return errors.Wrap(ErrInvalidCfg, "consensus scheme of lightweight node should be NOOP")
}
default:
return errors.Wrapf(ErrInvalidCfg, "unknown node type %s", cfg.NodeType)
}
return nil
}
// ValidateDispatcher validates the dispatcher configs
func ValidateDispatcher(cfg *Config) error {
if cfg.Dispatcher.EventChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "dispatcher event chan size should be greater than 0")
}
return nil
}
// ValidateRollDPoS validates the roll-DPoS configs
func ValidateRollDPoS(cfg *Config) error {
if cfg.Consensus.Scheme == RollDPoSScheme && cfg.Consensus.RollDPoS.EventChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS event chan size should be greater than 0")
}
if cfg.Consensus.Scheme == RollDPoSScheme && cfg.Consensus.RollDPoS.NumDelegates <= 0 {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS event delegate number should be greater than 0")
}
if cfg.Consensus.Scheme == RollDPoSScheme &&
cfg.Consensus.RollDPoS.EnableDummyBlock &&
cfg.Consensus.RollDPoS.TimeBasedRotation {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS should enable dummy block when doing time based rotation")
}
return nil
}
// ValidateExplorer validates the explorer configs
func ValidateExplorer(cfg *Config) error {
if cfg.Explorer.Enabled && cfg.Explorer.TpsWindow <= 0 {
return errors.Wrap(ErrInvalidCfg, "tps window is not a positive integer when the explorer is enabled")
}
return nil
}
// ValidateNetwork validates the network configs
func ValidateNetwork(cfg *Config) error {
if !cfg.Network.PeerDiscovery && cfg.Network.TopologyPath == "" {
return errors.Wrap(ErrInvalidCfg, "either peer discover should be enabled or a topology should be given")
}
return nil
}
// ValidateActPool validates the given config
func ValidateActPool(cfg *Config) error {
maxNumActPerPool := cfg.ActPool.MaxNumActsPerPool
maxNumActPerAcct := cfg.ActPool.MaxNumActsPerAcct
if maxNumActPerPool <= 0 || maxNumActPerAcct <= 0 {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool or per account cannot be zero or negative",
)
}
if maxNumActPerPool < maxNumActPerAcct {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool cannot be less than maximum number of actions per account",
)
}
return nil
}
// DoNotValidate skips validation of the given config
func DoNotValidate(cfg *Config) error { return nil }
| 1 | 12,754 | File is not `gofmt`-ed with `-s` | iotexproject-iotex-core | go |
@@ -235,6 +235,10 @@ public class FileDownloadServiceBean implements java.io.Serializable {
dataFile = guestbookResponse.getDataFile();
}
}
+ //For tools to get the dataset and datasetversion ids, we need a full DataFile object (not a findCheapAndEasy() copy)
+ if(dataFile.getFileMetadata()==null) {
+ dataFile=datafileService.find(dataFile.getId());
+ }
ExternalToolHandler externalToolHandler = new ExternalToolHandler(externalTool, dataFile, apiToken);
// Back when we only had TwoRavens, the downloadType was always "Explore". Now we persist the name of the tool (i.e. "TwoRavens", "Data Explorer", etc.)
guestbookResponse.setDownloadtype(externalTool.getDisplayName()); | 1 | package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.users.ApiToken;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.datasetutility.WorldMapPermissionHelper;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreateGuestbookResponseCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RequestAccessCommand;
import edu.harvard.iq.dataverse.externaltools.ExternalTool;
import edu.harvard.iq.dataverse.externaltools.ExternalToolHandler;
import edu.harvard.iq.dataverse.util.FileUtil;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.faces.context.FacesContext;
import javax.inject.Inject;
import javax.inject.Named;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
/**
*
* @author skraffmi
* Handles All File Download processes
* including Guestbook responses
*/
@Stateless
@Named
public class FileDownloadServiceBean implements java.io.Serializable {
@PersistenceContext(unitName = "VDCNet-ejbPU")
private EntityManager em;
@EJB
GuestbookResponseServiceBean guestbookResponseService;
@EJB
DatasetServiceBean datasetService;
@EJB
DatasetVersionServiceBean datasetVersionService;
@EJB
DataFileServiceBean datafileService;
@EJB
PermissionServiceBean permissionService;
@EJB
DataverseServiceBean dataverseService;
@EJB
UserNotificationServiceBean userNotificationService;
@EJB
AuthenticationServiceBean authService;
@Inject
DataverseSession session;
@EJB
EjbDataverseEngine commandEngine;
@Inject
DataverseRequestServiceBean dvRequestService;
@Inject WorldMapPermissionHelper worldMapPermissionHelper;
@Inject FileDownloadHelper fileDownloadHelper;
private static final Logger logger = Logger.getLogger(FileDownloadServiceBean.class.getCanonicalName());
public void writeGuestbookAndStartBatchDownload(GuestbookResponse guestbookResponse){
writeGuestbookAndStartBatchDownload(guestbookResponse, false);
}
public void writeGuestbookAndStartBatchDownload(GuestbookResponse guestbookResponse, Boolean doNotSaveGuestbookRecord){
if (guestbookResponse == null || guestbookResponse.getSelectedFileIds() == null) {
return;
}
// Let's intercept the case where a multiple download method was called,
// with only 1 file on the list. We'll treat it like a single file download
// instead:
String[] fileIds = guestbookResponse.getSelectedFileIds().split(",");
if (fileIds.length == 1) {
Long fileId;
try {
fileId = Long.parseLong(fileIds[0]);
} catch (NumberFormatException nfe) {
logger.warning("A file id passed to the writeGuestbookAndStartBatchDownload method as a string could not be converted back to Long: " + fileIds[0]);
return;
}
// If we need to create a GuestBookResponse record, we have to
// look up the DataFile object for this file:
if (!doNotSaveGuestbookRecord) {
DataFile df = datafileService.findCheapAndEasy(Long.parseLong(fileIds[0]));
guestbookResponse.setDataFile(df);
writeGuestbookResponseRecord(guestbookResponse);
}
redirectToDownloadAPI(guestbookResponse.getFileFormat(), fileId, true);
return;
}
// OK, this is a real batch (multi-file) download.
// Do we need to write GuestbookRecord entries for the files?
if (!doNotSaveGuestbookRecord) {
List<String> list = new ArrayList<>(Arrays.asList(guestbookResponse.getSelectedFileIds().split(",")));
for (String idAsString : list) {
DataFile df = datafileService.findCheapAndEasy(new Long(idAsString));
if (df != null) {
guestbookResponse.setDataFile(df);
writeGuestbookResponseRecord(guestbookResponse);
}
}
}
redirectToBatchDownloadAPI(guestbookResponse.getSelectedFileIds(), "original".equals(guestbookResponse.getFileFormat()));
}
public void writeGuestbookAndStartFileDownload(GuestbookResponse guestbookResponse, FileMetadata fileMetadata, String format) {
if(!fileMetadata.getDatasetVersion().isDraft()){
guestbookResponse = guestbookResponseService.modifyDatafileAndFormat(guestbookResponse, fileMetadata, format);
writeGuestbookResponseRecord(guestbookResponse);
}
// Make sure to set the "do not write Guestbook response" flag to TRUE when calling the Access API:
redirectToDownloadAPI(format, fileMetadata.getDataFile().getId(), true);
logger.fine("issued file download redirect for filemetadata "+fileMetadata.getId()+", datafile "+fileMetadata.getDataFile().getId());
}
public void writeGuestbookAndStartFileDownload(GuestbookResponse guestbookResponse) {
if (guestbookResponse.getDataFile() == null) {
logger.warning("writeGuestbookAndStartFileDownload(GuestbookResponse) called without the DataFile in the GuestbookResponse.");
return;
}
writeGuestbookResponseRecord(guestbookResponse);
redirectToDownloadAPI(guestbookResponse.getFileFormat(), guestbookResponse.getDataFile().getId());
logger.fine("issued file download redirect for datafile "+guestbookResponse.getDataFile().getId());
}
public void writeGuestbookResponseRecord(GuestbookResponse guestbookResponse, FileMetadata fileMetadata, String format) {
if(!fileMetadata.getDatasetVersion().isDraft()){
guestbookResponse = guestbookResponseService.modifyDatafileAndFormat(guestbookResponse, fileMetadata, format);
writeGuestbookResponseRecord(guestbookResponse);
}
}
public void writeGuestbookResponseRecord(GuestbookResponse guestbookResponse) {
try {
CreateGuestbookResponseCommand cmd = new CreateGuestbookResponseCommand(dvRequestService.getDataverseRequest(), guestbookResponse, guestbookResponse.getDataset());
commandEngine.submit(cmd);
} catch (CommandException e) {
// if an error occurs here then the download won't happen, so there is no need for response records
}
}
// The "guestBookRecord(s)AlreadyWritten" parameter in the 2 methods
// below (redirectToBatchDownloadAPI() and redirectToDownloadAPI(), for the
// multiple- and single-file downloads respectively) are passed to the
// Download API, where it is treated as a "SKIP writing the GuestbookResponse
// record for this download on the API side" flag. In other words, we want
// to create and save this record *either* on the UI, or the API side - but
// not both.
// As of now (Aug. 2018) we always set this flag to true when redirecting the
// user to the Access API. That's because we have either just created the
// record ourselves, on the UI side; or we have skipped creating one,
// because this was a draft file and we don't want to count the download.
// But either way, it is NEVER the API side's job to count the download that
// was initiated in the GUI.
// But note that this may change - there may be some future situations where it will
// become necessary again, to pass the job of creating the access record
// to the API.
private void redirectToBatchDownloadAPI(String multiFileString, Boolean guestbookRecordsAlreadyWritten, Boolean downloadOriginal){
String fileDownloadUrl = "/api/access/datafiles/" + multiFileString;
if (guestbookRecordsAlreadyWritten && !downloadOriginal){
fileDownloadUrl += "?gbrecs=true";
} else if (guestbookRecordsAlreadyWritten && downloadOriginal){
fileDownloadUrl += "?gbrecs=true&format=original";
} else if (!guestbookRecordsAlreadyWritten && downloadOriginal){
fileDownloadUrl += "?format=original";
}
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(fileDownloadUrl);
} catch (IOException ex) {
logger.info("Failed to issue a redirect to file download url.");
}
}
private void redirectToDownloadAPI(String downloadType, Long fileId, boolean guestBookRecordAlreadyWritten) {
String fileDownloadUrl = FileUtil.getFileDownloadUrlPath(downloadType, fileId, guestBookRecordAlreadyWritten);
logger.fine("Redirecting to file download url: " + fileDownloadUrl);
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(fileDownloadUrl);
} catch (IOException ex) {
logger.info("Failed to issue a redirect to file download url (" + fileDownloadUrl + "): " + ex);
}
}
private void redirectToDownloadAPI(String downloadType, Long fileId) {
redirectToDownloadAPI(downloadType, fileId, true);
}
private void redirectToBatchDownloadAPI(String multiFileString, Boolean downloadOriginal){
redirectToBatchDownloadAPI(multiFileString, true, downloadOriginal);
}
/**
* Launch an "explore" tool which is a type of ExternalTool such as
* TwoRavens or Data Explorer. This method may be invoked directly from the
* xhtml if no popup is required (no terms of use, no guestbook, etc.).
*/
public void explore(GuestbookResponse guestbookResponse, FileMetadata fmd, ExternalTool externalTool) {
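// An API token is looked up only for authenticated users; anonymous sessions continue without one.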
ApiToken apiToken = null;
User user = session.getUser();
if (user instanceof AuthenticatedUser) {
AuthenticatedUser authenticatedUser = (AuthenticatedUser) user;
apiToken = authService.findApiTokenByUser(authenticatedUser);
}
DataFile dataFile = null;
if (fmd != null) {
dataFile = fmd.getDataFile();
} else {
if (guestbookResponse != null) {
dataFile = guestbookResponse.getDataFile();
}
}
ExternalToolHandler externalToolHandler = new ExternalToolHandler(externalTool, dataFile, apiToken);
// Back when we only had TwoRavens, the downloadType was always "Explore". Now we persist the name of the tool (i.e. "TwoRavens", "Data Explorer", etc.)
guestbookResponse.setDownloadtype(externalTool.getDisplayName());
String toolUrl = externalToolHandler.getToolUrlWithQueryParams();
logger.fine("Exploring with " + toolUrl);
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(toolUrl);
} catch (IOException ex) {
logger.info("Problem exploring with " + toolUrl + " - " + ex);
}
// This is the old logic from TwoRavens, null checks and all.
if (guestbookResponse != null && guestbookResponse.isWriteResponse()
&& ((fmd != null && fmd.getDataFile() != null) || guestbookResponse.getDataFile() != null)) {
if (guestbookResponse.getDataFile() == null && fmd != null) {
guestbookResponse.setDataFile(fmd.getDataFile());
}
if (fmd == null || !fmd.getDatasetVersion().isDraft()) {
writeGuestbookResponseRecord(guestbookResponse);
}
}
}
public String startWorldMapDownloadLink(GuestbookResponse guestbookResponse, FileMetadata fmd){
if (guestbookResponse != null && guestbookResponse.isWriteResponse() && ((fmd != null && fmd.getDataFile() != null) || guestbookResponse.getDataFile() != null)){
if(guestbookResponse.getDataFile() == null && fmd != null){
guestbookResponse.setDataFile(fmd.getDataFile());
}
if (fmd == null || !fmd.getDatasetVersion().isDraft()){
writeGuestbookResponseRecord(guestbookResponse);
}
}
DataFile file = null;
if (fmd != null){
file = fmd.getDataFile();
}
if (guestbookResponse != null && guestbookResponse.getDataFile() != null && file == null){
file = guestbookResponse.getDataFile();
}
String retVal = worldMapPermissionHelper.getMapLayerMetadata(file).getLayerLink();
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(retVal);
return retVal;
} catch (IOException ex) {
logger.info("Failed to issue a redirect to file download url.");
}
return retVal;
}
public Boolean canSeeTwoRavensExploreButton(){
return false;
}
public Boolean canUserSeeExploreWorldMapButton(){
return false;
}
public void downloadDatasetCitationXML(Dataset dataset) {
downloadCitationXML(null, dataset, false);
}
public void downloadDatafileCitationXML(FileMetadata fileMetadata) {
downloadCitationXML(fileMetadata, null, false);
}
public void downloadDirectDatafileCitationXML(FileMetadata fileMetadata) {
downloadCitationXML(fileMetadata, null, true);
}
public void downloadCitationXML(FileMetadata fileMetadata, Dataset dataset, boolean direct) {
        DataCitation citation = null;
        if (dataset != null) {
            citation = new DataCitation(dataset.getLatestVersion());
        } else {
            citation = new DataCitation(fileMetadata, direct);
        }
FacesContext ctx = FacesContext.getCurrentInstance();
HttpServletResponse response = (HttpServletResponse) ctx.getExternalContext().getResponse();
response.setContentType("text/xml");
String fileNameString;
if (fileMetadata == null || fileMetadata.getLabel() == null) {
// Dataset-level citation:
fileNameString = "attachment;filename=" + getFileNameDOI(citation.getPersistentId()) + ".xml";
} else {
// Datafile-level citation:
fileNameString = "attachment;filename=" + getFileNameDOI(citation.getPersistentId()) + "-" + FileUtil.getCiteDataFileFilename(citation.getFileTitle(), FileUtil.FileCitationExtension.ENDNOTE);
}
response.setHeader("Content-Disposition", fileNameString);
try {
ServletOutputStream out = response.getOutputStream();
citation.writeAsEndNoteCitation(out);
out.flush();
ctx.responseComplete();
        } catch (IOException e) {
            logger.info("Failed to write EndNote citation to the response output stream: " + e);
        }
}
public void downloadDatasetCitationRIS(Dataset dataset) {
downloadCitationRIS(null, dataset, false);
}
public void downloadDatafileCitationRIS(FileMetadata fileMetadata) {
downloadCitationRIS(fileMetadata, null, false);
}
public void downloadDirectDatafileCitationRIS(FileMetadata fileMetadata) {
downloadCitationRIS(fileMetadata, null, true);
}
public void downloadCitationRIS(FileMetadata fileMetadata, Dataset dataset, boolean direct) {
        DataCitation citation = null;
        if (dataset != null) {
            citation = new DataCitation(dataset.getLatestVersion());
        } else {
            citation = new DataCitation(fileMetadata, direct);
        }
FacesContext ctx = FacesContext.getCurrentInstance();
HttpServletResponse response = (HttpServletResponse) ctx.getExternalContext().getResponse();
response.setContentType("application/download");
String fileNameString;
if (fileMetadata == null || fileMetadata.getLabel() == null) {
// Dataset-level citation:
fileNameString = "attachment;filename=" + getFileNameDOI(citation.getPersistentId()) + ".ris";
} else {
// Datafile-level citation:
fileNameString = "attachment;filename=" + getFileNameDOI(citation.getPersistentId()) + "-" + FileUtil.getCiteDataFileFilename(citation.getFileTitle(), FileUtil.FileCitationExtension.RIS);
}
response.setHeader("Content-Disposition", fileNameString);
try {
ServletOutputStream out = response.getOutputStream();
citation.writeAsRISCitation(out);
out.flush();
ctx.responseComplete();
        } catch (IOException e) {
            logger.info("Failed to write RIS citation to the response output stream: " + e);
        }
}
private String getFileNameDOI(GlobalId id) {
return "DOI:" + id.getAuthority() + "_" + id.getIdentifier();
}
public void downloadDatasetCitationBibtex(Dataset dataset) {
downloadCitationBibtex(null, dataset, false);
}
public void downloadDatafileCitationBibtex(FileMetadata fileMetadata) {
downloadCitationBibtex(fileMetadata, null, false);
}
public void downloadDirectDatafileCitationBibtex(FileMetadata fileMetadata) {
downloadCitationBibtex(fileMetadata, null, true);
}
public void downloadCitationBibtex(FileMetadata fileMetadata, Dataset dataset, boolean direct) {
        DataCitation citation = null;
        if (dataset != null) {
            citation = new DataCitation(dataset.getLatestVersion());
        } else {
            citation = new DataCitation(fileMetadata, direct);
        }
//SEK 12/3/2018 changing this to open the json in a new tab.
FacesContext ctx = FacesContext.getCurrentInstance();
HttpServletResponse response = (HttpServletResponse) ctx.getExternalContext().getResponse();
response.setContentType("application/json");
String fileNameString;
if (fileMetadata == null || fileMetadata.getLabel() == null) {
// Dataset-level citation:
fileNameString = "inline;filename=" + getFileNameDOI(citation.getPersistentId()) + ".bib";
} else {
// Datafile-level citation:
fileNameString = "inline;filename=" + getFileNameDOI(citation.getPersistentId()) + "-" + FileUtil.getCiteDataFileFilename(citation.getFileTitle(), FileUtil.FileCitationExtension.BIBTEX);
}
response.setHeader("Content-Disposition", fileNameString);
try {
ServletOutputStream out = response.getOutputStream();
citation.writeAsBibtexCitation(out);
out.flush();
ctx.responseComplete();
        } catch (IOException e) {
            logger.info("Failed to write BibTeX citation to the response output stream: " + e);
        }
}
public boolean requestAccess(Long fileId) {
if (dvRequestService.getDataverseRequest().getAuthenticatedUser() == null){
return false;
}
DataFile file = datafileService.find(fileId);
if (!file.getFileAccessRequesters().contains((AuthenticatedUser)session.getUser())) {
try {
commandEngine.submit(new RequestAccessCommand(dvRequestService.getDataverseRequest(), file));
return true;
} catch (CommandException ex) {
logger.info("Unable to request access for file id " + fileId + ". Exception: " + ex);
}
}
return false;
}
public void sendRequestFileAccessNotification(Dataset dataset, Long fileId, AuthenticatedUser requestor) {
permissionService.getUsersWithPermissionOn(Permission.ManageDatasetPermissions, dataset).stream().forEach((au) -> {
userNotificationService.sendNotification(au, new Timestamp(new Date().getTime()), UserNotification.Type.REQUESTFILEACCESS, fileId, null, requestor);
});
}
} | 1 | 39,240 | @qqmyers this looks fine but have you seen any performance issue here? If so, we could write some helper method like doesExternalToolNeedDataset(externalTool). But if there's trivial performance impact, not worth it. | IQSS-dataverse | java |
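A hedged sketch of the helper named in the review comment above (doesExternalToolNeedDataset). The getToolParameters() accessor and the {fileId}/{datasetId} reserved words are assumptions about the external-tool manifest rather than confirmed Dataverse API:

    // Hypothetical helper, sketched from the review comment above; not part of the original file.
    // It would let explore() skip the DataFile/Dataset lookup for tools whose URL template
    // never references file- or dataset-specific parameters.
    private boolean doesExternalToolNeedDataset(ExternalTool externalTool) {
        String params = externalTool.getToolParameters(); // assumed accessor for the query-parameter template
        if (params == null) {
            return false;
        }
        return params.contains("{fileId}") || params.contains("{datasetId}");
    }

explore() could then consult this helper before resolving fmd/guestbookResponse into a DataFile, which is the trivial-cost check the comment weighs against adding a new method.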
@@ -37,6 +37,8 @@
#define STATICBOXVERTICALSPACER 10
#define DAYOFWEEKBORDERSIZE 10
+bool usingLocalPrefs;
+
///////////////////////////////////////////////////////////////////////////
// NOTE: On MS Windows with wxWidgets 3.0, controls inside a wxStaticBox | 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2015 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
// This code was initially generated with wxFormBuilder (version Oct 13 2006)
// http://www.wxformbuilder.org/
#include "stdwx.h"
#include "Events.h"
#include "diagnostics.h"
#include "util.h"
#include "mfile.h"
#include "miofile.h"
#include "parse.h"
#include "LogBOINC.h"
#include "BOINCGUIApp.h"
#include "MainDocument.h"
#include "SkinManager.h"
#include "DlgAdvPreferencesBase.h"
#define STATICBOXBORDERSIZE 8
#define STATICBOXVERTICALSPACER 10
#define DAYOFWEEKBORDERSIZE 10
///////////////////////////////////////////////////////////////////////////
// NOTE: On MS Windows with wxWidgets 3.0, controls inside a wxStaticBox
// don't refresh properly unless they are children of the wxStaticBox!
//
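// Illustration (not part of the original file): because of this, the controls created
// below use the wxStaticBox itself as their parent, for example
//     wxStaticBox* box = new wxStaticBox(panel, wxID_ANY, _("Usage limits"));
//     wxCheckBox* chk = new wxCheckBox(box, wxID_ANY, _("Limit"));  // parent is the box, not the panel
// rather than being parented to the enclosing tab/panel.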
CDlgAdvPreferencesBase::CDlgAdvPreferencesBase( wxWindow* parent, int id, wxString title, wxPoint pos, wxSize size, int style ) :
wxDialog( parent, id, title, pos, size, style )
{
wxString strCaption = title;
if (strCaption.IsEmpty()) {
CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
wxASSERT(pSkinAdvanced);
wxASSERT(wxDynamicCast(pSkinAdvanced, CSkinAdvanced));
strCaption.Printf(_("%s - Computing preferences"), pSkinAdvanced->GetApplicationShortName().c_str());
}
this->SetExtraStyle( this->GetExtraStyle() | wxWS_EX_VALIDATE_RECURSIVELY );
this->Centre( wxBOTH );
this->SetTitle(strCaption);
wxBoxSizer* dialogSizer = new wxBoxSizer( wxVERTICAL );
bool usingLocalPrefs = doesLocalPrefsFileExist();
if (web_prefs_url->IsEmpty()) {
m_bmpWarning = NULL;
} else {
wxStaticBox* topControlsStaticBox = new wxStaticBox( this, -1, wxEmptyString );
wxStaticBoxSizer* topControlsSizer = new wxStaticBoxSizer( topControlsStaticBox, wxHORIZONTAL );
m_bmpWarning = new wxStaticBitmap( topControlsStaticBox, ID_DEFAULT, wxNullBitmap, wxDefaultPosition, wxDefaultSize, 0 );
m_bmpWarning->SetMinSize( wxSize( 48,48 ) );
topControlsSizer->Add( m_bmpWarning, 0, wxALIGN_CENTER_VERTICAL|wxALL, 0 );
wxBoxSizer* legendSizer = new wxBoxSizer( wxVERTICAL );
if (usingLocalPrefs) {
legendSizer->Add(
new wxStaticText( topControlsStaticBox, ID_DEFAULT,
_("Using local preferences.\n"
"Click \"Use web prefs\" to use web-based preferences from"
), wxDefaultPosition, wxDefaultSize, 0 ),
0, wxALL, 1
);
} else {
legendSizer->Add(
new wxStaticText( topControlsStaticBox, ID_DEFAULT,
_("Using web-based preferences from"),
wxDefaultPosition, wxDefaultSize, 0 ),
0, wxALL, 1
);
}
legendSizer->Add(
new wxHyperlinkCtrl(
topControlsStaticBox, wxID_ANY, *web_prefs_url, *web_prefs_url,
wxDefaultPosition, wxDefaultSize, wxHL_DEFAULT_STYLE
),
0, wxLEFT, 5
);
if (!usingLocalPrefs) {
legendSizer->Add(
new wxStaticText( topControlsStaticBox, ID_DEFAULT,
_("Set values and click Save to use local preferences instead."),
wxDefaultPosition, wxDefaultSize, 0 ),
0, wxALL, 1
);
}
topControlsSizer->Add( legendSizer, 1, wxALL, 1 );
m_btnClear = new wxButton( topControlsStaticBox, ID_BTN_CLEAR, _("Use web prefs"), wxDefaultPosition, wxDefaultSize, 0 );
m_btnClear->SetToolTip( _("Restore web-based preferences and close the dialog.") );
if (!usingLocalPrefs) {
m_btnClear->Hide();
}
topControlsSizer->Add( m_btnClear, 0, wxALIGN_BOTTOM|wxALL, 4 );
#ifdef __WXMAC__
dialogSizer->Add( topControlsSizer, 0, wxTOP|wxLEFT|wxRIGHT|wxEXPAND, 10 );
#else
dialogSizer->Add( topControlsSizer, 0, wxALL|wxEXPAND, 5 );
#endif
}
m_panelControls = new wxPanel( this, ID_DEFAULT, wxDefaultPosition, wxDefaultSize, wxTAB_TRAVERSAL );
m_panelControls->SetExtraStyle( wxWS_EX_VALIDATE_RECURSIVELY );
wxBoxSizer* notebookSizer = new wxBoxSizer( wxVERTICAL );
m_Notebook = new wxNotebook( m_panelControls, ID_DEFAULT, wxDefaultPosition, wxDefaultSize, wxNB_FLAT|wxNB_TOP );
m_Notebook->SetExtraStyle( wxWS_EX_VALIDATE_RECURSIVELY );
// Note: we must set the third AddPage argument ("select") to
// true for each page or ToolTips won't initialize properly.
m_panelProcessor = createProcessorTab(m_Notebook);
m_Notebook->AddPage( m_panelProcessor, _("Computing"), true );
m_panelNetwork = createNetworkTab(m_Notebook);
m_Notebook->AddPage( m_panelNetwork, _("Network"), true );
m_panelDiskAndMemory = createDiskAndMemoryTab(m_Notebook);
m_Notebook->AddPage( m_panelDiskAndMemory, _("Disk and memory"), true );
m_panelDailySchedules = createDailySchedulesTab(m_Notebook);
m_Notebook->AddPage( m_panelDailySchedules, _("Daily schedules"), true );
notebookSizer->Add( m_Notebook, 1, wxEXPAND | wxALL, 1 );
m_panelControls->SetSizer( notebookSizer );
m_panelControls->Layout();
notebookSizer->Fit( m_panelControls );
dialogSizer->Add( m_panelControls, 1, wxALL|wxEXPAND, 5 );
m_panelButtons = new wxPanel( this, ID_DEFAULT, wxDefaultPosition, wxDefaultSize, wxTAB_TRAVERSAL );
wxBoxSizer* buttonSizer = new wxBoxSizer( wxHORIZONTAL );
m_btnOK = new wxButton( m_panelButtons, wxID_OK, _("Save"), wxDefaultPosition, wxDefaultSize, 0 );
m_btnOK->SetToolTip( _("Save all values and close the dialog.") );
buttonSizer->Add( m_btnOK, 0, wxALL, 5 );
m_btnCancel = new wxButton( m_panelButtons, wxID_CANCEL, _("Cancel"), wxDefaultPosition, wxDefaultSize, 0 );
m_btnCancel->SetToolTip( _("Close the dialog without saving.") );
m_btnCancel->SetDefault();
buttonSizer->Add( m_btnCancel, 0, wxALL, 5 );
m_btnHelp = new wxButton( m_panelButtons, ID_HELPBOINC, _("Help"), wxDefaultPosition, wxDefaultSize, 0 );
m_btnHelp->SetToolTip( _("Shows the preferences web page.") );
buttonSizer->Add( m_btnHelp, 0, wxALL, 5 );
m_panelButtons->SetSizer( buttonSizer );
m_panelButtons->Layout();
buttonSizer->Fit( m_panelButtons );
dialogSizer->Add( m_panelButtons, 0, wxALIGN_BOTTOM|wxALIGN_CENTER_HORIZONTAL|wxALL, 1 );
dialogSizer->Fit( this );
this->SetSizer( dialogSizer );
}
void CDlgAdvPreferencesBase::addNewRowToSizer(
wxSizer* toSizer, wxString& toolTipText,
wxWindow* first, wxWindow* second, wxWindow* third,
wxWindow* fourth, wxWindow* fifth)
{
wxBoxSizer* rowSizer = new wxBoxSizer( wxHORIZONTAL );
#ifdef __WXMSW__
// MSW adds space to the right of checkbox label
if (first->IsKindOf(CLASSINFO(wxCheckBox))) {
rowSizer->Add(first, 0, wxTOP | wxBOTTOM |wxLEFT, 5 );
} else
#endif
rowSizer->Add(first, 0, wxALL, 5 );
first->SetToolTip(toolTipText);
rowSizer->Add(second, 0, wxALL, 2 );
second->SetToolTip(toolTipText);
rowSizer->Add(third, 0, wxALL, 5 );
third->SetToolTip(toolTipText);
if (fourth) {
rowSizer->Add(fourth, 0, wxALL, 2 );
fourth->SetToolTip(toolTipText);
}
if (fifth) {
rowSizer->Add(fifth, 0, wxALL, 5 );
fifth->SetToolTip(toolTipText);
}
toSizer->Add( rowSizer, 0, 0, 1 );
}
wxPanel* CDlgAdvPreferencesBase::createProcessorTab(wxNotebook* notebook)
{
CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
wxASSERT(pSkinAdvanced);
wxSize textCtrlSize = getTextCtrlSize(wxT("999.99"));
wxPanel* processorTab = new wxPanel( notebook, ID_TABPAGE_PROC, wxDefaultPosition, wxDefaultSize, wxTAB_TRAVERSAL );
processorTab->SetExtraStyle( wxWS_EX_VALIDATE_RECURSIVELY );
wxBoxSizer* processorTabSizer = new wxBoxSizer( wxVERTICAL );
wxStaticBox* usageLimitsStaticBox = new wxStaticBox(processorTab, -1, _("Usage limits") );
wxStaticBoxSizer* usageLimitsBoxSizer = new wxStaticBoxSizer(usageLimitsStaticBox, wxVERTICAL);
makeStaticBoxLabelItalic(usageLimitsStaticBox);
/*xgettext:no-c-format*/
wxString MaxCPUPctTT(_("Keep some CPUs free for other applications. Example: 75% means use 6 cores on an 8-core CPU."));
wxStaticText* staticText20 = new wxStaticText(
usageLimitsStaticBox, ID_DEFAULT, _("Use at most"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtProcUseProcessors = new wxTextCtrl( usageLimitsStaticBox, ID_TXTPROCUSEPROCESSORS, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
/*xgettext:no-c-format*/
wxStaticText* staticText21 = new wxStaticText( usageLimitsStaticBox, ID_DEFAULT, _("% of the CPUs"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(usageLimitsBoxSizer, MaxCPUPctTT, staticText20, m_txtProcUseProcessors, staticText21);
/*xgettext:no-c-format*/
wxString MaxCPUTimeTT(_("Suspend/resume computing every few seconds to reduce CPU temperature and energy usage. Example: 75% means compute for 3 seconds, wait for 1 second, and repeat."));
wxStaticText* staticText22 = new wxStaticText(
usageLimitsStaticBox, ID_DEFAULT, _("Use at most"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtProcUseCPUTime = new wxTextCtrl( usageLimitsStaticBox, ID_TXTPOCUSECPUTIME, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
/*xgettext:no-c-format*/
wxStaticText* staticText23 = new wxStaticText( usageLimitsStaticBox, ID_DEFAULT, _("% of CPU time"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(usageLimitsBoxSizer, MaxCPUTimeTT, staticText22, m_txtProcUseCPUTime, staticText23);
processorTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
processorTabSizer->Add( usageLimitsBoxSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
wxStaticBox* suspendComputingStaticBox = new wxStaticBox(processorTab, -1, _("When to suspend") );
wxStaticBoxSizer* suspendComputingBoxSizer = new wxStaticBoxSizer(suspendComputingStaticBox, wxVERTICAL);
makeStaticBoxLabelItalic(suspendComputingStaticBox);
m_chkProcOnBatteries = new wxCheckBox(
suspendComputingStaticBox, ID_CHKPROCONBATTERIES,
_("Suspend when computer is on battery"), wxDefaultPosition, wxDefaultSize, 0
);
m_chkProcOnBatteries->SetToolTip(
_("Check this to suspend computing on portables when running on battery power.")
);
suspendComputingBoxSizer->Add( m_chkProcOnBatteries, 0, wxALL, 5 );
m_chkProcInUse = new wxCheckBox(
suspendComputingStaticBox, ID_CHKPROCINUSE,
_("Suspend when computer is in use"), wxDefaultPosition, wxDefaultSize, 0
);
m_chkProcInUse->SetToolTip(
_("Check this to suspend computing and file transfers when you're using the computer.")
);
suspendComputingBoxSizer->Add( m_chkProcInUse, 0, wxALL, 5 );
m_chkGPUProcInUse = new wxCheckBox(
suspendComputingStaticBox, ID_CHKGPUPROCINUSE,
_("Suspend GPU computing when computer is in use"), wxDefaultPosition, wxDefaultSize, 0
);
m_chkGPUProcInUse->SetToolTip(
_("Check this to suspend GPU computing when you're using the computer.")
);
suspendComputingBoxSizer->Add( m_chkGPUProcInUse, 0, wxALL, 5 );
// min idle time
wxString ProcIdleForTT(_("This determines when the computer is considered 'in use'."));
wxStaticText* staticText24 = new wxStaticText(
suspendComputingStaticBox, ID_DEFAULT,
// context: 'In use' means mouse/keyboard input in last ___ minutes
_("'In use' means mouse/keyboard input in last"),
wxDefaultPosition, wxDefaultSize, 0
);
m_txtProcIdleFor = new wxTextCtrl(
suspendComputingStaticBox, ID_TXTPROCIDLEFOR, wxEmptyString, wxDefaultPosition, getTextCtrlSize(wxT("999.99")), wxTE_RIGHT
);
wxStaticText* staticText25 = new wxStaticText(
suspendComputingStaticBox, ID_DEFAULT,
// context: 'In use' means mouse/keyboard input in last ___ minutes
_("minutes"),
wxDefaultPosition, wxDefaultSize, 0
);
addNewRowToSizer(suspendComputingBoxSizer, ProcIdleForTT, staticText24, m_txtProcIdleFor, staticText25);
// max CPU load
wxString MaxLoadCheckBoxText = wxEmptyString;
MaxLoadCheckBoxText.Printf(_("Suspend when non-BOINC CPU usage is above"));
wxString MaxLoadTT(_("Suspend computing when your computer is busy running other programs."));
m_chkMaxLoad = new wxCheckBox(
suspendComputingStaticBox, ID_CHKMAXLOAD, MaxLoadCheckBoxText, wxDefaultPosition, wxDefaultSize, 0);
m_txtMaxLoad = new wxTextCtrl(
suspendComputingStaticBox, ID_TXTMAXLOAD, wxEmptyString, wxDefaultPosition, getTextCtrlSize(wxT("100.00")), wxTE_RIGHT
);
wxStaticText* staticText26 = new wxStaticText( suspendComputingStaticBox, ID_DEFAULT, wxT("%"),
wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(suspendComputingBoxSizer, MaxLoadTT, m_chkMaxLoad, m_txtMaxLoad, staticText26);
suspendComputingBoxSizer->Add(
new wxStaticText( suspendComputingStaticBox, ID_DEFAULT, _("To suspend by time of day, see the \"Daily Schedules\" section."), wxDefaultPosition, wxDefaultSize, 0),
0, wxALL, 5
);
processorTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
processorTabSizer->Add( suspendComputingBoxSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
wxStaticBox* miscProcStaticBox = new wxStaticBox(
processorTab, -1,
// Context: heading for a group of miscellaneous preferences
_("Other")
);
wxStaticBoxSizer* miscProcBoxSizer = new wxStaticBoxSizer( miscProcStaticBox, wxVERTICAL );
makeStaticBoxLabelItalic(miscProcStaticBox);
// buffer sizes
wxString NetConnectIntervalTT(_("Store at least enough tasks to keep the computer busy for this long."));
wxStaticText* staticText30 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Store at least ___ days of work
_("Store at least"),
wxDefaultPosition, wxDefaultSize, 0
);
m_txtNetConnectInterval = new wxTextCtrl(
miscProcStaticBox, ID_TXTNETCONNECTINTERVAL, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT
);
wxStaticText* staticText31 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Store at least ___ days of work
_("days of work"),
wxDefaultPosition, wxDefaultSize, 0
);
addNewRowToSizer(miscProcBoxSizer, NetConnectIntervalTT, staticText30, m_txtNetConnectInterval, staticText31);
wxString NetAdditionalDaysTT(_("Store additional tasks above the minimum level. Determines how much work is requested when contacting a project."));
wxStaticText* staticText331 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Store up to an additional ___ days of work
_("Store up to an additional"),
wxDefaultPosition, wxDefaultSize, 0
);
staticText331->SetToolTip(NetAdditionalDaysTT);
m_txtNetAdditionalDays = new wxTextCtrl(
miscProcStaticBox, ID_TXTNETADDITIONALDAYS, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT
);
wxStaticText* staticText341 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Store up to an additional ___ days of work
_("days of work"),
wxDefaultPosition, wxDefaultSize, 0
);
addNewRowToSizer(miscProcBoxSizer, NetAdditionalDaysTT, staticText331, m_txtNetAdditionalDays, staticText341);
wxString ProcSwitchEveryTT = wxEmptyString;
ProcSwitchEveryTT.Printf(_("If you run several projects, %s may switch between them this often."), pSkinAdvanced->GetApplicationShortName().c_str());
wxStaticText* staticText18 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Switch between tasks every ___ minutes
_("Switch between tasks every"),
wxDefaultPosition, wxDefaultSize, 0
);
m_txtProcSwitchEvery = new wxTextCtrl( miscProcStaticBox, ID_TXTPROCSWITCHEVERY, wxEmptyString, wxDefaultPosition, getTextCtrlSize(wxT("9999.99")), wxTE_RIGHT );
wxStaticText* staticText19 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Switch between tasks every ___ minutes
_("minutes"),
wxDefaultPosition, wxDefaultSize, 0
);
addNewRowToSizer(miscProcBoxSizer, ProcSwitchEveryTT, staticText18, m_txtProcSwitchEvery, staticText19);
wxString DiskWriteToDiskTT(_("This controls how often tasks save their state to disk, so that they later can be continued from that point."));
wxStaticText* staticText46 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Request tasks to checkpoint at most every ___ seconds
_("Request tasks to checkpoint at most every"),
wxDefaultPosition, wxDefaultSize, 0
);
m_txtDiskWriteToDisk = new wxTextCtrl( miscProcStaticBox, ID_TXTDISKWRITETODISK, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText47 = new wxStaticText(
miscProcStaticBox, ID_DEFAULT,
// context: Request tasks to checkpoint at most every ___ seconds
_("seconds"),
wxDefaultPosition, wxDefaultSize, 0
);
addNewRowToSizer(miscProcBoxSizer, DiskWriteToDiskTT, staticText46, m_txtDiskWriteToDisk, staticText47);
miscProcBoxSizer->AddSpacer(1); // Ensure staticText22 is fully visible on Mac
processorTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
processorTabSizer->Add( miscProcBoxSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
processorTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
processorTab->SetSizer( processorTabSizer );
processorTab->Layout();
processorTabSizer->Fit( processorTab );
return processorTab;
}
wxPanel* CDlgAdvPreferencesBase::createNetworkTab(wxNotebook* notebook)
{
CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
wxASSERT(pSkinAdvanced);
wxSize textCtrlSize = getTextCtrlSize(wxT("9999.99"));
wxPanel* networkTab = new wxPanel( notebook, ID_TABPAGE_NET, wxDefaultPosition, wxDefaultSize, wxTAB_TRAVERSAL );
networkTab->SetExtraStyle( wxWS_EX_VALIDATE_RECURSIVELY );
wxBoxSizer* networkTabSizer = new wxBoxSizer( wxVERTICAL );
wxStaticBox* networkUsageLimitsStaticBox = new wxStaticBox( networkTab, -1, _("Usage limits") );
wxStaticBoxSizer* networkUsageLimitsBoxSizer = new wxStaticBoxSizer( networkUsageLimitsStaticBox, wxVERTICAL );
makeStaticBoxLabelItalic(networkUsageLimitsStaticBox);
// upload/download rates
wxString NetDownloadRateTT(_("Limit the download rate of file transfers."));
m_chkNetDownloadRate = new wxCheckBox( networkUsageLimitsStaticBox, ID_CHKNETDOWNLOADRATE, _("Limit download rate to"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtNetDownloadRate = new wxTextCtrl( networkUsageLimitsStaticBox, ID_TXTNETDOWNLOADRATE, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText33 = new wxStaticText( networkUsageLimitsStaticBox, ID_DEFAULT, _("KB/second"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(networkUsageLimitsBoxSizer, NetDownloadRateTT, m_chkNetDownloadRate, m_txtNetDownloadRate, staticText33);
wxString NetUploadRateTT(_("Limit the upload rate of file transfers."));
m_chkNetUploadRate = new wxCheckBox( networkUsageLimitsStaticBox, ID_CHKNETUPLOADRATE, _("Limit upload rate to"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtNetUploadRate = new wxTextCtrl( networkUsageLimitsStaticBox, ID_TXTNETUPLOADRATE, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText35 = new wxStaticText( networkUsageLimitsStaticBox, ID_DEFAULT, _("KB/second"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(networkUsageLimitsBoxSizer, NetUploadRateTT, m_chkNetUploadRate, m_txtNetUploadRate, staticText35);
// long-term quota
wxString daily_xfer_limitTT = wxEmptyString;
daily_xfer_limitTT.Printf(_("Example: %s should transfer at most 2000 MB of data every 30 days."), pSkinAdvanced->GetApplicationShortName().c_str());
m_chk_daily_xfer_limit = new wxCheckBox( networkUsageLimitsStaticBox, ID_CHKDAILYXFERLIMIT, _("Limit usage to"), wxDefaultPosition, wxDefaultSize, 0 );
m_txt_daily_xfer_limit_mb = new wxTextCtrl( networkUsageLimitsStaticBox, ID_TXTNETDOWNLOADRATE, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText_daily_xfer2 = new wxStaticText( networkUsageLimitsStaticBox, ID_DEFAULT, _("MB every"), wxDefaultPosition, wxDefaultSize, 0 );
m_txt_daily_xfer_period_days = new wxTextCtrl( networkUsageLimitsStaticBox, ID_TXTNETUPLOADRATE, wxEmptyString, wxDefaultPosition, getTextCtrlSize(wxT("999.99")), wxTE_RIGHT );
wxStaticText* staticText_daily_xfer4 = new wxStaticText( networkUsageLimitsStaticBox, ID_DEFAULT, _("days"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(networkUsageLimitsBoxSizer, daily_xfer_limitTT, m_chk_daily_xfer_limit, m_txt_daily_xfer_limit_mb, staticText_daily_xfer2, m_txt_daily_xfer_period_days, staticText_daily_xfer4);
networkUsageLimitsBoxSizer->Add(
new wxStaticText( networkUsageLimitsStaticBox, ID_DEFAULT, _("To limit transfers by time of day, see the \"Daily Schedules\" section."), wxDefaultPosition, wxDefaultSize, 0),
0, wxALL, 5
);
networkTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
networkTabSizer->Add( networkUsageLimitsBoxSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
// Context: heading for a group of miscellaneous preferences
wxStaticBox* connectOptionsStaticBox = new wxStaticBox(
networkTab, -1,
// Context: heading for a group of miscellaneous preferences
_("Other")
);
wxStaticBoxSizer* connectOptionsSizer = new wxStaticBoxSizer( connectOptionsStaticBox, wxVERTICAL );
makeStaticBoxLabelItalic(connectOptionsStaticBox);
wxString NetSkipImageVerificationTT = wxEmptyString;
NetSkipImageVerificationTT.Printf(_("Check this only if your Internet provider modifies image files. Skipping verification reduces the security of %s."), pSkinAdvanced->GetApplicationShortName().c_str());
m_chkNetSkipImageVerification = new wxCheckBox( connectOptionsStaticBox, ID_CHKNETSKIPIMAGEVERIFICATION, _("Skip data verification for image files"), wxDefaultPosition, wxDefaultSize, 0 );
m_chkNetSkipImageVerification->SetToolTip(NetSkipImageVerificationTT);
connectOptionsSizer->Add( m_chkNetSkipImageVerification, 0, wxALL, 5 );
m_chkNetConfirmBeforeConnect = new wxCheckBox( connectOptionsStaticBox, ID_CHKNETCONFIRMBEFORECONNECT, _("Confirm before connecting to Internet"), wxDefaultPosition, wxDefaultSize, 0 );
m_chkNetConfirmBeforeConnect->SetToolTip( _("Useful only if you have a modem, ISDN or VPN connection.") );
connectOptionsSizer->Add( m_chkNetConfirmBeforeConnect, 0, wxALL, 5 );
m_chkNetDisconnectWhenDone = new wxCheckBox( connectOptionsStaticBox, ID_CHKNETDISCONNECTWHENDONE, _("Disconnect when done"), wxDefaultPosition, wxDefaultSize, 0 );
m_chkNetDisconnectWhenDone->SetToolTip( _("Useful only if you have a modem, ISDN or VPN connection.") );
connectOptionsSizer->Add( m_chkNetDisconnectWhenDone, 0, wxALL, 5 );
networkTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
networkTabSizer->Add( connectOptionsSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
networkTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
networkTab->SetSizer( networkTabSizer );
networkTab->Layout();
networkTabSizer->Fit( networkTab );
return networkTab;
}
wxPanel* CDlgAdvPreferencesBase::createDiskAndMemoryTab(wxNotebook* notebook)
{
CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
wxASSERT(pSkinAdvanced);
wxSize textCtrlSize = getTextCtrlSize(wxT("9999.99"));
wxPanel* diskMemoryTab = new wxPanel( notebook, ID_TABPAGE_DISK, wxDefaultPosition, wxDefaultSize, wxTAB_TRAVERSAL );
diskMemoryTab->SetExtraStyle( wxWS_EX_VALIDATE_RECURSIVELY );
wxBoxSizer* diskAndMemoryTabSizer = new wxBoxSizer( wxVERTICAL );
wxStaticBox* diskUsageStaticBox = new wxStaticBox( diskMemoryTab, -1, _("Disk") );
wxStaticBoxSizer* diskUsageBoxSizer = new wxStaticBoxSizer( diskUsageStaticBox, wxVERTICAL );
makeStaticBoxLabelItalic(diskUsageStaticBox);
wxString MostRestrictiveText = wxEmptyString;
MostRestrictiveText.Printf(_("%s will use the most restrictive of these settings:"), pSkinAdvanced->GetApplicationShortName().c_str());
diskUsageBoxSizer->Add(new wxStaticText( diskUsageStaticBox, -1, MostRestrictiveText, wxDefaultPosition, wxDefaultSize, 0),
0, wxALL, 5
);
wxString DiskMaxSpaceTT = wxEmptyString;
DiskMaxSpaceTT.Printf(_("Limit the total amount of disk space used by %s."), pSkinAdvanced->GetApplicationShortName().c_str());
m_chkDiskMaxSpace = new wxCheckBox (
diskUsageStaticBox, ID_CHKDISKMAXSPACE, _("Use no more than"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtDiskMaxSpace = new wxTextCtrl( diskUsageStaticBox, ID_TXTDISKMAXSPACE,wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText41 = new wxStaticText( diskUsageStaticBox, ID_DEFAULT, _("GB"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(diskUsageBoxSizer, DiskMaxSpaceTT, m_chkDiskMaxSpace, m_txtDiskMaxSpace, staticText41);
wxString DiskLeastFreeTT = wxEmptyString;
DiskLeastFreeTT.Printf(_("Limit disk usage to leave this much free space on the volume where %s stores data."), pSkinAdvanced->GetApplicationShortName().c_str());
m_chkDiskLeastFree = new wxCheckBox (
diskUsageStaticBox, ID_CHKDISKLEASTFREE, _("Leave at least"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtDiskLeastFree = new wxTextCtrl( diskUsageStaticBox, ID_TXTDISKLEASTFREE, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText43 = new wxStaticText( diskUsageStaticBox, ID_DEFAULT, _("GB free"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(diskUsageBoxSizer, DiskLeastFreeTT, m_chkDiskLeastFree, m_txtDiskLeastFree, staticText43);
wxString DiskMaxOfTotalTT = wxEmptyString;
DiskMaxOfTotalTT.Printf(_("Limit the percentage of disk space used by %s on the volume where it stores data."), pSkinAdvanced->GetApplicationShortName().c_str());
m_chkDiskMaxOfTotal = new wxCheckBox (
diskUsageStaticBox, ID_CHKDISKMAXOFTOTAL, _("Use no more than"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtDiskMaxOfTotal = new wxTextCtrl( diskUsageStaticBox, ID_TXTDISKMAXOFTOTAL, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
/*xgettext:no-c-format*/
wxStaticText* staticText45 = new wxStaticText( diskUsageStaticBox, ID_DEFAULT, _("% of total"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(diskUsageBoxSizer, DiskMaxOfTotalTT, m_chkDiskMaxOfTotal, m_txtDiskMaxOfTotal, staticText45);
diskAndMemoryTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
diskAndMemoryTabSizer->Add( diskUsageBoxSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
wxStaticBox* memoryUsageStaticBox = new wxStaticBox( diskMemoryTab, -1, _("Memory") );
wxStaticBoxSizer* memoryUsageBoxSizer = new wxStaticBoxSizer( memoryUsageStaticBox, wxVERTICAL );
makeStaticBoxLabelItalic(memoryUsageStaticBox);
wxString MemoryMaxInUseTT = wxEmptyString;
MemoryMaxInUseTT.Printf(_("Limit the memory used by %s when you're using the computer."), pSkinAdvanced->GetApplicationShortName().c_str());
wxStaticText* staticText50 = new wxStaticText( memoryUsageStaticBox, ID_DEFAULT, _("When computer is in use, use at most"), wxDefaultPosition, wxDefaultSize, 0 );
textCtrlSize = getTextCtrlSize(wxT("100.00"));
m_txtMemoryMaxInUse = new wxTextCtrl( memoryUsageStaticBox, ID_TXTMEMORYMAXINUSE, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
/*xgettext:no-c-format*/
wxStaticText* staticText51 = new wxStaticText( memoryUsageStaticBox, ID_DEFAULT, _("%"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(memoryUsageBoxSizer, MemoryMaxInUseTT, staticText50, m_txtMemoryMaxInUse, staticText51);
wxString MemoryMaxOnIdleTT = wxEmptyString;
MemoryMaxOnIdleTT.Printf(_("Limit the memory used by %s when you're not using the computer."), pSkinAdvanced->GetApplicationShortName().c_str());
wxStaticText* staticText52 = new wxStaticText( memoryUsageStaticBox, ID_DEFAULT, _("When computer is not in use, use at most"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtMemoryMaxOnIdle = new wxTextCtrl( memoryUsageStaticBox, ID_TXTMEMORYMAXONIDLE, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
/*xgettext:no-c-format*/
wxStaticText* staticText53 = new wxStaticText( memoryUsageStaticBox, ID_DEFAULT, _("%"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(memoryUsageBoxSizer, MemoryMaxOnIdleTT, staticText52, m_txtMemoryMaxOnIdle, staticText53);
m_chkMemoryWhileSuspended = new wxCheckBox( memoryUsageStaticBox, ID_CHKMEMORYWHILESUSPENDED, _("Leave non-GPU tasks in memory while suspended"), wxDefaultPosition, wxDefaultSize, 0 );
m_chkMemoryWhileSuspended->SetToolTip( _("If checked, suspended tasks stay in memory, and resume with no work lost. If unchecked, suspended tasks are removed from memory, and resume from their last checkpoint.") );
memoryUsageBoxSizer->Add(m_chkMemoryWhileSuspended, 0, wxALL, 5 );
wxString DiskMaxSwapTT = wxEmptyString;
DiskMaxSwapTT.Printf(_("Limit the swap space (page file) used by %s."), pSkinAdvanced->GetApplicationShortName().c_str());
wxStaticText* staticText48 = new wxStaticText( memoryUsageStaticBox, ID_DEFAULT, _("Page/swap file: use at most"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtDiskMaxSwap = new wxTextCtrl( memoryUsageStaticBox, ID_TXTDISKWRITETODISK, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
/*xgettext:no-c-format*/
wxStaticText* staticText49 = new wxStaticText( memoryUsageStaticBox, ID_DEFAULT, _("%"), wxDefaultPosition, wxDefaultSize, 0 );
addNewRowToSizer(memoryUsageBoxSizer, DiskMaxSwapTT, staticText48, m_txtDiskMaxSwap, staticText49);
diskAndMemoryTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
diskAndMemoryTabSizer->Add( memoryUsageBoxSizer, 0, wxLEFT | wxRIGHT | wxEXPAND, STATICBOXBORDERSIZE );
diskAndMemoryTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
diskMemoryTab->SetSizer( diskAndMemoryTabSizer );
diskMemoryTab->Layout();
diskAndMemoryTabSizer->Fit( diskMemoryTab );
return diskMemoryTab;
}
wxPanel* CDlgAdvPreferencesBase::createDailySchedulesTab(wxNotebook* notebook)
{
wxSize textCtrlSize = getTextCtrlSize(wxT("23:59 "));
wxString andString(_("and"));
wxString toString(wxT(" ")+_("to")+wxT(" "));
wxPanel* dailySchedulesTab = new wxPanel(
notebook, ID_TABPAGE_SCHED, wxDefaultPosition,
wxDefaultSize, wxTAB_TRAVERSAL
);
dailySchedulesTab->SetExtraStyle( wxWS_EX_VALIDATE_RECURSIVELY );
wxBoxSizer* dailySchedulesTabSizer = new wxBoxSizer( wxVERTICAL );
// Computing schedule
//
wxStaticBox* computingTimesStaticBox = new wxStaticBox(
dailySchedulesTab, -1, _("Computing")
);
wxStaticBoxSizer* computingTimesStaticBoxSizer = new wxStaticBoxSizer(
computingTimesStaticBox, wxVERTICAL
);
makeStaticBoxLabelItalic(computingTimesStaticBox);
wxString ProcEveryDayTT(_("Compute only during a particular period each day."));
m_chkProcEveryDay = new wxCheckBox(
computingTimesStaticBox, ID_CHKPROCEVERYDAY,
_("Compute only between"), wxDefaultPosition, wxDefaultSize, 0
);
m_txtProcEveryDayStart = new wxTextCtrl(
computingTimesStaticBox, ID_TXTPROCEVERYDAYSTART, wxEmptyString,
wxDefaultPosition, textCtrlSize, wxTE_RIGHT
);
wxStaticText* staticText25 = new wxStaticText(
computingTimesStaticBox, ID_DEFAULT, andString, wxDefaultPosition,
wxDefaultSize, wxALIGN_CENTRE
);
m_txtProcEveryDayStop = new wxTextCtrl(
computingTimesStaticBox, ID_TXTPROCEVERYDAYSTOP, wxEmptyString,
wxDefaultPosition, textCtrlSize, wxTE_RIGHT
);
addNewRowToSizer(
computingTimesStaticBoxSizer, ProcEveryDayTT, m_chkProcEveryDay,
m_txtProcEveryDayStart, staticText25, m_txtProcEveryDayStop
);
wxStaticBox* procSpecialTimesStaticBox = new wxStaticBox(
computingTimesStaticBox, -1, _("Day-of-week override")
);
wxStaticBoxSizer* procSpecialTimesStaticBoxSizer = new wxStaticBoxSizer(
procSpecialTimesStaticBox, wxVERTICAL
);
makeStaticBoxLabelItalic(procSpecialTimesStaticBox);
wxStaticText* staticText36 = new wxStaticText(
procSpecialTimesStaticBox, ID_DEFAULT,
_("Override the times above on the selected days:"),
wxDefaultPosition, wxDefaultSize, 0
);
#ifdef __WXMAC__
procSpecialTimesStaticBoxSizer->Add( staticText36, 0, wxLEFT, 5 );
#else
procSpecialTimesStaticBoxSizer->Add( staticText36, 0, wxLEFT, DAYOFWEEKBORDERSIZE );
#endif
procSpecialTimesStaticBoxSizer->AddSpacer(3);
// procSpecialTimesStaticBox->SetToolTip(_("On each selected \"override\" day, ignore the \"Every day\" times above and suspend if the time is outside the range shown for that day"));
wxFlexGridSizer* procDaysSizer = new wxFlexGridSizer( 4, 9, 0, 0 );
procDaysSizer->SetFlexibleDirection( wxHORIZONTAL );
procDaysSizer->SetNonFlexibleGrowMode( wxFLEX_GROWMODE_SPECIFIED );
// Tooltips for Day-of-Week override wxCheckBoxes and wxTextCtrls
// are set in CDlgAdvPreferences::SetSpecialTooltips()
//
wxString procDaysTimeTT(PROC_DAY_OF_WEEK_TOOLTIP_TEXT);
m_chkProcMonday = new wxCheckBox(
procSpecialTimesStaticBox, ID_CHKPROCMONDAY, _("Monday"),
wxDefaultPosition, wxDefaultSize, 0
);
procDaysSizer->Add( m_chkProcMonday, 0, wxTOP, 5 );
m_txtProcMondayStart = new wxTextCtrl(
procSpecialTimesStaticBox, ID_TXTPROCMONDAYSTART, wxEmptyString,
wxDefaultPosition, textCtrlSize, 0
);
procDaysSizer->Add( m_txtProcMondayStart, 0, wxALL, 1 );
wxStaticText* toStringProcMonday = new wxStaticText(
procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition,
wxDefaultSize, 0
);
toStringProcMonday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcMonday , 0, wxTOP, 5 );
m_txtProcMondayStop = new wxTextCtrl(
procSpecialTimesStaticBox, ID_TXTPROCMONDAYSTOP, wxEmptyString,
wxDefaultPosition, textCtrlSize, 0
);
procDaysSizer->Add( m_txtProcMondayStop, 0, wxALL, 1 );
procDaysSizer->AddSpacer(15);
m_chkProcFriday = new wxCheckBox(
procSpecialTimesStaticBox, ID_CHKPROCFRIDAY, _("Friday"),
wxDefaultPosition, wxDefaultSize, 0
);
procDaysSizer->Add( m_chkProcFriday, 0, wxTOP, 5 );
m_txtProcFridayStart = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCFRIDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcFridayStart, 0, wxALL, 1 );
wxStaticText* toStringProcFriday = new wxStaticText( procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringProcFriday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcFriday , 0, wxTOP, 5 );
m_txtProcFridayStop = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCFRIDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcFridayStop, 0, wxALL, 1 );
m_chkProcTuesday = new wxCheckBox( procSpecialTimesStaticBox, ID_CHKPROCTUESDAY, _("Tuesday"), wxDefaultPosition, wxDefaultSize, 0 );
procDaysSizer->Add( m_chkProcTuesday, 0, wxTOP, 5 );
m_txtProcTuesdayStart = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCTUESDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcTuesdayStart, 0, wxALL, 1 );
wxStaticText* toStringProcTuesday = new wxStaticText( procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringProcTuesday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcTuesday , 0, wxTOP, 5 );
m_txtProcTuesdayStop = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCTUESDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcTuesdayStop, 0, wxALL, 1 );
procDaysSizer->AddSpacer(15);
m_chkProcSaturday = new wxCheckBox( procSpecialTimesStaticBox, ID_CHKPROCSATURDAY, _("Saturday"), wxDefaultPosition, wxDefaultSize, 0 );
procDaysSizer->Add( m_chkProcSaturday, 0, wxTOP, 5 );
m_txtProcSaturdayStart = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCSATURDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcSaturdayStart, 0, wxALL, 1 );
wxStaticText* toStringProcSaturday = new wxStaticText( procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringProcSaturday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcSaturday , 0, wxTOP, 5 );
m_txtProcSaturdayStop = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCSATURDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcSaturdayStop, 0, wxALL, 1 );
m_chkProcWednesday = new wxCheckBox( procSpecialTimesStaticBox, ID_CHKPROCWEDNESDAY, _("Wednesday"), wxDefaultPosition, wxDefaultSize, 0 );
procDaysSizer->Add( m_chkProcWednesday, 0, wxTOP, 5 );
m_txtProcWednesdayStart = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCWEDNESDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcWednesdayStart, 0, wxALL, 1 );
wxStaticText* toStringProcWednesday = new wxStaticText( procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringProcWednesday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcWednesday , 0, wxTOP, 5 );
m_txtProcWednesdayStop = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCWEDNESDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcWednesdayStop, 0, wxALL, 1 );
procDaysSizer->AddSpacer(15);
m_chkProcSunday = new wxCheckBox( procSpecialTimesStaticBox, ID_CHKPROCSUNDAY, _("Sunday"), wxDefaultPosition, wxDefaultSize, 0 );
procDaysSizer->Add( m_chkProcSunday, 0, wxTOP, 5 );
m_txtProcSundayStart = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCSUNDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcSundayStart, 0, wxALL, 1 );
wxStaticText* toStringProcSunday = new wxStaticText( procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringProcSunday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcSunday , 0, wxTOP, 5 );
m_txtProcSundayStop = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCSUNDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcSundayStop, 0, wxALL, 1 );
m_chkProcThursday = new wxCheckBox( procSpecialTimesStaticBox, ID_CHKPROCTHURSDAY, _("Thursday"), wxDefaultPosition, wxDefaultSize, 0 );
procDaysSizer->Add( m_chkProcThursday, 0, wxTOP, 5 );
m_txtProcThursdayStart = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCTHURSDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcThursdayStart, 0, wxALL, 1 );
wxStaticText* toStringProcThursday = new wxStaticText( procSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringProcThursday->SetToolTip(procDaysTimeTT);
procDaysSizer->Add(toStringProcThursday , 0, wxTOP, 5 );
m_txtProcThursdayStop = new wxTextCtrl( procSpecialTimesStaticBox, ID_TXTPROCTHURSDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
procDaysSizer->Add( m_txtProcThursdayStop, 0, wxALL, 1 );
#ifdef __WXMAC__
procSpecialTimesStaticBoxSizer->Add(
procDaysSizer, 0, wxRIGHT | wxBOTTOM, DAYOFWEEKBORDERSIZE
);
computingTimesStaticBoxSizer->Add(
procSpecialTimesStaticBoxSizer, 0, wxRIGHT | wxBOTTOM,
STATICBOXBORDERSIZE + 3
);
#else
procSpecialTimesStaticBoxSizer->Add(
procDaysSizer, 1, wxRIGHT | wxLEFT | wxBOTTOM, DAYOFWEEKBORDERSIZE
);
computingTimesStaticBoxSizer->Add(
procSpecialTimesStaticBoxSizer, 1, wxRIGHT | wxLEFT | wxBOTTOM,
STATICBOXBORDERSIZE
);
#endif
dailySchedulesTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
dailySchedulesTabSizer->Add(
computingTimesStaticBoxSizer, 1, wxRIGHT | wxLEFT | wxEXPAND,
STATICBOXBORDERSIZE
);
// Network schedule
//
wxStaticBox* networkTimesStaticBox = new wxStaticBox( dailySchedulesTab, -1, _("Network") );
wxStaticBoxSizer* networkTimesBoxSizer = new wxStaticBoxSizer( networkTimesStaticBox, wxVERTICAL );
makeStaticBoxLabelItalic(networkTimesStaticBox);
wxString NetEveryDayTT(_("Transfer files only during a particular period each day."));
m_chkNetEveryDay = new wxCheckBox(
networkTimesStaticBox, ID_CHKNETEVERYDAY, _("Transfer files only between"), wxDefaultPosition, wxDefaultSize, 0 );
m_txtNetEveryDayStart = new wxTextCtrl( networkTimesStaticBox, ID_TXTNETEVERYDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
wxStaticText* staticText37 = new wxStaticText( networkTimesStaticBox, ID_DEFAULT, andString, wxDefaultPosition, wxDefaultSize, wxALIGN_CENTRE );
m_txtNetEveryDayStop = new wxTextCtrl( networkTimesStaticBox, ID_TXTNETEVERYDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
addNewRowToSizer(networkTimesBoxSizer, NetEveryDayTT, m_chkNetEveryDay, m_txtNetEveryDayStart, staticText37, m_txtNetEveryDayStop);
wxStaticBox* netSpecialTimesStaticBox = new wxStaticBox(networkTimesStaticBox, -1, _("Day-of-week override") );
wxStaticBoxSizer* netSpecialTimesStaticBoxSizer = new wxStaticBoxSizer(netSpecialTimesStaticBox, wxVERTICAL);
makeStaticBoxLabelItalic(netSpecialTimesStaticBox);
wxStaticText* staticText39 = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, _("Override the times above on the selected days:"), wxDefaultPosition, wxDefaultSize, 0 );
#ifdef __WXMAC__
netSpecialTimesStaticBoxSizer->Add( staticText39, 0, wxLEFT, 5 );
#else
netSpecialTimesStaticBoxSizer->Add( staticText39, 0, wxLEFT, DAYOFWEEKBORDERSIZE );
#endif
netSpecialTimesStaticBoxSizer->AddSpacer(3);
// netSpecialTimesStaticBox->SetToolTip(_("On each selected \"override\" day, ignore the \"Every day\" times above and suspend if the time is outside the range shown for that day"));
// Tooltips for Day-of-Week overrides are set in CDlgAdvPreferences::SetSpecialTooltips()
wxString netDaysTimeTT(NET_DAY_OF_WEEK_TOOLTIP_TEXT);
wxFlexGridSizer* netDaysGridSizer = new wxFlexGridSizer( 4, 9, 0, 0 );
netDaysGridSizer->SetFlexibleDirection( wxHORIZONTAL );
netDaysGridSizer->SetNonFlexibleGrowMode( wxFLEX_GROWMODE_SPECIFIED );
m_chkNetMonday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETMONDAY, _("Monday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetMonday, 0, wxTOP, 5 );
m_txtNetMondayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETMONDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetMondayStart, 0, wxALL, 1 );
wxStaticText* toStringNetMonday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetMonday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetMonday , 0, wxTOP, 5 );
m_txtNetMondayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETMONDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetMondayStop, 0, wxALL, 1 );
netDaysGridSizer->AddSpacer(15);
m_chkNetFriday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETFRIDAY, _("Friday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetFriday, 0, wxTOP, 5 );
m_txtNetFridayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETFRIDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetFridayStart, 0, wxALL, 1 );
wxStaticText* toStringNetFriday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetFriday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetFriday , 0, wxTOP, 5 );
m_txtNetFridayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETFRIDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetFridayStop, 0, wxALL, 1 );
m_chkNetTuesday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETTUESDAY, _("Tuesday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetTuesday, 0, wxTOP, 5 );
m_txtNetTuesdayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETTUESDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetTuesdayStart, 0, wxALL, 1 );
wxStaticText* toStringNetTuesday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetTuesday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetTuesday , 0, wxTOP, 5 );
m_txtNetTuesdayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETTUESDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetTuesdayStop, 0, wxALL, 1 );
netDaysGridSizer->AddSpacer(15);
m_chkNetSaturday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETSATURDAY, _("Saturday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetSaturday, 0, wxTOP, 5 );
m_txtNetSaturdayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETSATURDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetSaturdayStart, 0, wxALL, 1 );
wxStaticText* toStringNetSaturday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetSaturday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetSaturday , 0, wxTOP, 5 );
m_txtNetSaturdayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETSATURDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetSaturdayStop, 0, wxALL, 1 );
m_chkNetWednesday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETWEDNESDAY, _("Wednesday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetWednesday, 0, wxTOP, 5 );
m_txtNetWednesdayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETWEDNESDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetWednesdayStart, 0, wxALL, 1 );
wxStaticText* toStringNetWednesday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetWednesday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetWednesday , 0, wxTOP, 5 );
m_txtNetWednesdayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETWEDNESDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetWednesdayStop, 0, wxALL, 1 );
netDaysGridSizer->AddSpacer(15);
m_chkNetSunday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETSUNDAY, _("Sunday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetSunday, 0, wxTOP, 5 );
m_txtNetSundayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETSUNDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetSundayStart, 0, wxALL, 1 );
wxStaticText* toStringNetSunday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetSunday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetSunday , 0, wxTOP, 5 );
m_txtNetSundayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETSUNDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetSundayStop, 0, wxALL, 1 );
m_chkNetThursday = new wxCheckBox( netSpecialTimesStaticBox, ID_CHKNETTHURSDAY, _("Thursday"), wxDefaultPosition, wxDefaultSize, 0 );
netDaysGridSizer->Add( m_chkNetThursday, 0, wxTOP, 5 );
m_txtNetThursdayStart = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETTHURSDAYSTART, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetThursdayStart, 0, wxALL, 1 );
wxStaticText* toStringNetThursday = new wxStaticText( netSpecialTimesStaticBox, ID_DEFAULT, toString, wxDefaultPosition, wxDefaultSize, 0 );
toStringNetThursday->SetToolTip(netDaysTimeTT);
netDaysGridSizer->Add(toStringNetThursday , 0, wxTOP, 5 );
m_txtNetThursdayStop = new wxTextCtrl( netSpecialTimesStaticBox, ID_TXTNETTHURSDAYSTOP, wxEmptyString, wxDefaultPosition, textCtrlSize, 0 );
netDaysGridSizer->Add( m_txtNetThursdayStop, 0, wxALL, 1 );
#ifdef __WXMAC__
netSpecialTimesStaticBoxSizer->Add( netDaysGridSizer, 1, wxRIGHT | wxBOTTOM, DAYOFWEEKBORDERSIZE );
networkTimesBoxSizer->Add(netSpecialTimesStaticBoxSizer, 1, wxRIGHT | wxBOTTOM, STATICBOXBORDERSIZE +3 );
#else
netSpecialTimesStaticBoxSizer->Add( netDaysGridSizer, 1, wxRIGHT | wxLEFT | wxBOTTOM, DAYOFWEEKBORDERSIZE );
networkTimesBoxSizer->Add(netSpecialTimesStaticBoxSizer, 1, wxRIGHT | wxLEFT | wxBOTTOM, STATICBOXBORDERSIZE );
#endif
dailySchedulesTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
dailySchedulesTabSizer->Add( networkTimesBoxSizer, 1, wxRIGHT | wxLEFT | wxEXPAND, STATICBOXBORDERSIZE );
dailySchedulesTabSizer->AddSpacer( STATICBOXVERTICALSPACER );
dailySchedulesTab->SetSizer( dailySchedulesTabSizer );
dailySchedulesTab->Layout();
dailySchedulesTabSizer->Fit( dailySchedulesTab );
return dailySchedulesTab;
}
wxSize CDlgAdvPreferencesBase::getTextCtrlSize(wxString maxText) {
int w, h, margin;
wxSize sz;
wxFont f = GetParent()->GetFont();
GetTextExtent(maxText, &w, &h, NULL, NULL, &f);
margin = w/3;
if (margin < 9) margin = 9;
sz.x = w + margin;
sz.y = wxDefaultCoord;
return sz;
}
bool CDlgAdvPreferencesBase::doesLocalPrefsFileExist() {
std::string s;
int retval;
bool local_prefs_found = false;
MIOFILE mf;
bool found_venue;
GLOBAL_PREFS web_prefs;
GLOBAL_PREFS_MASK mask;
CMainDocument* pDoc = wxGetApp().GetDocument();
wxASSERT(pDoc);
wxASSERT(wxDynamicCast(pDoc, CMainDocument));
retval = pDoc->rpc.get_global_prefs_override(s);
local_prefs_found = (retval == BOINC_SUCCESS);
s.clear();
web_prefs.init();
retval = pDoc->rpc.get_global_prefs_file(s);
if (retval) {
web_prefs_url = new wxString(wxEmptyString);
} else {
mf.init_buf_read(s.c_str());
XML_PARSER xp(&mf);
web_prefs.parse(xp, "", found_venue, mask);
web_prefs_url = new wxString(web_prefs.source_project);
}
return local_prefs_found;
}
void CDlgAdvPreferencesBase::makeStaticBoxLabelItalic(wxStaticBox* staticBox) {
#if defined(__WXMSW__) || defined(__WXGTK__)
wxFont myFont = staticBox->GetFont();
myFont.MakeItalic();
myFont.MakeBold();
staticBox->SetOwnFont(myFont);
#endif
}
| 1 | 11,203 | Make this a member variable instead (move to header inside `class` and rename to `m_bUsingLocalPrefs`). | BOINC-boinc | php |
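The closing review comment asks for the flag introduced by the patch to become a class member named m_bUsingLocalPrefs instead of a file-scope global. A minimal sketch of that change, assuming the class declaration lives in DlgAdvPreferencesBase.h (fragment only; existing members are omitted):

    // DlgAdvPreferencesBase.h (assumed fragment)
    class CDlgAdvPreferencesBase : public wxDialog {
    protected:
        bool m_bUsingLocalPrefs = false;   // replaces the file-scope "bool usingLocalPrefs;"
        bool doesLocalPrefsFileExist();
        // ... existing members unchanged ...
    };

    // DlgAdvPreferencesBase.cpp, in the constructor:
    //     m_bUsingLocalPrefs = doesLocalPrefsFileExist();
    // Later reads of usingLocalPrefs in the constructor (and any other method that needs the
    // flag) then use m_bUsingLocalPrefs without re-querying the client state.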
@@ -110,7 +110,7 @@ describe('Transactions (spec)', function() {
afterEach(() => cleanupAfterSuite(testContext));
testSuite.tests.forEach(testData => {
- const maybeSkipIt = testData.skipReason ? it.skip : it;
+ const maybeSkipIt = testData.skipReason || testSuite.name === 'pin-mongos' ? it.skip : it;
maybeSkipIt(testData.description, function() {
let testPromise = Promise.resolve();
| 1 | 'use strict';
const Promise = require('bluebird');
const path = require('path');
const fs = require('fs');
const chai = require('chai');
const expect = chai.expect;
const EJSON = require('mongodb-extjson');
// mlaunch init --replicaset --arbiter --name rs --hostname localhost --port 31000 --setParameter enableTestCommands=1 --binarypath /Users/mbroadst/Downloads/mongodb-osx-x86_64-enterprise-4.1.0-158-g3d62f3c/bin
chai.use(require('chai-subset'));
chai.config.includeStack = true;
chai.config.showDiff = true;
chai.config.truncateThreshold = 0;
function isPlainObject(value) {
return value !== null && typeof value === 'object' && Array.isArray(value) === false;
}
process.on('unhandledRejection', err => console.dir(err));
/**
* Finds placeholder values in a deeply nested object.
*
* NOTE: This also mutates the object, by removing the values for comparison
*
* @param {Object} input the object to find placeholder values in
*/
function findPlaceholders(value, parent) {
return Object.keys(value).reduce((result, key) => {
if (isPlainObject(value[key])) {
return result.concat(
findPlaceholders(value[key], [value, key]).map(x => {
if (x.path.startsWith('$')) {
x.path = key;
} else {
x.path = `${key}.${x.path}`;
}
return x;
})
);
}
if (value[key] === null) {
delete value[key];
result.push({ path: key, type: null });
} else if (value[key] === 42 || value[key] === '42') {
if (key.startsWith('$number') || value[key] === 42) {
result.push({ path: key, type: 'number' });
} else {
result.push({ path: key, type: 'string' });
}
// NOTE: fix this, it just passes the current examples
delete parent[0][parent[1]];
} else if (value[key] === '') {
result.push({ path: key, type: 'string' });
}
return result;
}, []);
}
function translateClientOptions(options) {
Object.keys(options).forEach(key => {
if (key === 'readConcernLevel') {
options.readConcern = { level: options.readConcernLevel };
delete options[key];
}
});
return options;
}
// Main test runner
describe('Transactions (spec)', function() {
const testContext = {
dbName: 'transaction-tests',
collectionName: 'test'
};
const testSuites = fs
.readdirSync(`${__dirname}/spec/transactions`)
.filter(x => x.indexOf('.json') !== -1)
.map(x =>
Object.assign(JSON.parse(fs.readFileSync(`${__dirname}/spec/transactions/${x}`)), {
name: path.basename(x, '.json')
})
);
after(() => testContext.sharedClient.close());
before(function() {
// create a shared client for admin tasks
const config = this.configuration;
testContext.url = `mongodb://${config.host}:${config.port}/${testContext.dbName}?replicaSet=${
config.replicasetName
}`;
testContext.sharedClient = config.newClient(testContext.url);
return testContext.sharedClient.connect();
});
testSuites.forEach(testSuite => {
describe(testSuite.name, {
metadata: { requires: { topology: ['replicaset', 'mongos'], mongodb: '>=3.7.x' } },
test: function() {
beforeEach(() => prepareDatabaseForSuite(testSuite, testContext));
afterEach(() => cleanupAfterSuite(testContext));
testSuite.tests.forEach(testData => {
const maybeSkipIt = testData.skipReason ? it.skip : it;
maybeSkipIt(testData.description, function() {
let testPromise = Promise.resolve();
if (testData.failPoint) {
testPromise = testPromise.then(() =>
enableFailPoint(testData.failPoint, testContext)
);
}
// run the actual test
testPromise = testPromise.then(() =>
runTestSuiteTest(this.configuration, testData, testContext)
);
if (testData.failPoint) {
testPromise = testPromise.then(() =>
disableFailPoint(testData.failPoint, testContext)
);
}
return testPromise;
});
});
}
});
});
});
// Test runner helpers
function prepareDatabaseForSuite(suite, context) {
const db = context.sharedClient.db();
const coll = db.collection(context.collectionName);
return db
.admin()
.command({ killAllSessions: [] })
.catch(() => {}) // ignore any error from this
.then(() => coll.drop({ writeConcern: 'majority' }))
.catch(err => {
if (!err.message.match(/ns not found/)) throw err;
})
.then(() => db.createCollection(context.collectionName, { w: 'majority' }))
.then(() => {
if (suite.data && Array.isArray(suite.data) && suite.data.length > 0) {
return coll.insert(suite.data, { w: 'majority' });
}
});
}
function cleanupAfterSuite(context) {
if (context.testClient) {
return context.testClient.close().then(() => {
delete context.testClient;
});
}
}
function enableFailPoint(failPoint, testContext) {
return testContext.sharedClient.db(testContext.dbName).executeDbAdminCommand(failPoint);
}
function disableFailPoint(failPoint, testContext) {
return testContext.sharedClient.db(testContext.dbName).executeDbAdminCommand({
configureFailPoint: failPoint.configureFailPoint,
mode: 'off'
});
}
let displayCommands = false;
function runTestSuiteTest(configuration, testData, context) {
const commandEvents = [];
const clientOptions = translateClientOptions(
Object.assign({ monitorCommands: true }, testData.clientOptions)
);
// test-specific client options
clientOptions.autoReconnect = false;
clientOptions.haInterval = 100;
const client = configuration.newClient(context.url, clientOptions);
return client.connect().then(client => {
context.testClient = client;
client.on('commandStarted', event => {
if (event.databaseName === context.dbName || isTransactionCommand(event.commandName)) {
commandEvents.push(event);
}
// very useful for debugging
if (displayCommands) {
console.dir(event, { depth: 5 });
}
});
const sessionOptions = Object.assign({}, testData.transactionOptions);
testData.sessionOptions = testData.sessionOptions || {};
const database = client.db();
const session0 = client.startSession(
Object.assign({}, sessionOptions, testData.sessionOptions.session0)
);
const session1 = client.startSession(
Object.assign({}, sessionOptions, testData.sessionOptions.session1)
);
// enable to see useful APM debug information at the time of actual test run
// displayCommands = true;
const operationContext = { database, session0, session1 };
let testPromise = Promise.resolve();
return testPromise
.then(() => testOperations(client, testData, operationContext))
.catch(err => {
// If the driver throws an exception / returns an error while executing this series
// of operations, store the error message.
throw err;
})
.then(() => {
session0.endSession();
session1.endSession();
return validateExpectations(commandEvents, testData, context, operationContext);
});
});
}
function validateExpectations(commandEvents, testData, testContext, operationContext) {
const session0 = operationContext.session0;
const session1 = operationContext.session1;
if (
testData.expectations &&
Array.isArray(testData.expectations) &&
testData.expectations.length > 0
) {
const actualEvents = normalizeCommandShapes(commandEvents);
const rawExpectedEvents = testData.expectations.map(x =>
linkSessionData(x.command_started_event, { session0, session1 })
);
const expectedEventPlaceholders = rawExpectedEvents.map(event =>
findPlaceholders(event.command)
);
const expectedEvents = normalizeCommandShapes(rawExpectedEvents);
expect(actualEvents).to.have.length(expectedEvents.length);
expectedEvents.forEach((expected, idx) => {
const actual = actualEvents[idx];
const placeHolders = expectedEventPlaceholders[idx]; // eslint-disable-line
expect(actual.commandName).to.equal(expected.commandName);
expect(actual.databaseName).to.equal(expected.databaseName);
const actualCommand = actual.command;
const expectedCommand = expected.command;
// handle validation of placeholder values
// placeHolders.forEach(placeholder => {
// const parsedActual = EJSON.parse(JSON.stringify(actualCommand), {
// relaxed: true
// });
// if (placeholder.type === null) {
// expect(parsedActual).to.not.have.all.nested.property(placeholder.path);
// } else if (placeholder.type === 'string') {
// expect(parsedActual).nested.property(placeholder.path).to.exist;
// expect(parsedActual)
// .nested.property(placeholder.path)
// .to.have.length.greaterThan(0);
// } else if (placeholder.type === 'number') {
// expect(parsedActual).nested.property(placeholder.path).to.exist;
// expect(parsedActual)
// .nested.property(placeholder.path)
// .to.be.greaterThan(0);
// }
// });
// compare the command
expect(actualCommand).to.containSubset(expectedCommand);
});
}
if (testData.outcome) {
if (testData.outcome.collection) {
// use the client without transactions to verify
return testContext.sharedClient
.db()
.collection(testContext.collectionName)
.find({})
.toArray()
.then(docs => {
expect(docs).to.eql(testData.outcome.collection.data);
});
}
}
}
function linkSessionData(command, context) {
const session = context[command.command.lsid];
const result = Object.assign({}, command);
result.command.lsid = JSON.parse(EJSON.stringify(session.id));
return result;
}
function normalizeCommandShapes(commands) {
return commands.map(command =>
JSON.parse(
EJSON.stringify({
command: command.command,
commandName: command.command_name ? command.command_name : command.commandName,
databaseName: command.database_name ? command.database_name : command.databaseName
})
)
);
}
function extractCrudResult(result, operation) {
if (Array.isArray(result) || !isPlainObject(result)) {
return result;
}
if (result.value) {
// some of our findAndModify results return more than just an id, so we need to pluck
const resultKeys = Object.keys(operation.result);
if (resultKeys.length === 1 && resultKeys[0] === '_id') {
return { _id: result.value._id };
}
return result.value;
}
return Object.keys(operation.result).reduce((crudResult, key) => {
if (result.hasOwnProperty(key) && result[key] != null) {
// FIXME(major): update crud results are broken and need to be changed
crudResult[key] = key === 'upsertedId' ? result[key]._id : result[key];
}
return crudResult;
}, {});
}
function isTransactionCommand(command) {
return ['startTransaction', 'commitTransaction', 'abortTransaction'].indexOf(command) !== -1;
}
function extractBulkRequests(requests) {
return requests.map(request => ({ [request.name]: request.arguments }));
}
function translateOperationName(operationName) {
if (operationName === 'runCommand') return 'command';
return operationName;
}
/**
*
* @param {Object} operation the operation definition from the spec test
* @param {Object} obj the object to call the operation on
* @param {Object} context a context object containing sessions used for the test
*/
function testOperation(operation, obj, context) {
const opOptions = {};
const args = [];
const operationName = translateOperationName(operation.name);
if (operation.arguments) {
Object.keys(operation.arguments).forEach(key => {
if (['filter', 'fieldName', 'document', 'documents', 'pipeline'].indexOf(key) !== -1) {
return args.unshift(operation.arguments[key]);
}
if (key === 'command') return args.unshift(operation.arguments[key]);
if (key === 'requests') return args.unshift(extractBulkRequests(operation.arguments[key]));
if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]);
if (key === 'session') {
if (isTransactionCommand(operationName)) return;
opOptions.session = context[operation.arguments.session];
return;
}
if (key === 'returnDocument') {
opOptions.returnOriginal = operation.arguments[key] === 'Before' ? true : false;
return;
}
if (key === 'options') {
Object.assign(opOptions, operation.arguments[key]);
if (opOptions.readPreference) {
opOptions.readPreference = opOptions.readPreference.mode.toLowerCase();
}
return;
}
if (key === 'readPreference') {
opOptions[key] = operation.arguments[key].mode.toLowerCase();
return;
}
opOptions[key] = operation.arguments[key];
});
}
if (args.length === 0 && !isTransactionCommand(operationName)) {
args.push({});
}
if (Object.keys(opOptions).length > 0) {
// NOTE: this is awful, but in order to provide options for some methods we need to add empty
// query objects.
if (operationName === 'distinct') {
args.push({});
}
args.push(opOptions);
}
let opPromise;
if (operationName === 'find' || operationName === 'aggregate') {
// `find` creates a cursor, so we need to call `toArray` on it
const cursor = obj[operationName].apply(obj, args);
opPromise = cursor.toArray();
} else {
// wrap this in a `Promise.try` because some operations might throw
opPromise = Promise.try(() => obj[operationName].apply(obj, args));
}
if (operation.result) {
const result = operation.result;
if (
result.errorContains ||
result.errorCodeName ||
result.errorLabelsContain ||
result.errorLabelsOmit
) {
return opPromise
.then(() => {
throw new Error('expected an error!');
})
.catch(err => {
const errorContains = result.errorContains;
const errorCodeName = result.errorCodeName;
const errorLabelsContain = result.errorLabelsContain;
const errorLabelsOmit = result.errorLabelsOmit;
if (errorLabelsContain) {
expect(err.errorLabels).to.include.members(errorLabelsContain);
}
if (errorLabelsOmit) {
if (err.errorLabels && Array.isArray(err.errorLabels) && err.errorLabels.length !== 0) {
expect(err.errorLabels).to.not.include.members(errorLabelsOmit);
}
}
if (operation.result.errorContains) {
expect(err).to.match(new RegExp(errorContains, 'i'));
}
if (errorCodeName) {
expect(err.codeName).to.equal(errorCodeName);
}
});
}
return opPromise.then(opResult => {
const actual = extractCrudResult(opResult, operation);
expect(actual).to.eql(operation.result);
});
}
return opPromise;
}
function convertCollectionOptions(options) {
const result = {};
Object.keys(options).forEach(key => {
if (key === 'readPreference') {
result[key] = options[key].mode.toLowerCase();
} else {
result[key] = options[key];
}
});
return result;
}
function testOperations(client, testData, operationContext) {
return testData.operations.reduce((combined, operation) => {
return combined.then(() => {
if (operation.object === 'collection') {
const db = operationContext.database;
const collectionOptions = operation.collectionOptions || {};
operationContext[operation.object] = db.collection(
'test',
convertCollectionOptions(collectionOptions)
);
}
return testOperation(operation, operationContext[operation.object], operationContext);
});
}, Promise.resolve());
}
| 1 | 15,129 | Generally I'd say we should factor this out into something more extensible (check an array of potentially skipped tests, for example), but since we're likely to remove this soon for scheduled work I think this is fine. What do you think @daprahamian? | mongodb-node-mongodb-native | js
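The "array of potentially skipped tests" idea the reviewer mentions is easy to sketch: keep the skip decision data-driven instead of hard-coding suite names in the runner. A minimal illustration of that pattern follows (hypothetical names, and written in Python rather than the driver's JavaScript only to keep the added examples in one language).

# Hypothetical skip-list pattern: extend the sets instead of editing the runner.
SKIPPED_SUITES = {"pin-mongos"}
SKIPPED_TESTS = set()

def should_skip(suite_name, test):
    return bool(
        test.get("skipReason")
        or suite_name in SKIPPED_SUITES
        or test.get("description") in SKIPPED_TESTS
    )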
@@ -18,7 +18,7 @@ THE SOFTWARE.
*/
/* HIT_START
- * BUILD: %t %s ../../test_common.cpp NVCC_OPTIONS -std=c++11
+ * BUILD: %t %s ../../test_common.cpp NVCC_OPTIONS -std=c++11 EXCLUDE_HIP_PLATFORM clang
* TEST: %t
* HIT_END
*/ | 1 | /*
Copyright (c) 2015-Present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/* HIT_START
* BUILD: %t %s ../../test_common.cpp NVCC_OPTIONS -std=c++11
* TEST: %t
* HIT_END
*/
#include <hip/hip_runtime_api.h>
#include <iostream>
#include "test_common.h"
__global__
void fn(float* px, float* py)
{
bool a[42];
__shared__ double b[69];
for (auto&& x : b) x = *py++;
for (auto&& x : a) x = *px++ > 0.0;
for (auto&& x : a) if (x) *--py = *--px;
}
int main() {
hipFuncAttributes attr{};
auto r = hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(&fn));
if (r != hipSuccess || attr.maxThreadsPerBlock == 0) {
failed("Failed to read attributes.");
}
passed();
}
| 1 | 8,574 | Why are we skipping these tests? They should be passing in HIP-Clang. | ROCm-Developer-Tools-HIP | cpp
@@ -279,9 +279,14 @@ class RandString(RandField):
def __mul__(self, n):
return self._fix()*n
-class RandBin(RandString):
+class RandBin(RandString, object):
def __init__(self, size=None):
- RandString.__init__(self, size, "".join(map(chr, range(256))))
+ super(RandBin, self).__init__(size=size, chars=b"".join(chb(c) for c in range(256)))
+ def _fix(self):
+ s = b""
+ for _ in range(self.size):
+ s += chb(random.choice(self.chars))
+ return s
class RandTermString(RandString): | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Fields that hold random numbers.
"""
from __future__ import absolute_import
import random,time,math
from scapy.base_classes import Net
from scapy.compat import *
from scapy.utils import corrupt_bits,corrupt_bytes
from scapy.modules.six.moves import range
####################
## Random numbers ##
####################
class RandomEnumeration:
"""iterate through a sequence in random order.
When all the values have been drawn, if forever=1, the drawing is done again.
If renewkeys=0, the draw will be in the same order, guaranteeing that the same
number will be drawn in not less than the number of integers of the sequence"""
def __init__(self, inf, sup, seed=None, forever=1, renewkeys=0):
self.forever = forever
self.renewkeys = renewkeys
self.inf = inf
self.rnd = random.Random(seed)
self.sbox_size = 256
self.top = sup-inf+1
n=0
while (1<<n) < self.top:
n += 1
self.n =n
self.fs = min(3,(n+1)//2)
self.fsmask = 2**self.fs-1
self.rounds = max(self.n,3)
self.turns = 0
self.i = 0
def __iter__(self):
return self
def next(self):
while True:
if self.turns == 0 or (self.i == 0 and self.renewkeys):
self.cnt_key = self.rnd.randint(0,2**self.n-1)
self.sbox = [self.rnd.randint(0, self.fsmask)
for _ in range(self.sbox_size)]
self.turns += 1
while self.i < 2**self.n:
ct = self.i^self.cnt_key
self.i += 1
for _ in range(self.rounds): # Unbalanced Feistel Network
lsb = ct & self.fsmask
ct >>= self.fs
lsb ^= self.sbox[ct%self.sbox_size]
ct |= lsb << (self.n-self.fs)
if ct < self.top:
return self.inf+ct
self.i = 0
if not self.forever:
raise StopIteration
__next__ = next
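# Illustrative usage note (not part of the original module): RandomEnumeration
# yields every value of [inf, sup] exactly once per cycle, in pseudo-random order.
#   re = RandomEnumeration(0, 9)
#   cycle = [next(re) for _ in range(10)]   # a permutation of 0..9
# With forever=1 iteration then starts a new cycle instead of stopping; with
# renewkeys=0 every cycle repeats the same order, as the docstring above states.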
class VolatileValue:
def __repr__(self):
return "<%s>" % self.__class__.__name__
def __eq__(self, other):
x = self._fix()
y = other._fix() if isinstance(other, VolatileValue) else other
if not isinstance(x, type(y)):
return False
return x == y
def __getattr__(self, attr):
if attr in ["__setstate__", "__getstate__"]:
raise AttributeError(attr)
return getattr(self._fix(),attr)
def __str__(self):
return str(self._fix())
def __bytes__(self):
return raw(self._fix())
def __len__(self):
return len(self._fix())
def _fix(self):
return None
class RandField(VolatileValue):
pass
class RandNum(RandField):
"""Instances evaluate to random integers in selected range"""
min = 0
max = 0
def __init__(self, min, max):
self.min = min
self.max = max
def _fix(self):
return random.randrange(self.min, self.max+1)
def __int__(self):
return int(self._fix())
def __index__(self):
return int(self)
def __nonzero__(self):
return bool(self.value)
__bool__ = __nonzero__
def __add__(self, other):
return self._fix() + other
def __radd__(self, other):
return other + self._fix()
def __sub__(self, other):
return self._fix() - other
def __rsub__(self, other):
return other - self._fix()
def __mul__(self, other):
return self._fix() * other
def __rmul__(self, other):
return other * self._fix()
def __floordiv__(self, other):
return self._fix() / other
__div__ = __floordiv__
def __lt__(self, other):
return self._fix() < other
def __le__(self, other):
return self._fix() <= other
def __eq__(self, other):
return self._fix() == other
def __ne__(self, other):
return self._fix() != other
def __ge__(self, other):
return self._fix() >= other
def __gt__(self, other):
return self._fix() > other
def __lshift__(self, other):
return self._fix() << other
def __rshift__(self, other):
return self._fix() >> other
def __and__(self, other):
return self._fix() & other
def __rand__(self, other):
return other & self._fix()
def __or__(self, other):
return self._fix() | other
def __ror__(self, other):
return other | self._fix()
class RandNumGamma(RandNum):
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def _fix(self):
return int(round(random.gammavariate(self.alpha, self.beta)))
class RandNumGauss(RandNum):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def _fix(self):
return int(round(random.gauss(self.mu, self.sigma)))
class RandNumExpo(RandNum):
def __init__(self, lambd, base=0):
self.lambd = lambd
self.base = base
def _fix(self):
return self.base+int(round(random.expovariate(self.lambd)))
class RandEnum(RandNum):
"""Instances evaluate to integer sampling without replacement from the given interval"""
def __init__(self, min, max, seed=None):
self.seq = RandomEnumeration(min,max,seed)
def _fix(self):
return next(self.seq)
class RandByte(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**8-1)
class RandSByte(RandNum):
def __init__(self):
RandNum.__init__(self, -2**7, 2**7-1)
class RandShort(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**16-1)
class RandSShort(RandNum):
def __init__(self):
RandNum.__init__(self, -2**15, 2**15-1)
class RandInt(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**32-1)
class RandSInt(RandNum):
def __init__(self):
RandNum.__init__(self, -2**31, 2**31-1)
class RandLong(RandNum):
def __init__(self):
RandNum.__init__(self, 0, 2**64-1)
class RandSLong(RandNum):
def __init__(self):
RandNum.__init__(self, -2**63, 2**63-1)
class RandEnumByte(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**8-1)
class RandEnumSByte(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**7, 2**7-1)
class RandEnumShort(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**16-1)
class RandEnumSShort(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**15, 2**15-1)
class RandEnumInt(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**32-1)
class RandEnumSInt(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**31, 2**31-1)
class RandEnumLong(RandEnum):
def __init__(self):
RandEnum.__init__(self, 0, 2**64-1)
class RandEnumSLong(RandEnum):
def __init__(self):
RandEnum.__init__(self, -2**63, 2**63-1)
class RandEnumKeys(RandEnum):
"""Picks a random value from dict keys list. """
def __init__(self, enum, seed=None):
self.enum = list(enum)
self.seq = RandomEnumeration(0, len(self.enum) - 1, seed)
def _fix(self):
return self.enum[next(self.seq)]
class RandChoice(RandField):
def __init__(self, *args):
if not args:
raise TypeError("RandChoice needs at least one choice")
self._choice = args
def _fix(self):
return random.choice(self._choice)
class RandString(RandField):
def __init__(self, size=None, chars="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"):
if size is None:
size = RandNumExpo(0.01)
self.size = size
self.chars = chars
def _fix(self):
s = ""
for _ in range(self.size):
s += random.choice(self.chars)
return s
def __mul__(self, n):
return self._fix()*n
class RandBin(RandString):
def __init__(self, size=None):
RandString.__init__(self, size, "".join(map(chr, range(256))))
class RandTermString(RandString):
def __init__(self, size, term):
RandString.__init__(self, size, "".join(map(chr, range(1,256))))
self.term = term
def _fix(self):
return RandString._fix(self)+self.term
def __str__(self):
return str(self._fix())
def __bytes__(self):
return raw(self._fix())
class RandIP(RandString):
def __init__(self, iptemplate="0.0.0.0/0"):
self.ip = Net(iptemplate)
def _fix(self):
return self.ip.choice()
class RandMAC(RandString):
def __init__(self, template="*"):
template += ":*:*:*:*:*"
template = template.split(":")
self.mac = ()
for i in range(6):
if template[i] == "*":
v = RandByte()
elif "-" in template[i]:
x,y = template[i].split("-")
v = RandNum(int(x,16), int(y,16))
else:
v = int(template[i],16)
self.mac += (v,)
def _fix(self):
return "%02x:%02x:%02x:%02x:%02x:%02x" % self.mac
class RandIP6(RandString):
def __init__(self, ip6template="**"):
self.tmpl = ip6template
self.sp = self.tmpl.split(":")
for i,v in enumerate(self.sp):
if not v or v == "**":
continue
if "-" in v:
a,b = v.split("-")
elif v == "*":
a=b=""
else:
a=b=v
if not a:
a = "0"
if not b:
b = "ffff"
if a==b:
self.sp[i] = int(a,16)
else:
self.sp[i] = RandNum(int(a,16), int(b,16))
self.variable = "" in self.sp
self.multi = self.sp.count("**")
def _fix(self):
done = 0
nbm = self.multi
ip = []
for i,n in enumerate(self.sp):
if n == "**":
nbm -= 1
remain = 8-(len(self.sp)-i-1)-len(ip)+nbm
if "" in self.sp:
remain += 1
if nbm or self.variable:
remain = random.randint(0,remain)
for j in range(remain):
ip.append("%04x" % random.randint(0,65535))
elif isinstance(n, RandNum):
ip.append("%04x" % n)
elif n == 0:
ip.append("0")
elif not n:
ip.append("")
else:
ip.append("%04x" % n)
if len(ip) == 9:
ip.remove("")
if ip[-1] == "":
ip[-1] = "0"
return ":".join(ip)
class RandOID(RandString):
def __init__(self, fmt=None, depth=RandNumExpo(0.1), idnum=RandNumExpo(0.01)):
self.ori_fmt = fmt
if fmt is not None:
fmt = fmt.split(".")
for i in range(len(fmt)):
if "-" in fmt[i]:
fmt[i] = tuple(map(int, fmt[i].split("-")))
self.fmt = fmt
self.depth = depth
self.idnum = idnum
def __repr__(self):
if self.ori_fmt is None:
return "<%s>" % self.__class__.__name__
else:
return "<%s [%s]>" % (self.__class__.__name__, self.ori_fmt)
def _fix(self):
if self.fmt is None:
return ".".join(str(self.idnum) for _ in range(1 + self.depth))
else:
oid = []
for i in self.fmt:
if i == "*":
oid.append(str(self.idnum))
elif i == "**":
oid += [str(self.idnum) for i in range(1 + self.depth)]
elif isinstance(i, tuple):
oid.append(str(random.randrange(*i)))
else:
oid.append(i)
return ".".join(oid)
class RandRegExp(RandField):
def __init__(self, regexp, lambda_=0.3,):
self._regexp = regexp
self._lambda = lambda_
@staticmethod
def choice_expand(s): #XXX does not support special sets like (ex ':alnum:')
m = ""
invert = s and s[0] == "^"
while True:
p = s.find("-")
if p < 0:
break
if p == 0 or p == len(s)-1:
m = "-"
if p:
s = s[:-1]
else:
s = s[1:]
else:
c1 = s[p-1]
c2 = s[p+1]
rng = "".join(map(chr, range(ord(c1), ord(c2)+1)))
s = s[:p-1]+rng+s[p+1:]
res = m+s
if invert:
res = "".join(chr(x) for x in range(256) if chr(x) not in res)
return res
@staticmethod
def stack_fix(lst, index):
r = ""
mul = 1
for e in lst:
if isinstance(e, list):
if mul != 1:
mul = mul-1
r += RandRegExp.stack_fix(e[1:]*mul, index)
# only the last iteration should be kept for back reference
f = RandRegExp.stack_fix(e[1:], index)
for i,idx in enumerate(index):
if e is idx:
index[i] = f
r += f
mul = 1
elif isinstance(e, tuple):
kind,val = e
if kind == "cite":
r += index[val-1]
elif kind == "repeat":
mul = val
elif kind == "choice":
if mul == 1:
c = random.choice(val)
r += RandRegExp.stack_fix(c[1:], index)
else:
r += RandRegExp.stack_fix([e]*mul, index)
mul = 1
else:
if mul != 1:
r += RandRegExp.stack_fix([e]*mul, index)
mul = 1
else:
r += str(e)
return r
def _fix(self):
stack = [None]
index = []
current = stack
i = 0
ln = len(self._regexp)
interp = True
while i < ln:
c = self._regexp[i]
i+=1
if c == '(':
current = [current]
current[0].append(current)
elif c == '|':
p = current[0]
ch = p[-1]
if not isinstance(ch, tuple):
ch = ("choice",[current])
p[-1] = ch
else:
ch[1].append(current)
current = [p]
elif c == ')':
ch = current[0][-1]
if isinstance(ch, tuple):
ch[1].append(current)
index.append(current)
current = current[0]
elif c == '[' or c == '{':
current = [current]
current[0].append(current)
interp = False
elif c == ']':
current = current[0]
choice = RandRegExp.choice_expand("".join(current.pop()[1:]))
current.append(RandChoice(*list(choice)))
interp = True
elif c == '}':
current = current[0]
num = "".join(current.pop()[1:])
e = current.pop()
if "," not in num:
n = int(num)
current.append([current]+[e]*n)
else:
num_min,num_max = num.split(",")
if not num_min:
num_min = "0"
if num_max:
n = RandNum(int(num_min),int(num_max))
else:
n = RandNumExpo(self._lambda,base=int(num_min))
current.append(("repeat",n))
current.append(e)
interp = True
elif c == '\\':
c = self._regexp[i]
if c == "s":
c = RandChoice(" ","\t")
elif c in "0123456789":
c = ("cite",ord(c)-0x30)
current.append(c)
i += 1
elif not interp:
current.append(c)
elif c == '+':
e = current.pop()
current.append([current]+[e]*(int(random.expovariate(self._lambda))+1))
elif c == '*':
e = current.pop()
current.append([current]+[e]*int(random.expovariate(self._lambda)))
elif c == '?':
if random.randint(0,1):
current.pop()
elif c == '.':
current.append(RandChoice(*[chr(x) for x in range(256)]))
elif c == '$' or c == '^':
pass
else:
current.append(c)
return RandRegExp.stack_fix(stack[1:], index)
def __repr__(self):
return "<%s [%r]>" % (self.__class__.__name__, self._regexp)
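# Illustrative usage note (not part of the original module): each resolution of a
# RandRegExp produces a fresh string matching the pattern, e.g.
#   RandRegExp("[0-9a-f]{8}")._fix()   # -> something like "3fa91c07"
# Character classes, (..|..) alternation and the {n,m} / + / * / ? repetitions are
# handled by the small parser in _fix() above.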
class RandSingularity(RandChoice):
pass
class RandSingNum(RandSingularity):
@staticmethod
def make_power_of_two(end):
sign = 1
if end == 0:
end = 1
if end < 0:
end = -end
sign = -1
end_n = int(math.log(end)/math.log(2))+1
return {sign*2**i for i in range(end_n)}
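# Worked example (added note): make_power_of_two(10) == {1, 2, 4, 8} and
# make_power_of_two(-10) == {-1, -2, -4, -8}; these values feed the singular-value
# set built in __init__ below.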
def __init__(self, mn, mx):
sing = {0, mn, mx, int((mn+mx)/2)}
sing |= self.make_power_of_two(mn)
sing |= self.make_power_of_two(mx)
for i in sing.copy():
sing.add(i+1)
sing.add(i-1)
for i in sing.copy():
if not mn <= i <= mx:
sing.remove(i)
self._choice = list(sing)
self._choice.sort()
class RandSingByte(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**8-1)
class RandSingSByte(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**7, 2**7-1)
class RandSingShort(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**16-1)
class RandSingSShort(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**15, 2**15-1)
class RandSingInt(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**32-1)
class RandSingSInt(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**31, 2**31-1)
class RandSingLong(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, 0, 2**64-1)
class RandSingSLong(RandSingNum):
def __init__(self):
RandSingNum.__init__(self, -2**63, 2**63-1)
class RandSingString(RandSingularity):
def __init__(self):
self._choice = [ "",
"%x",
"%%",
"%s",
"%i",
"%n",
"%x%x%x%x%x%x%x%x%x",
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
"%",
"%%%",
"A"*4096,
b"\x00"*4096,
b"\xff"*4096,
b"\x7f"*4096,
b"\x80"*4096,
" "*4096,
"\\"*4096,
"("*4096,
"../"*1024,
"/"*1024,
"${HOME}"*512,
" or 1=1 --",
"' or 1=1 --",
'" or 1=1 --',
" or 1=1; #",
"' or 1=1; #",
'" or 1=1; #',
";reboot;",
"$(reboot)",
"`reboot`",
"index.php%00",
b"\x00",
"%00",
"\\",
"../../../../../../../../../../../../../../../../../etc/passwd",
"%2e%2e%2f" * 20 + "etc/passwd",
"%252e%252e%252f" * 20 + "boot.ini",
"..%c0%af" * 20 + "etc/passwd",
"..%c0%af" * 20 + "boot.ini",
"//etc/passwd",
r"..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\boot.ini",
"AUX:",
"CLOCK$",
"COM:",
"CON:",
"LPT:",
"LST:",
"NUL:",
"CON:",
r"C:\CON\CON",
r"C:\boot.ini",
r"\\myserver\share",
"foo.exe:",
"foo.exe\\", ]
def __str__(self):
return str(self._fix())
def __bytes__(self):
return raw(self._fix())
class RandPool(RandField):
def __init__(self, *args):
"""Each parameter is a volatile object or a couple (volatile object, weight)"""
pool = []
for p in args:
w = 1
if isinstance(p, tuple):
p,w = p
pool += [p]*w
self._pool = pool
def _fix(self):
r = random.choice(self._pool)
return r._fix()
# Automatic timestamp
class AutoTime(VolatileValue):
def __init__(self, base=None):
if base == None:
self.diff = 0
else:
self.diff = time.time()-base
def _fix(self):
return time.time()-self.diff
class IntAutoTime(AutoTime):
def _fix(self):
return int(time.time()-self.diff)
class ZuluTime(AutoTime):
def __init__(self, diff=0):
self.diff = diff
def _fix(self):
return time.strftime("%y%m%d%H%M%SZ",
time.gmtime(time.time() + self.diff))
class GeneralizedTime(AutoTime):
def __init__(self, diff=0):
self.diff = diff
def _fix(self):
return time.strftime("%Y%m%d%H%M%SZ",
time.gmtime(time.time() + self.diff))
class DelayedEval(VolatileValue):
""" Example of usage: DelayedEval("time.time()") """
def __init__(self, expr):
self.expr = expr
def _fix(self):
return eval(self.expr)
class IncrementalValue(VolatileValue):
def __init__(self, start=0, step=1, restart=-1):
self.start = self.val = start
self.step = step
self.restart = restart
def _fix(self):
v = self.val
if self.val == self.restart :
self.val = self.start
else:
self.val += self.step
return v
class CorruptedBytes(VolatileValue):
def __init__(self, s, p=0.01, n=None):
self.s = s
self.p = p
self.n = n
def _fix(self):
return corrupt_bytes(self.s, self.p, self.n)
class CorruptedBits(CorruptedBytes):
def _fix(self):
return corrupt_bits(self.s, self.p, self.n)
| 1 | 11,984 | I think a "string" in Scapy's spirit (if such a thing exists) is actually a `bytes` object in Python 3 (see `Str*Field`s). So maybe `RandString._fix()` should return a `bytes` object. What do you think? | secdev-scapy | py |
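One possible shape for the change the reviewer floats — `RandString._fix()` returning `bytes` on Python 3, so `RandBin` would not need its own override — is sketched below. This is only an assumption about how it could look, not the patch under review; whether the default character pool should stay ASCII letters/digits or become the full byte range is exactly the open question in the comment.

import random

DEFAULT_CHARS = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

def rand_bytes(size, chars=DEFAULT_CHARS):
    # On Python 3, random.choice() over a bytes object yields ints, and
    # bytes(iterable_of_ints) reassembles them into a bytes value.
    return bytes(random.choice(chars) for _ in range(size))

# e.g. rand_bytes(8) -> a random 8-byte value such as b'q3ZkP0aH';
#      rand_bytes(4, bytes(range(256))) -> 4 arbitrary bytes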
@@ -12,6 +12,8 @@
const path = require('path')
const fs = require('fs')
+const jsdom = require("jsdom");
+const { JSDOM } = jsdom;
const srcDir = path.resolve(path.join(__dirname, '..', 'src'))
| 1 | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* This file manages the following:
* - Lists of files needed to be translated (Which is all top level GRD and JSON files)
* - All mappings for auto-generated Brave files from the associated Chromium files.
* - Top level global string replacements, such as replacing Chromium with Brave
*/
const path = require('path')
const fs = require('fs')
const srcDir = path.resolve(path.join(__dirname, '..', 'src'))
// chromium_strings.grd and any of its parts files that we track localization for in transifex
// These map to brave/app/resources/chromium_strings*.xtb
const chromiumStringsPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'chromium_strings.grd'))
const braveStringsPath = path.resolve(path.join(srcDir, 'brave', 'app', 'brave_strings.grd'))
const chromiumSettingsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'settings_chromium_strings.grdp'))
const braveSettingsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'settings_brave_strings.grdp'))
//Replace android strings.
const androidChromeStringsPath = path.resolve(path.join(srcDir, 'chrome', 'android', 'java', 'strings', 'android_chrome_strings.grd'))
const braveAndroidChromeStringsPath = path.resolve(path.join(srcDir, 'brave', 'android', 'java', 'strings', 'android_chrome_strings.grd'))
// component_chromium_strings.grd and any of its parts files that we track localization for in transifex
// These map to brave/app/strings/components_chromium_strings*.xtb
const chromiumComponentsChromiumStringsPath = path.resolve(path.join(srcDir, 'components', 'components_chromium_strings.grd'))
const braveComponentsBraveStringsPath = path.resolve(path.join(srcDir, 'brave', 'components', 'components_brave_strings.grd'))
// component_strings.grd and any of its parts files that we track localization for in transifex
// These map to brave/app/strings/components_strings*.xtb
const chromiumComponentsStringsPath = path.resolve(path.join(srcDir, 'components', 'components_strings.grd'))
const braveComponentsStringsPath = path.resolve(path.join(srcDir, 'brave', 'components', 'components_strings.grd'))
const chromiumBookmarksBarStringsPartPath = path.resolve(path.join(srcDir, 'components', 'bookmark_bar_strings.grdp'))
const braveBookmarksBarStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'components', 'bookmark_bar_strings.grdp'))
// src/components/components_strings.grd and any of its parts files.
const chromiumManagementStringsPartPath = path.resolve(path.join(srcDir, 'components', 'management_strings.grdp'))
const braveManagementStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'components', 'management_strings.grdp'))
const chromiumPasswordManagerStringsPartPath = path.resolve(path.join(srcDir, 'components', 'password_manager_strings.grdp'))
const bravePasswordManagerStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'components', 'password_manager_strings.grdp'))
// generated_resources.grd and any of its parts files that we track localization for in transifex
// There is also chromeos_strings.grdp but we don't need to track it here
// These map to brave/app/resources/generated_resources*.xtb
const chromiumGeneratedResourcesPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'generated_resources.grd'))
const braveGeneratedResourcesPath = path.resolve(path.join(srcDir, 'brave', 'app', 'generated_resources.grd'))
const chromiumBookmarksPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'bookmarks_strings.grdp'))
const braveBookmarksPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'bookmarks_strings.grdp'))
const chromiumMediaRouterPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'media_router_strings.grdp'))
const braveMediaRouterPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'media_router_strings.grdp'))
const chromiumSettingsStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'settings_strings.grdp'))
const braveSettingsStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'settings_strings.grdp'))
const chromiumExtensionsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'extensions_strings.grdp'))
const braveExtensionsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'extensions_strings.grdp'))
const chromiumPrintingStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'printing_strings.grdp'))
const bravePrintingStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'printing_strings.grdp'))
const chromiumProfileSettingsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'profiles_strings.grdp'))
const braveProfileSettingsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'profiles_strings.grdp'))
const chromiumVRStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'vr_strings.grdp'))
const braveVRStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'vr_strings.grdp'))
const chromiumWelcomeStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'welcome_strings.grdp'))
const braveWelcomeStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'welcome_strings.grdp'))
const chromiumAppManagementStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'app_management_strings.grdp'))
const braveAppManagementStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'app_management_strings.grdp'))
const chromiumGlobalMediaControlsStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'global_media_controls_strings.grdp'))
const braveGlobalMediaControlsStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'global_media_controls_strings.grdp'))
const chromiumSupervisedUserErrorPageStringsPartPath = path.resolve(path.join(srcDir, 'chrome', 'app', 'supervised_user_error_page_strings.grdp'))
const braveSupervisedUserErrorPageStringsPartPath = path.resolve(path.join(srcDir, 'brave', 'app', 'supervised_user_error_page_strings.grdp'))
// The following are not generated files but still need to be tracked so they get sent to transifex
// These xtb files don't need to be copied anywhere.
// brave_generated_resources.grd maps to brave/app/resources/brave_generated_resources*.xtb,
// brave_components_strings.grd maps to brave/components/resources/strings/brave_components_resources*.xtb
// messages.json localization is handled inside of brave-extension.
const braveSpecificGeneratedResourcesPath = path.resolve(path.join(srcDir, 'brave', 'app', 'brave_generated_resources.grd'))
const braveResourcesComponentsStringsPath = path.resolve(path.join(srcDir, 'brave', 'components', 'resources', 'brave_components_strings.grd'))
const braveExtensionMessagesPath = path.resolve(path.join(srcDir, 'brave', 'components', 'brave_extension', 'extension', 'brave_extension', '_locales', 'en_US', 'messages.json'))
const braveRewardsExtensionMessagesPath = path.resolve(path.join(srcDir, 'brave', 'components', 'brave_rewards', 'resources', 'extension', 'brave_rewards', '_locales', 'en_US', 'messages.json'))
// When adding new grd or grd files, never add a grdp part path without a parent grd path.
// Group them with a leading and trailing newline to keep this file organized.
// Add all mappings here whether it is a GRD or a GRDP.
// Brave specific only grd and grdp files should not be added here.
const chromiumToAutoGeneratedBraveMapping = {
[chromiumStringsPath]: braveStringsPath,
[chromiumSettingsPartPath]: braveSettingsPartPath,
[chromiumComponentsChromiumStringsPath]: braveComponentsBraveStringsPath,
[chromiumComponentsStringsPath]: braveComponentsStringsPath,
[chromiumBookmarksBarStringsPartPath]: braveBookmarksBarStringsPartPath,
[chromiumGeneratedResourcesPath]: braveGeneratedResourcesPath,
[chromiumBookmarksPartPath]: braveBookmarksPartPath,
[chromiumMediaRouterPartPath]: braveMediaRouterPartPath,
[chromiumSettingsStringsPartPath]: braveSettingsStringsPartPath,
[chromiumExtensionsPartPath]: braveExtensionsPartPath,
[chromiumPrintingStringsPartPath]: bravePrintingStringsPartPath,
[chromiumProfileSettingsPartPath]: braveProfileSettingsPartPath,
[chromiumVRStringsPartPath]: braveVRStringsPartPath,
[chromiumWelcomeStringsPartPath]: braveWelcomeStringsPartPath,
[chromiumAppManagementStringsPartPath]: braveAppManagementStringsPartPath,
[chromiumGlobalMediaControlsStringsPartPath]: braveGlobalMediaControlsStringsPartPath,
[chromiumSupervisedUserErrorPageStringsPartPath]: braveSupervisedUserErrorPageStringsPartPath,
[androidChromeStringsPath]: braveAndroidChromeStringsPath,
[chromiumManagementStringsPartPath]: braveManagementStringsPartPath,
[chromiumPasswordManagerStringsPartPath]: bravePasswordManagerStringsPartPath
}
// Same as with chromiumToAutoGeneratedBraveMapping but maps in the opposite direction
module.exports.autoGeneratedBraveToChromiumMapping = Object.keys(chromiumToAutoGeneratedBraveMapping)
.reduce((obj, key) => ({ ...obj, [chromiumToAutoGeneratedBraveMapping[key]]: key }), {})
// All paths which are not generated
module.exports.braveNonGeneratedPaths = [
braveSpecificGeneratedResourcesPath, braveResourcesComponentsStringsPath, braveExtensionMessagesPath, braveRewardsExtensionMessagesPath
]
// All paths which are generated
module.exports.braveAutoGeneratedPaths = Object.values(chromiumToAutoGeneratedBraveMapping)
// Brave specific strings and Chromium mapped Brave strings will be here.
// But you only need to add the Brave specific strings manually here.
module.exports.allBravePaths = module.exports.braveNonGeneratedPaths.concat(module.exports.braveAutoGeneratedPaths)
// Get all GRD and JSON paths whether they are generated or not
// Push and pull scripts for l10n use this.
// Transifex manages files per grd and not per grd or grdp.
// This is because only 1 xtb is created per grd per locale even if it has multiple grdp files.
module.exports.braveTopLevelPaths = module.exports.allBravePaths.filter((x) => ['grd', 'json'].includes(x.split('.').pop()))
// ethereum-remote-client path relative to the Brave paths
module.exports.ethereumRemoteClientPaths = [
'../../../ethereum-remote-client/app/_locales/en/messages.json',
'../../../ethereum-remote-client/brave/app/_locales/en/messages.json'
]
// This simply reads Chromium files that are passed to it and replaces branding strings
// with Brave specific branding strings.
// Do not use this for filtering XML, instead use chromium-rebase-l10n.py.
// Only add idempotent replacements here (i.e. don't append replace A with AX here)
module.exports.rebaseBraveStringFilesOnChromiumL10nFiles = async function (path) {
const ops = Object.entries(chromiumToAutoGeneratedBraveMapping).map(async ([sourcePath, destPath]) => {
let contents = await new Promise(resolve => fs.readFile(sourcePath, 'utf8', (err, data) => resolve(data)))
for (const replacement of defaultReplacements) {
contents = contents.replace(replacement[0], replacement[1])
}
await new Promise(resolve => fs.writeFile(destPath, contents, 'utf8', resolve))
})
await Promise.all(ops)
}
// Straight-forward string replacement list.
// Consider mapping chromium resource ID to a new brave resource ID
// for whole-message replacements, instead of adding to this list.
const defaultReplacements = [
[/Automatically send usage statistics and crash reports to Google/g, 'Automatically send crash reports to Google'],
[/Automatically sends usage statistics and crash reports to Google/g, 'Automatically sends crash reports to Google'],
[/Chrome Web Store/g, 'Web Store'],
[/The Chromium Authors/g, 'Brave Software Inc'],
[/Google Chrome/g, 'Brave'],
[/Chromium/g, 'Brave'],
[/Chrome/g, 'Brave'],
[/Google/g, 'Brave'],
[/You're incognito/g, 'This is a private window'],
[/an incognito/g, 'a private'],
[/an Incognito/g, 'a Private'],
[/incognito/g, 'private'],
[/Incognito/g, 'Private'],
[/inco\&\;gnito/g, '&private'],
[/Inco\&\;gnito/g, '&Private'],
[/People/g, 'Profiles'],
// 'people' but only in the context of profiles, not humans.
[/(?<!authenticate )people(?! with slow connections?)/g, 'profiles'],
[/(Person)(?!\w)/g, 'Profile'],
[/(person)(?!\w)/g, 'profile'],
[/Bookmarks Bar\n/g, 'Bookmarks\n'],
[/Bookmarks bar\n/g, 'Bookmarks\n'],
[/bookmarks bar\n/g, 'bookmarks\n'],
]
| 1 | 6,239 | This can be combined to 1 line, just tested, seems to work. | brave-brave-browser | js |
@@ -1404,6 +1404,16 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true.");
}
+ genesisConfigOptions
+ .getChainId()
+ .ifPresent(chainId -> ensureGoQuorumCompatibilityModeNotUsedOnMainnet(chainId));
+ }
+ }
+
+ private void ensureGoQuorumCompatibilityModeNotUsedOnMainnet(final BigInteger chainId) {
+ if (chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
+ throw new IllegalStateException(
+ "GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.ethereum.permissioning.QuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainexport.RlpBlockExporter;
import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
import org.hyperledger.besu.cli.options.unstable.EthstatsOptions;
import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.unstable.MiningOptions;
import org.hyperledger.besu.cli.options.unstable.NatOptions;
import org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.RPCOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;
import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions;
import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner;
import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.TargetingGasLimitCalculator;
import org.hyperledger.besu.crypto.KeyPairSecurityModule;
import org.hyperledger.besu.crypto.KeyPairUtil;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.crypto.SECP256K1;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.ethereum.api.ApiConfiguration;
import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider;
import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.QuorumPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.MetricsProtocol;
import org.hyperledger.besu.metrics.MetricsSystemFactory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.SecurityModuleService;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.SecurityModuleServiceImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
description = "This command runs the Besu Ethereum client full node.",
abbreviateSynopsis = true,
name = "besu",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
header = "Usage:",
synopsisHeading = "%n",
descriptionHeading = "%nDescription:%n%n",
optionListHeading = "%nOptions:%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
@SuppressWarnings("PrivateStaticFinalLoggers")
// non-static for testing
private final Logger logger;
private CommandLine commandLine;
private final Supplier<RlpBlockImporter> rlpBlockImporter;
private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory;
private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory;
// Unstable CLI options
final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create();
final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create();
final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create();
final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create();
final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create();
private final EthstatsOptions unstableEthstatsOptions = EthstatsOptions.create();
private final DnsOptions unstableDnsOptions = DnsOptions.create();
private final MiningOptions unstableMiningOptions = MiningOptions.create();
private final NatOptions unstableNatOptions = NatOptions.create();
private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create();
private final RPCOptions unstableRPCOptions = RPCOptions.create();
private final RunnerBuilder runnerBuilder;
private final BesuController.Builder controllerBuilderFactory;
private final BesuPluginContextImpl besuPluginContext;
private final StorageServiceImpl storageService;
private final SecurityModuleServiceImpl securityModuleService;
private final Map<String, String> environment;
private final MetricCategoryRegistryImpl metricCategoryRegistry =
new MetricCategoryRegistryImpl();
private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
  // Public IP stored so we do not have to rediscover it each time it is needed.
private InetAddress autoDiscoveredDefaultIP = null;
private final PreSynchronizationTaskRunner preSynchronizationTaskRunner =
new PreSynchronizationTaskRunner();
private final Set<Integer> allocatedPorts = new HashSet<>();
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
  // While this variable is never read, it is needed for PicoCLI to create
  // the config file option that is read elsewhere.
@SuppressWarnings("UnusedVariable")
@CommandLine.Option(
names = {CONFIG_FILE_OPTION_NAME},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "TOML config file (default: none)")
private final File configFile = null;
@CommandLine.Option(
names = {"--data-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "The path to Besu data directory (default: ${DEFAULT-VALUE})")
final Path dataPath = getDefaultBesuDataPath(this);
  // Genesis file path. The default stays null when the option is not set on the
  // command line, because the default genesis is handled by the Runner, which uses the
  // mainnet JSON file from resources as indicated by the default network option.
  // We therefore have no control over the genesis default value here.
@CommandLine.Option(
names = {"--genesis-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.")
private final File genesisFile = null;
@CommandLine.Option(
names = {"--node-private-key-file"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description =
"The node's private key file (default: a file named \"key\" in the Besu data folder)")
private final File nodePrivateKeyFile = null;
@Option(
names = "--identity",
paramLabel = "<String>",
description = "Identification for this node in the Client ID",
arity = "1")
private final Optional<String> identityString = Optional.empty();
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"},
description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean p2pEnabled = true;
  // Boolean option to indicate whether peers should NOT be discovered; the default of
  // false means that peers are discovered by default.
  //
  // This negative option is required because of the nature of boolean flags, which are
  // true simply by being present on the command line. You can't do --option=false, so
  // false is set as the default and the option is simply not passed at all when it
  // should remain false. This seems to be the only way it works with Picocli.
  // Many other tools use the same negative-option scheme for false defaults, which
  // suggests it is the right way to handle disabling options.
@Option(
names = {"--discovery-enabled"},
description = "Enable P2P discovery (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean peerDiscoveryEnabled = true;
  // A list of bootstrap nodes can be passed; otherwise the Runner falls back to a
  // hardcoded list. NOTE: we have no control over the default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
private final List<String> bootNodes = null;
@Option(
names = {"--max-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})")
private final Integer maxPeers = DEFAULT_MAX_PEERS;
@Option(
names = {"--remote-connections-limit-enabled"},
description =
"Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
private final Boolean isLimitRemoteWireConnectionsEnabled = true;
@Option(
names = {"--remote-connections-max-percentage"},
paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description =
"The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
arity = "1",
converter = PercentageConverter.class)
private final Integer maxRemoteConnectionsPercentage =
Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
.toPercentage()
.getValue();
@Option(
names = {"--random-peer-priority-enabled"},
description =
"Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})")
private final Boolean randomPeerPriority = false;
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
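  // Illustrative usage of the option above (node ids are placeholders, not from
  // upstream docs): "--banned-node-ids=<nodeId1>,<nodeId2>". Empty entries are skipped
  // and any malformed node id is rejected with a ParameterException.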
private Collection<Bytes> bannedNodeIds = new ArrayList<>();
@Option(
names = {"--sync-mode"},
paramLabel = MANDATORY_MODE_FORMAT_HELP,
description =
"Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)")
private SyncMode syncMode = null;
@Option(
names = {"--fast-sync-min-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
@Option(
names = {"--network"},
paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
description =
"Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
+ " (default: MAINNET)")
private final NetworkName network = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-interface"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pInterface = NetworkUtility.INADDR_ANY;
@Option(
names = {"--p2p-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT;
@Option(
names = {"--nat-method"},
description =
"Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
+ " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
private final NatMethod natMethod = DEFAULT_NAT_METHOD;
@Option(
names = {"--network-id"},
paramLabel = "<BIG INTEGER>",
description =
"P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
arity = "1")
private final BigInteger networkId = null;
@Option(
names = {"--graphql-http-enabled"},
description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isGraphQLHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--graphql-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--graphql-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
@Option(
names = {"--graphql-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-enabled"},
description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
// A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
@Option(
names = {"--rpc-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-api", "--rpc-http-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-http-authentication-enabled"},
description =
"Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-http-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-http-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC HTTP authentication",
arity = "1")
private final File rpcHttpAuthenticationPublicKeyFile = null;
@Option(
names = {"--rpc-http-tls-enabled"},
description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsEnabled = false;
@Option(
names = {"--rpc-http-tls-keystore-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStoreFile = null;
@Option(
names = {"--rpc-http-tls-keystore-password-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStorePasswordFile = null;
@Option(
names = {"--rpc-http-tls-client-auth-enabled"},
description =
"Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsClientAuthEnabled = false;
@Option(
names = {"--rpc-http-tls-known-clients-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to file containing clients certificate common name and fingerprint for client authentication")
private final Path rpcHttpTlsKnownClientsFile = null;
@Option(
names = {"--rpc-http-tls-ca-clients-enabled"},
description =
"Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsCAClientsEnabled = false;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
@Option(
names = {"--privacy-tls-enabled"},
description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyTlsEnabled = false;
@Option(
names = "--privacy-tls-keystore-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.")
private final Path privacyKeyStoreFile = null;
@Option(
names = "--privacy-tls-keystore-password-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the password used to decrypt the keystore.")
private final Path privacyKeyStorePasswordFile = null;
@Option(
names = "--privacy-tls-known-enclave-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the fingerprints of the authorized privacy enclave.")
private final Path privacyTlsKnownEnclaveFile = null;
@Option(
names = {"--metrics-enabled"},
description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-protocol"},
description =
"Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})")
private MetricsProtocol metricsProtocol = PROMETHEUS;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPort = DEFAULT_METRICS_PORT;
@Option(
names = {"--metrics-category", "--metrics-categories"},
paramLabel = "<category name>",
split = ",",
arity = "1..*",
description =
"Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
@Option(
names = {"--metrics-push-enabled"},
description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsPushEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-push-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
@Option(
names = {"--metrics-push-interval"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushInterval = 15;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-prometheus-job"},
description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPrometheusJob = "besu-client";
@Option(
names = {"--host-allowlist"},
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
defaultValue = "localhost,127.0.0.1")
private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--host-whitelist"},
hidden = true,
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Deprecated in favor of --host-allowlist. Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})")
private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--logging", "-l"},
paramLabel = "<LOG VERBOSITY LEVEL>",
description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL")
private final Level logLevel = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--color-enabled"},
description =
"Force color output to be enabled/disabled (default: colorized only if printing to console")
private static Boolean colorEnabled = null;
@Option(
names = {"--reorg-logging-threshold"},
description =
"How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})")
private final Long reorgLoggingThreshold = 6L;
@Option(
names = {"--miner-enabled"},
description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
private final Boolean isMiningEnabled = false;
@Option(
names = {"--miner-stratum-enabled"},
description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})")
private final Boolean iStratumMiningEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--miner-stratum-host"},
description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})")
private String stratumNetworkInterface = "0.0.0.0";
@Option(
names = {"--miner-stratum-port"},
description = "Stratum port binding (default: ${DEFAULT-VALUE})")
private final Integer stratumPort = 8008;
@Option(
names = {"--miner-coinbase"},
description =
"Account to which mining rewards are paid. You must specify a valid coinbase if "
+ "mining is enabled using --miner-enabled option",
arity = "1")
private final Address coinbase = null;
@Option(
names = {"--min-gas-price"},
description =
"Minimum price (in Wei) offered by a transaction for it to be included in a mined "
+ "block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
@Option(
names = {"--rpc-tx-feecap"},
description =
"Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO;
@Option(
names = {"--miner-extra-data"},
description =
"A hex string representing the (32) bytes to be included in the extra data "
+ "field of a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Bytes extraData = DEFAULT_EXTRA_DATA;
@Option(
names = {"--pruning-enabled"},
description =
"Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})")
private final Boolean pruningEnabled = false;
@Option(
names = {"--permissions-nodes-config-file-enabled"},
description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-nodes-config-file"},
description =
"Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String nodePermissionsConfigFile = null;
@Option(
names = {"--permissions-accounts-config-file-enabled"},
description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-accounts-config-file"},
description =
"Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String accountPermissionsConfigFile = null;
@Option(
names = {"--permissions-nodes-contract-address"},
description = "Address of the node permissioning smart contract",
arity = "1")
private final Address permissionsNodesContractAddress = null;
@Option(
names = {"--permissions-nodes-contract-version"},
description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})")
private final Integer permissionsNodesContractVersion = 1;
@Option(
names = {"--permissions-nodes-contract-enabled"},
description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesContractEnabled = false;
@Option(
names = {"--permissions-accounts-contract-address"},
description = "Address of the account permissioning smart contract",
arity = "1")
private final Address permissionsAccountsContractAddress = null;
@Option(
names = {"--permissions-accounts-contract-enabled"},
description =
"Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsContractEnabled = false;
@Option(
names = {"--privacy-enabled"},
description = "Enable private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyEnabled = false;
@Option(
names = {"--privacy-multi-tenancy-enabled"},
description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyMultiTenancyEnabled = false;
@Option(
names = {"--revert-reason-enabled"},
description =
"Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
private final Boolean isRevertReasonEnabled = false;
@Option(
names = {"--required-blocks", "--required-block"},
paramLabel = "BLOCK=HASH",
description = "Block number and hash peers are required to have.",
arity = "*",
split = ",")
private final Map<Long, Hash> requiredBlocks = new HashMap<>();
@Option(
names = {"--privacy-url"},
description = "The URL on which the enclave is running")
private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
@CommandLine.Option(
names = {"--privacy-public-key-file"},
description = "The enclave's public key file")
private final File privacyPublicKeyFile = null;
@Option(
names = {"--privacy-precompiled-address"},
description =
"The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})",
hidden = true)
private final Integer privacyPrecompiledAddress = Address.PRIVACY;
@Option(
names = {"--privacy-marker-transaction-signing-key-file"},
description =
"The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
private final Path privacyMarkerTransactionSigningKeyPath = null;
@Option(
names = {"--privacy-enable-database-migration"},
description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})")
private final Boolean migratePrivateDatabase = false;
@Option(
names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"},
description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})")
private final Boolean isFlexiblePrivacyGroupsEnabled = false;
@Option(
names = {"--target-gas-limit"},
description =
"Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.")
private final Long targetGasLimit = null;
@Option(
names = {"--tx-pool-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
@Option(
names = {"--tx-pool-hashes-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pooledTransactionHashesSize =
TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES;
@Option(
names = {"--tx-pool-retention-hours"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pendingTxRetentionPeriod =
TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
@Option(
names = {"--tx-pool-price-bump"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
converter = PercentageConverter.class,
description =
"Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--key-value-storage"},
description = "Identity for the key-value storage to be used.",
arity = "1")
private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--security-module"},
paramLabel = "<NAME>",
description = "Identity for the Security Module to be used.",
arity = "1")
private String securityModuleName = DEFAULT_SECURITY_MODULE;
@Option(
names = {"--auto-log-bloom-caching-enabled"},
description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean autoLogBloomCachingEnabled = true;
@Option(
names = {"--override-genesis-config"},
paramLabel = "NAME=VALUE",
description = "Overrides configuration values in the genesis file. Use with care.",
arity = "*",
hidden = true,
split = ",")
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
@Option(
names = {"--pruning-blocks-retained"},
defaultValue = "1024",
paramLabel = "<INTEGER>",
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
defaultValue = "10",
paramLabel = "<INTEGER>",
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlockConfirmations =
PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@CommandLine.Option(
names = {"--pid-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Path to PID file (optional)")
private final Path pidPath = null;
@CommandLine.Option(
names = {"--api-gas-price-blocks"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceBlocks = 100L;
@CommandLine.Option(
names = {"--api-gas-price-percentile"},
      paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Double apiGasPricePercentile = 50.0;
@CommandLine.Option(
names = {"--api-gas-price-max"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceMax = 500_000_000_000L;
@Option(
names = {"--goquorum-compatibility-enabled"},
hidden = true,
description = "Start Besu in GoQuorum compatibility mode (default: ${DEFAULT-VALUE})")
private final Boolean isGoQuorumCompatibilityMode = false;
private EthNetworkConfig ethNetworkConfig;
private JsonRpcConfiguration jsonRpcConfiguration;
private GraphQLConfiguration graphQLConfiguration;
private WebSocketConfiguration webSocketConfiguration;
private ApiConfiguration apiConfiguration;
private MetricsConfiguration metricsConfiguration;
private Optional<PermissioningConfiguration> permissioningConfiguration;
private Collection<EnodeURL> staticNodes;
private BesuController besuController;
private BesuConfiguration pluginCommonConfiguration;
private final Supplier<ObservableMetricsSystem> metricsSystem =
Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration()));
private Vertx vertx;
private EnodeDnsConfiguration enodeDnsConfiguration;
public BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment) {
this(
logger,
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
runnerBuilder,
controllerBuilderFactory,
besuPluginContext,
environment,
new StorageServiceImpl(),
new SecurityModuleServiceImpl());
}
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService,
final SecurityModuleServiceImpl securityModuleService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
this.securityModuleService = securityModuleService;
pluginCommonConfiguration = new BesuCommandConfigurationService();
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
enableExperimentalEIPs();
addSubCommands(resultHandler, in);
registerConverters();
handleUnstableOptions();
preparePlugins();
parse(resultHandler, exceptionHandler, args);
}
@Override
public void run() {
try {
configureLogging(true);
configureNativeLibs();
logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString));
// Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable
vertx = createVertx(createVertxOptions(metricsSystem.get()));
final BesuCommand controller = validateOptions().configure().controller();
preSynchronizationTaskRunner.runTasks(controller.besuController);
controller.startPlugins().startSynchronization();
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage(), e);
}
}
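  // Startup pipeline sketch, as implemented above: configure logging and native libs,
  // create Vertx (after CLI parsing so metrics options apply), then
  // validateOptions() -> configure() -> controller(), run pre-synchronization tasks,
  // and finally startPlugins() followed by startSynchronization().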
@VisibleForTesting
void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
this.pluginCommonConfiguration = pluginCommonConfiguration;
}
private void enableExperimentalEIPs() {
// Usage of static command line flags is strictly reserved for experimental EIPs
commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class);
}
private void addSubCommands(
final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
commandLine.addSubcommand(
BlocksSubCommand.COMMAND_NAME,
new BlocksSubCommand(
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
resultHandler.out()));
commandLine.addSubcommand(
PublicKeySubCommand.COMMAND_NAME,
new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey));
commandLine.addSubcommand(
PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
commandLine.addSubcommand(
RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
commandLine.addSubcommand(
OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
}
private void registerConverters() {
commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
commandLine.registerConverter(Bytes.class, Bytes::fromHexString);
commandLine.registerConverter(Level.class, Level::valueOf);
commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString);
commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg)));
commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
commandLine.registerConverter(Hash.class, Hash::fromHexString);
commandLine.registerConverter(Optional.class, Optional::of);
commandLine.registerConverter(Double.class, Double::parseDouble);
metricCategoryConverter.addCategories(BesuMetricCategory.class);
metricCategoryConverter.addCategories(StandardMetricCategory.class);
commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
}
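  // Example of how these converters are applied (values are illustrative):
  // "--miner-coinbase=0x<20-byte-hex>" goes through Address::fromHexStringStrict and
  // "--min-gas-price=1000" through Wei.of(Long.parseUnsignedLong(arg)).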
private void handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", unstableEthProtocolOptions)
.put("Metrics", unstableMetricsCLIOptions)
.put("P2P Network", unstableNetworkingOptions)
.put("RPC", unstableRPCOptions)
.put("DNS Configuration", unstableDnsOptions)
.put("NAT Configuration", unstableNatOptions)
.put("Synchronizer", unstableSynchronizerOptions)
.put("TransactionPool", unstableTransactionPoolOptions)
.put("Ethstats", unstableEthstatsOptions)
.put("Mining", unstableMiningOptions)
.put("Native Library", unstableNativeLibraryOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
}
private void preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(SecurityModuleService.class, securityModuleService);
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
// register default security module
securityModuleService.register(
DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule));
}
private SecurityModule defaultSecurityModule() {
return new KeyPairSecurityModule(loadKeyPair());
}
@VisibleForTesting
SECP256K1.KeyPair loadKeyPair() {
return KeyPairUtil.loadKeyPair(nodePrivateKeyFile());
}
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
    // Create a handler that searches for a config file option, uses it to supply
    // default values, and then runs the regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment);
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
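  // Sketch of the resulting two-phase parsing (an assumption based on the handler name
  // and the comment above): with "besu --config-file=config.toml --rpc-http-enabled",
  // values from config.toml are applied as defaults and options given explicitly on the
  // command line take precedence over them.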
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
apiConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes,
pidPath);
}
private BesuCommand startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolContext().getBlockchain(),
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
return this;
}
public void configureLogging(final boolean announce) {
// To change the configuration if color was enabled/disabled
Configurator.reconfigure();
// set log level per CLI flags
if (logLevel != null) {
if (announce) {
System.out.println("Setting logging level to " + logLevel.name());
}
Configurator.setAllLevels("", logLevel);
}
}
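  // Example (illustrative): running with "--logging=DEBUG" prints
  // "Setting logging level to DEBUG" when announce is true and applies DEBUG to all
  // loggers via Configurator.setAllLevels("", logLevel).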
public static Optional<Boolean> getColorEnabled() {
return Optional.ofNullable(colorEnabled);
}
private void configureNativeLibs() {
if (unstableNativeLibraryOptions.getNativeAltbn128()) {
AbstractAltBnPrecompiledContract.enableNative();
}
if (unstableNativeLibraryOptions.getNativeSecp256k1()) {
SECP256K1.enableNative();
}
}
private BesuCommand validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
validateNatParams();
validateNetStatsParams();
validateDnsOptionsParams();
validateGoQuorumCompatibilityModeParam();
return this;
}
@SuppressWarnings("ConstantConditions")
private void validateMiningParams() {
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) "
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
if (!isMiningEnabled && iStratumMiningEnabled) {
throw new ParameterException(
this.commandLine,
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled) "
+ "or specify mining is enabled (--miner-enabled)");
}
}
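  // Examples of combinations rejected above (illustrative): "--miner-enabled" without
  // "--miner-coinbase <Address>", and "--miner-stratum-enabled" without
  // "--miner-enabled"; both fail fast with a ParameterException.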
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
@SuppressWarnings("ConstantConditions")
private void validateNatParams() {
if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES))
&& !unstableNatOptions
.getNatManagerServiceName()
.equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name"
+ " or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled"
+ " or select another mode (via --nat--method=XXXX)");
}
}
private void validateNetStatsParams() {
if (Strings.isNullOrEmpty(unstableEthstatsOptions.getEthstatsUrl())
&& !unstableEthstatsOptions.getEthstatsContact().isEmpty()) {
throw new ParameterException(
this.commandLine,
"The `--Xethstats-contact` requires ethstats server URL to be provided. Either remove --Xethstats-contact"
+ " or provide an url (via --Xethstats=nodename:secret@host:port)");
}
}
private void validateDnsOptionsParams() {
if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xdns-update-enabled` requires dns to be enabled. Either remove --Xdns-update-enabled"
+ " or specify dns is enabled (--Xdns-enabled)");
}
}
private void validateGoQuorumCompatibilityModeParam() {
if (isGoQuorumCompatibilityMode) {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
if (!genesisConfigOptions.isQuorum()) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true.");
}
}
}
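  // Illustrative genesis snippet satisfying the check above (the exact key placement is
  // an assumption based on GenesisConfigOptions): { "config": { "isQuorum": true, ... } }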
private GenesisConfigOptions readGenesisConfigOptions() {
final GenesisConfigOptions genesisConfigOptions;
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides);
} catch (Exception e) {
throw new IllegalStateException("Unable to read genesis file for GoQuorum options", e);
}
return genesisConfigOptions;
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList(
"--miner-coinbase",
"--min-gas-price",
"--min-block-occupancy-ratio",
"--miner-extra-data",
"--miner-stratum-enabled",
"--Xminer-remote-sealers-limit",
"--Xminer-remote-sealers-hashrate-ttl"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) {
logger.warn(
DEPENDENCY_WARNING_MSG,
"--node-private-key-file",
"--security-module=" + DEFAULT_SECURITY_MODULE);
}
}
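  // Example (illustrative): passing "--fast-sync-min-peers" while "--sync-mode" is not
  // FAST only logs a dependency warning here; it does not abort startup.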
private BesuCommand configure() throws Exception {
checkPortClash();
if (isGoQuorumCompatibilityMode) {
checkGoQuorumCompatibilityConfig();
}
syncMode =
Optional.ofNullable(syncMode)
.orElse(
genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV
? SyncMode.FAST
: SyncMode.FULL);
ethNetworkConfig = updateNetworkConfig(getNetwork());
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
apiConfiguration = apiConfiguration();
// hostsWhitelist is a hidden option. If it is specified, add the list to hostAllowlist
if (!hostsWhitelist.isEmpty()) {
// if allowlist == default values, remove the default values
if (hostsAllowlist.size() == 2
&& hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) {
hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1"));
}
hostsAllowlist.addAll(hostsWhitelist);
}
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes();
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p));
metricsConfiguration = metricsConfiguration();
logger.info("Security Module: {}", securityModuleName);
return this;
}
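  // Sync-mode defaulting example, taken from the logic above: with no "--sync-mode", no
  // "--genesis-file", privacy disabled and a network other than DEV, Besu defaults to
  // FAST; otherwise it falls back to FULL.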
private NetworkName getNetwork() {
    // noinspection ConstantConditions - network is not always null: it is injected by
    // PicoCLI when the option is used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInAllowlist(
final Collection<EnodeURL> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInAllowlist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private BesuCommand controller() {
besuController = buildController();
return this;
}
public BesuController buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
public BesuControllerBuilder getControllerBuilder() {
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(
coinbase,
minTransactionGasPrice,
extraData,
isMiningEnabled,
iStratumMiningEnabled,
stratumNetworkInterface,
stratumPort,
unstableMiningOptions.getStratumExtranonce(),
Optional.empty(),
minBlockOccupancyRatio,
unstableMiningOptions.getRemoteSealersLimit(),
unstableMiningOptions.getRemoteSealersTimeToLive()))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodeKey(buildNodeKey())
.metricsSystem(metricsSystem.get())
.privacyParameters(privacyParameters())
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(keyStorageProvider(keyValueStorageName))
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(
new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained))
.genesisConfigOverrides(genesisConfigOverrides)
.gasLimitCalculator(
Optional.ofNullable(targetGasLimit)
.<GasLimitCalculator>map(TargetingGasLimitCalculator::new)
.orElse(GasLimitCalculator.constant()))
.requiredBlocks(requiredBlocks)
.reorgLoggingThreshold(reorgLoggingThreshold);
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isGraphQLHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsAllowlist(hostsAllowlist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
checkRpcTlsClientAuthOptionsDependencies();
checkRpcTlsOptionsDependencies();
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file",
"--rpc-http-authentication-public-key-file",
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
if (isRpcHttpAuthenticationEnabled
&& rpcHttpAuthenticationCredentialsFile() == null
&& rpcHttpAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile);
jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration());
jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return jsonRpcConfiguration;
}
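  // Minimal authenticated-HTTP sketch based on the checks above (file name is a
  // placeholder): "--rpc-http-enabled --rpc-http-authentication-enabled
  // --rpc-http-authentication-credentials-file=auth.toml"; omitting both the credentials
  // file and the JWT public key file fails with a ParameterException.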
private void checkRpcTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-enabled",
!isRpcHttpTlsEnabled,
asList(
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsClientAuthOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-client-auth-enabled",
!isRpcHttpTlsClientAuthEnabled,
asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled"));
}
private void checkPrivacyTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-tls-enabled",
!isPrivacyTlsEnabled,
asList(
"--privacy-tls-keystore-file",
"--privacy-tls-keystore-password-file",
"--privacy-tls-known-enclave-file"));
}
private Optional<TlsConfiguration> rpcHttpTlsConfiguration() {
if (!isRpcTlsConfigurationRequired()) {
return Optional.empty();
}
if (rpcHttpTlsKeyStoreFile == null) {
throw new ParameterException(
commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (rpcHttpTlsKeyStorePasswordFile == null) {
throw new ParameterException(
commandLine,
"File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (isRpcHttpTlsClientAuthEnabled
&& !isRpcHttpTlsCAClientsEnabled
&& rpcHttpTlsKnownClientsFile == null) {
throw new ParameterException(
commandLine,
"Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint");
}
return Optional.of(
TlsConfiguration.Builder.aTlsConfiguration()
.withKeyStorePath(rpcHttpTlsKeyStoreFile)
.withKeyStorePasswordSupplier(
new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile))
.withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration())
.build());
}
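  // Minimal TLS flag set implied by the checks above (paths are placeholders):
  // "--rpc-http-enabled --rpc-http-tls-enabled --rpc-http-tls-keystore-file=rpc.pfx
  // --rpc-http-tls-keystore-password-file=rpc.pass"; client authentication additionally
  // needs a known-clients file or "--rpc-http-tls-ca-clients-enabled".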
private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() {
if (isRpcHttpTlsClientAuthEnabled) {
return TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration()
.withKnownClientsFile(rpcHttpTlsKnownClientsFile)
.withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled)
.build();
}
return null;
}
private boolean isRpcTlsConfigurationRequired() {
return isRpcHttpEnabled && isRpcHttpTlsEnabled;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file"));
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& rpcWsAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsAllowlist(hostsAllowlist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec());
return webSocketConfiguration;
}
private ApiConfiguration apiConfiguration() {
return ImmutableApiConfiguration.builder()
.gasPriceBlocks(apiGasPriceBlocks)
.gasPricePercentile(apiGasPricePercentile)
.gasPriceMin(minTransactionGasPrice.toLong())
.gasPriceMax(apiGasPriceMax)
.build();
}
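  // The values above feed eth_gasPrice: it considers the last --api-gas-price-blocks
  // blocks, takes the --api-gas-price-percentile percentile, and (presumably, given the
  // min/max fields) bounds the result by --min-gas-price and --api-gas-price-max.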
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return unstableMetricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.protocol(metricsProtocol)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsAllowlist(hostsAllowlist)
.prometheusJob(metricsPrometheusJob)
.build();
}
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile);
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountPermissionsConfigFile);
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
getEnodeDnsConfiguration(),
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile);
}
if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountPermissionsConfigFile);
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion(
permissionsNodesContractVersion);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration),
quorumPermissioningConfig());
return Optional.of(permissioningConfiguration);
}
private Optional<QuorumPermissioningConfiguration> quorumPermissioningConfig() {
if (!isGoQuorumCompatibilityMode) {
return Optional.empty();
}
try {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber();
return Optional.of(
QuorumPermissioningConfiguration.enabled(qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK)));
} catch (Exception e) {
throw new IllegalStateException("Error reading GoQuorum permissioning options", e);
}
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
private PrivacyParameters privacyParameters() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList(
"--privacy-url",
"--privacy-public-key-file",
"--privacy-multi-tenancy-enabled",
"--privacy-tls-enabled"));
checkPrivacyTlsOptionsDependencies();
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
if (isPrivacyEnabled) {
final String errorSuffix = "cannot be enabled with privacy.";
if (syncMode == SyncMode.FAST) {
throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
if (isPrivacyMultiTenancyEnabled
&& !jsonRpcConfiguration.isAuthenticationEnabled()
&& !webSocketConfiguration.isAuthenticationEnabled()) {
throw new ParameterException(
commandLine,
"Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled");
}
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled);
privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled);
final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null;
if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) {
try {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile);
} catch (final IOException e) {
throw new ParameterException(
commandLine, "Problem with privacy-public-key-file: " + e.getMessage(), e);
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e);
}
} else if (hasPrivacyPublicKey) {
throw new ParameterException(
commandLine, "Privacy multi-tenancy and privacy public key cannot be used together");
} else if (!isPrivacyMultiTenancyEnabled) {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) {
        // If gas is required, we cannot use random keys to sign private transactions,
        // i.e. --privacy-marker-transaction-signing-key-file must be set.
if (privacyMarkerTransactionSigningKeyPath == null) {
throw new ParameterException(
commandLine,
"Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks");
}
}
if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) {
logger.warn(
"--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled.");
}
privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
privacyKeyStorageProvider(keyValueStorageName + "-privacy"));
if (isPrivacyTlsEnabled) {
privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile);
privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile);
privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile);
}
privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx));
} else {
if (anyPrivacyApiEnabled()) {
logger.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
}
}
final PrivacyParameters privacyParameters = privacyParametersBuilder.build();
if (isPrivacyEnabled) {
preSynchronizationTaskRunner.addTask(
new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase));
}
return privacyParameters;
}
private boolean anyPrivacyApiEnabled() {
return rpcHttpApis.contains(RpcApis.EEA)
|| rpcWsApis.contains(RpcApis.EEA)
|| rpcHttpApis.contains(RpcApis.PRIV)
|| rpcWsApis.contains(RpcApis.PRIV);
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
return new PrivacyKeyValueStorageProviderBuilder()
.withStorageFactory(privacyKeyValueStorageFactory(name))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) {
return (PrivacyKeyValueStorageFactory)
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name));
}
private KeyValueStorageProvider keyStorageProvider(final String name) {
return new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private SynchronizerConfiguration buildSyncConfig() {
return unstableSynchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return unstableTransactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pooledTransactionHashesSize(pooledTransactionHashesSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.priceBump(Percentage.fromInt(priceBump))
.txFeeCap(txFeeCap)
.build();
}
private boolean isPruningEnabled() {
return pruningEnabled;
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final ApiConfiguration apiConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes,
final Path pidPath) {
checkNotNull(runnerBuilder);
permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(vertx)
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.natManagerServiceName(unstableNatOptions.getNatManagerServiceName())
.natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled())
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.randomPeerPriority(randomPeerPriority)
.networkingConfiguration(unstableNetworkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.apiConfiguration(apiConfiguration)
.pidPath(pidPath)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.identityString(identityString)
.besuPluginContext(besuPluginContext)
.autoLogBloomCaching(autoLogBloomCachingEnabled)
.ethstatsUrl(unstableEthstatsOptions.getEthstatsUrl())
.ethstatsContact(unstableEthstatsOptions.getEthstatsContact())
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
protected Vertx createVertx(final VertxOptions vertxOptions) {
return Vertx.vertx(vertxOptions);
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
// Used to discover the default IP of the client.
  // Loopback IP is used by default as this is how the smoke tests require it to be,
  // and it's probably good security behaviour to default only to localhost.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
    // Using a custom genesis file comes with specific default values for the genesis
    // file itself, but also for the network id and the bootnodes list.
if (genesisFile != null) {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
if (this.network != null) {
        // We check whether the network option was really provided by the user rather
        // than just taking the default value. If the user provided it together with
        // the genesis file option, we raise a conflict error.
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
        // If no network id option is defined on the CLI, we have to set a default value
        // from the genesis file. We only parse the genesis in this case because we
        // already have network id constants for known networks, which speeds up the
        // process; we also don't have a parsed version of the genesis at this stage.
        // If no chain id is found in the genesis (it is optional), we use the mainnet
        // network id.
try {
builder.setNetworkId(
getGenesisConfigFile()
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
        // We default to an empty bootnodes list if the option is not provided on the CLI,
        // because the mainnet bootnodes won't work as a default for a custom genesis. An
        // empty default list forces the user to create a custom one, which is better than
        // a useless default that may make the user think it works when it can't.
builder.setBootNodes(new ArrayList<>());
}
builder.setDnsDiscoveryUrl(null);
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
try {
final List<EnodeURL> listBootNodes =
bootNodes.stream()
.filter(value -> !value.isEmpty())
.map(url -> EnodeURL.fromString(url, getEnodeDnsConfiguration()))
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(listBootNodes);
builder.setBootNodes(listBootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
return builder.build();
}
private GenesisConfigFile getGenesisConfigFile() {
return GenesisConfigFile.fromConfig(genesisConfig());
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile.toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e);
}
}
// dataDir() is public because it is accessed by subcommands
public Path dataDir() {
return dataPath.toAbsolutePath();
}
private Path pluginsDir() {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
}
@VisibleForTesting
NodeKey buildNodeKey() {
return new NodeKey(securityModule());
}
private SecurityModule securityModule() {
return securityModuleService
.getByName(securityModuleName)
.orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName))
.get();
}
private File nodePrivateKeyFile() {
return Optional.ofNullable(nodePrivateKeyFile)
.orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir()));
}
private String rpcHttpAuthenticationCredentialsFile() {
final String filename = rpcHttpAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
final String filename = rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final String staticNodesFilename = "static-nodes.json";
final Path staticNodesPath = dataDir().resolve(staticNodesFilename);
return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration());
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
public EnodeDnsConfiguration getEnodeDnsConfiguration() {
if (enodeDnsConfiguration == null) {
enodeDnsConfiguration = unstableDnsOptions.toDomainObject();
}
return enodeDnsConfiguration;
}
private void checkPortClash() {
// List of port parameters
final List<Integer> ports =
asList(
p2pPort,
graphQLHttpPort,
rpcHttpPort,
rpcWsPort,
metricsPort,
metricsPushPort,
stratumPort);
ports.stream()
.filter(Objects::nonNull)
.forEach(
port -> {
if (port != 0 && !allocatedPorts.add(port)) {
throw new ParameterException(
commandLine,
"Port number '"
+ port
+ "' has been specified multiple times. Please review the supplied configuration.");
}
});
}
private void checkGoQuorumCompatibilityConfig() {
if (genesisFile != null
&& getGenesisConfigFile().getConfigOptions().isQuorum()
&& !minTransactionGasPrice.isZero()) {
throw new ParameterException(
this.commandLine,
"--min-gas-price must be set to zero if GoQuorum compatibility is enabled in the genesis config.");
}
}
@VisibleForTesting
Level getLogLevel() {
return logLevel;
}
private class BesuCommandConfigurationService implements BesuConfiguration {
@Override
public Path getStoragePath() {
return dataDir().resolve(DATABASE_PATH);
}
@Override
public Path getDataPath() {
return dataDir();
}
}
}
| 1 | 23,984 | what needs to happen if it is not present? | hyperledger-besu | java |
@@ -88,6 +88,11 @@ func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus {
return model.StageStatus_STAGE_FAILURE
}
+ chartRepoName := e.deployCfg.Input.HelmChart.Repository
+ if chartRepoName != "" {
+ e.deployCfg.Input.HelmChart.Insecure = ds.DeploymentConfig.PipedSpec.IsInsecureChartRepository(chartRepoName)
+ }
+
e.provider = provider.NewProvider(e.Deployment.ApplicationName, ds.AppDir, ds.RepoDir, e.Deployment.GitPath.ConfigFilename, e.deployCfg.Input, e.Logger)
e.Logger.Info("start executing kubernetes stage",
zap.String("stage-name", e.Stage.Name), | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"errors"
"fmt"
"strings"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
provider "github.com/pipe-cd/pipe/pkg/app/piped/cloudprovider/kubernetes"
"github.com/pipe-cd/pipe/pkg/app/piped/executor"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
variantLabel = "pipecd.dev/variant" // Variant name: primary, stage, baseline
)
type deployExecutor struct {
executor.Input
commit string
deployCfg *config.KubernetesDeploymentSpec
provider provider.Provider
}
type registerer interface {
Register(stage model.Stage, f executor.Factory) error
RegisterRollback(kind model.ApplicationKind, f executor.Factory) error
}
// Register registers this executor factory into a given registerer.
func Register(r registerer) {
f := func(in executor.Input) executor.Executor {
return &deployExecutor{
Input: in,
}
}
r.Register(model.StageK8sSync, f)
r.Register(model.StageK8sPrimaryRollout, f)
r.Register(model.StageK8sCanaryRollout, f)
r.Register(model.StageK8sCanaryClean, f)
r.Register(model.StageK8sBaselineRollout, f)
r.Register(model.StageK8sBaselineClean, f)
r.Register(model.StageK8sTrafficRouting, f)
r.RegisterRollback(model.ApplicationKind_KUBERNETES, func(in executor.Input) executor.Executor {
return &rollbackExecutor{
Input: in,
}
})
}
func (e *deployExecutor) Execute(sig executor.StopSignal) model.StageStatus {
ctx := sig.Context()
e.commit = e.Deployment.Trigger.Commit.Hash
ds, err := e.TargetDSP.Get(ctx, e.LogPersister)
if err != nil {
e.LogPersister.Errorf("Failed to prepare target deploy source data (%v)", err)
return model.StageStatus_STAGE_FAILURE
}
e.deployCfg = ds.DeploymentConfig.KubernetesDeploymentSpec
if e.deployCfg == nil {
e.LogPersister.Error("Malformed deployment configuration: missing KubernetesDeploymentSpec")
return model.StageStatus_STAGE_FAILURE
}
e.provider = provider.NewProvider(e.Deployment.ApplicationName, ds.AppDir, ds.RepoDir, e.Deployment.GitPath.ConfigFilename, e.deployCfg.Input, e.Logger)
e.Logger.Info("start executing kubernetes stage",
zap.String("stage-name", e.Stage.Name),
zap.String("app-dir", ds.AppDir),
)
var (
originalStatus = e.Stage.Status
status model.StageStatus
)
switch model.Stage(e.Stage.Name) {
case model.StageK8sSync:
status = e.ensureSync(ctx)
case model.StageK8sPrimaryRollout:
status = e.ensurePrimaryRollout(ctx)
case model.StageK8sCanaryRollout:
status = e.ensureCanaryRollout(ctx)
case model.StageK8sCanaryClean:
status = e.ensureCanaryClean(ctx)
case model.StageK8sBaselineRollout:
status = e.ensureBaselineRollout(ctx)
case model.StageK8sBaselineClean:
status = e.ensureBaselineClean(ctx)
case model.StageK8sTrafficRouting:
status = e.ensureTrafficRouting(ctx)
default:
e.LogPersister.Errorf("Unsupported stage %s for kubernetes application", e.Stage.Name)
return model.StageStatus_STAGE_FAILURE
}
return executor.DetermineStageStatus(sig.Signal(), originalStatus, status)
}
func (e *deployExecutor) loadRunningManifests(ctx context.Context) (manifests []provider.Manifest, err error) {
commit := e.Deployment.RunningCommitHash
if commit == "" {
return nil, fmt.Errorf("unable to determine running commit")
}
loader := &manifestsLoadFunc{
loadFunc: func(ctx context.Context) ([]provider.Manifest, error) {
ds, err := e.RunningDSP.Get(ctx, e.LogPersister)
if err != nil {
e.LogPersister.Errorf("Failed to prepare running deploy source (%v)", err)
return nil, err
}
loader := provider.NewManifestLoader(
e.Deployment.ApplicationName,
ds.AppDir,
ds.RepoDir,
e.Deployment.GitPath.ConfigFilename,
e.deployCfg.Input,
e.Logger,
)
return loader.LoadManifests(ctx)
},
}
return loadManifests(ctx, e.Deployment.ApplicationId, commit, e.AppManifestsCache, loader, e.Logger)
}
type manifestsLoadFunc struct {
loadFunc func(context.Context) ([]provider.Manifest, error)
}
func (l *manifestsLoadFunc) LoadManifests(ctx context.Context) ([]provider.Manifest, error) {
return l.loadFunc(ctx)
}
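// loadManifests returns the manifests for the given application and commit,
// consulting the per-application cache first and falling back to the supplied
// loader on a miss; the loaded result is then cached. Illustrative usage only
// (identifiers such as appName/appDir/repoDir/configFile/input are placeholders,
// mirroring loadRunningManifests above):
//
//	loader := &manifestsLoadFunc{
//		loadFunc: func(ctx context.Context) ([]provider.Manifest, error) {
//			return provider.NewManifestLoader(appName, appDir, repoDir, configFile, input, logger).LoadManifests(ctx)
//		},
//	}
//	manifests, err := loadManifests(ctx, appID, commit, manifestsCache, loader, logger)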
func loadManifests(ctx context.Context, appID, commit string, manifestsCache cache.Cache, loader provider.ManifestLoader, logger *zap.Logger) (manifests []provider.Manifest, err error) {
cache := provider.AppManifestsCache{
AppID: appID,
Cache: manifestsCache,
Logger: logger,
}
manifests, ok := cache.Get(commit)
if ok {
return manifests, nil
}
// When the manifests were not in the cache we have to load them.
if manifests, err = loader.LoadManifests(ctx); err != nil {
return nil, err
}
cache.Put(commit, manifests)
return manifests, nil
}
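// addBuiltinAnnontations stamps every manifest with the piped-managed metadata
// (managed-by, piped ID, application ID, variant, original API version, resource
// key and commit hash) so that later stages can tell which piped, application,
// variant and commit each resource belongs to.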
func addBuiltinAnnontations(manifests []provider.Manifest, variant, hash, pipedID, appID string) {
for i := range manifests {
manifests[i].AddAnnotations(map[string]string{
provider.LabelManagedBy: provider.ManagedByPiped,
provider.LabelPiped: pipedID,
provider.LabelApplication: appID,
variantLabel: variant,
provider.LabelOriginalAPIVersion: manifests[i].Key.APIVersion,
provider.LabelResourceKey: manifests[i].Key.String(),
provider.LabelCommitHash: hash,
})
}
}
func applyManifests(ctx context.Context, applier provider.Applier, manifests []provider.Manifest, namespace string, lp executor.LogPersister) error {
if namespace == "" {
lp.Infof("Start applying %d manifests", len(manifests))
} else {
lp.Infof("Start applying %d manifests to %q namespace", len(manifests), namespace)
}
for _, m := range manifests {
if err := applier.ApplyManifest(ctx, m); err != nil {
lp.Errorf("Failed to apply manifest: %s (%v)", m.Key.ReadableString(), err)
return err
}
lp.Successf("- applied manifest: %s", m.Key.ReadableString())
}
lp.Successf("Successfully applied %d manifests", len(manifests))
return nil
}
func deleteResources(ctx context.Context, applier provider.Applier, resources []provider.ResourceKey, lp executor.LogPersister) error {
resourcesLen := len(resources)
if resourcesLen == 0 {
lp.Info("No resources to delete")
return nil
}
lp.Infof("Start deleting %d resources", len(resources))
var deletedCount int
for _, k := range resources {
err := applier.Delete(ctx, k)
if err == nil {
lp.Successf("- deleted resource: %s", k.ReadableString())
deletedCount++
continue
}
if errors.Is(err, provider.ErrNotFound) {
lp.Infof("- no resource %s to delete", k.ReadableString())
deletedCount++
continue
}
lp.Errorf("- unable to delete resource: %s (%v)", k.ReadableString(), err)
}
if deletedCount < resourcesLen {
lp.Infof("Deleted %d/%d resources", deletedCount, resourcesLen)
return fmt.Errorf("unable to delete %d resources", resourcesLen-deletedCount)
}
lp.Successf("Successfully deleted %d resources", len(resources))
return nil
}
func findManifests(kind, name string, manifests []provider.Manifest) []provider.Manifest {
var out []provider.Manifest
for _, m := range manifests {
if m.Key.Kind != kind {
continue
}
if name != "" && m.Key.Name != name {
continue
}
out = append(out, m)
}
return out
}
func findConfigMapManifests(manifests []provider.Manifest) []provider.Manifest {
var out []provider.Manifest
for _, m := range manifests {
if !m.Key.IsConfigMap() {
continue
}
out = append(out, m)
}
return out
}
func findSecretManifests(manifests []provider.Manifest) []provider.Manifest {
var out []provider.Manifest
for _, m := range manifests {
if !m.Key.IsSecret() {
continue
}
out = append(out, m)
}
return out
}
func findWorkloadManifests(manifests []provider.Manifest, refs []config.K8sResourceReference) []provider.Manifest {
if len(refs) == 0 {
return findManifests(provider.KindDeployment, "", manifests)
}
workloads := make([]provider.Manifest, 0)
for _, ref := range refs {
kind := provider.KindDeployment
if ref.Kind != "" {
kind = ref.Kind
}
ms := findManifests(kind, ref.Name, manifests)
workloads = append(workloads, ms...)
}
return workloads
}
func duplicateManifests(manifests []provider.Manifest, nameSuffix string) []provider.Manifest {
out := make([]provider.Manifest, 0, len(manifests))
for _, m := range manifests {
out = append(out, duplicateManifest(m, nameSuffix))
}
return out
}
func duplicateManifest(m provider.Manifest, nameSuffix string) provider.Manifest {
name := makeSuffixedName(m.Key.Name, nameSuffix)
return m.Duplicate(name)
}
func generateVariantServiceManifests(services []provider.Manifest, variant, nameSuffix string) ([]provider.Manifest, error) {
manifests := make([]provider.Manifest, 0, len(services))
updateService := func(s *corev1.Service) {
s.Name = makeSuffixedName(s.Name, nameSuffix)
// Currently, we suppose that all generated services should be ClusterIP.
s.Spec.Type = corev1.ServiceTypeClusterIP
// Append the variant label to the selector
// to ensure that the generated service is using only workloads of this variant.
if s.Spec.Selector == nil {
s.Spec.Selector = map[string]string{}
}
s.Spec.Selector[variantLabel] = variant
// Empty all unneeded fields.
s.Spec.ExternalIPs = nil
s.Spec.LoadBalancerIP = ""
s.Spec.LoadBalancerSourceRanges = nil
}
for _, m := range services {
s := &corev1.Service{}
if err := m.ConvertToStructuredObject(s); err != nil {
return nil, err
}
updateService(s)
manifest, err := provider.ParseFromStructuredObject(s)
if err != nil {
return nil, fmt.Errorf("failed to parse Service object to Manifest: %w", err)
}
manifests = append(manifests, manifest)
}
return manifests, nil
}
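// generateVariantWorkloadManifests clones the given Deployment workloads for a
// variant. As an illustrative example, a Deployment named "simple" duplicated for
// variant "canary" with nameSuffix "canary" becomes "simple-canary", has
// pipecd.dev/variant=canary added to its selector and pod template labels, and has
// its volume references to the duplicated ConfigMaps/Secrets renamed with the same
// suffix; replicas are recomputed via replicasCalculator when one is provided.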
func generateVariantWorkloadManifests(workloads, configmaps, secrets []provider.Manifest, variant, nameSuffix string, replicasCalculator func(*int32) int32) ([]provider.Manifest, error) {
manifests := make([]provider.Manifest, 0, len(workloads))
cmNames := make(map[string]struct{}, len(configmaps))
for i := range configmaps {
cmNames[configmaps[i].Key.Name] = struct{}{}
}
secretNames := make(map[string]struct{}, len(secrets))
for i := range secrets {
secretNames[secrets[i].Key.Name] = struct{}{}
}
updatePod := func(pod *corev1.PodTemplateSpec) {
// Add variant labels.
if pod.Labels == nil {
pod.Labels = map[string]string{}
}
pod.Labels[variantLabel] = variant
// Update volumes to use canary's ConfigMaps and Secrets.
for i := range pod.Spec.Volumes {
if cm := pod.Spec.Volumes[i].ConfigMap; cm != nil {
if _, ok := cmNames[cm.Name]; ok {
cm.Name = makeSuffixedName(cm.Name, nameSuffix)
}
}
if s := pod.Spec.Volumes[i].Secret; s != nil {
if _, ok := secretNames[s.SecretName]; ok {
s.SecretName = makeSuffixedName(s.SecretName, nameSuffix)
}
}
}
}
updateDeployment := func(d *appsv1.Deployment) {
d.Name = makeSuffixedName(d.Name, nameSuffix)
if replicasCalculator != nil {
replicas := replicasCalculator(d.Spec.Replicas)
d.Spec.Replicas = &replicas
}
d.Spec.Selector = metav1.AddLabelToSelector(d.Spec.Selector, variantLabel, variant)
updatePod(&d.Spec.Template)
}
for _, m := range workloads {
switch m.Key.Kind {
case provider.KindDeployment:
d := &appsv1.Deployment{}
if err := m.ConvertToStructuredObject(d); err != nil {
return nil, err
}
updateDeployment(d)
manifest, err := provider.ParseFromStructuredObject(d)
if err != nil {
return nil, err
}
manifests = append(manifests, manifest)
default:
return nil, fmt.Errorf("unsupported workload kind %s", m.Key.Kind)
}
}
return manifests, nil
}
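// checkVariantSelectorInWorkload verifies that the workload manifest pins the
// expected variant in both spec.selector.matchLabels and
// spec.template.metadata.labels. For example (illustrative manifest, not taken
// from this repository), a workload for variant "primary" must contain:
//
//	spec:
//	  selector:
//	    matchLabels:
//	      pipecd.dev/variant: primary
//	  template:
//	    metadata:
//	      labels:
//	        pipecd.dev/variant: primary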
func checkVariantSelectorInWorkload(m provider.Manifest, variant string) error {
var (
matchLabelsFields = []string{"spec", "selector", "matchLabels"}
labelsFields = []string{"spec", "template", "metadata", "labels"}
)
matchLabels, err := m.GetNestedStringMap(matchLabelsFields...)
if err != nil {
return err
}
value, ok := matchLabels[variantLabel]
if !ok {
return fmt.Errorf("missing %s key in spec.selector.matchLabels", variantLabel)
}
if value != variant {
return fmt.Errorf("require %s but got %s for %s key in %s", variant, value, variantLabel, strings.Join(matchLabelsFields, "."))
}
labels, err := m.GetNestedStringMap(labelsFields...)
if err != nil {
return err
}
value, ok = labels[variantLabel]
if !ok {
return fmt.Errorf("missing %s key in spec.template.metadata.labels", variantLabel)
}
if value != variant {
return fmt.Errorf("require %s but got %s for %s key in %s", variant, value, variantLabel, strings.Join(labelsFields, "."))
}
return nil
}
func ensureVariantSelectorInWorkload(m provider.Manifest, variant string) error {
variantMap := map[string]string{
variantLabel: variant,
}
if err := m.AddStringMapValues(variantMap, "spec", "selector", "matchLabels"); err != nil {
return err
}
return m.AddStringMapValues(variantMap, "spec", "template", "metadata", "labels")
}
func makeSuffixedName(name, suffix string) string {
if suffix != "" {
return name + "-" + suffix
}
return name
}
| 1 | 16,630 | `DeploymentConfig` is only for deployment configuration not Piped configuration so `ds.DeploymentConfig.PipedSpec` is always nil. Instead of that, you can have Piped config with `e.PipedConfig` because it is placing inside `executor.Input`. | pipe-cd-pipe | go |
@@ -1,6 +1,12 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
+# Purpose
+# This code example demonstrates how to set the access control list (ACL) on an
+# object in an Amazon Simple Storage Solution (Amazon S3) bucket for the given owner.
+
+# snippet-start:[s3.s3_set_bucket_object_acls.rb]
+
require 'aws-sdk-s3'
# Sets the access control list (ACL) on an object in an Amazon S3 | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
require 'aws-sdk-s3'
# Sets the access control list (ACL) on an object in an Amazon S3
# bucket for the given owner.
#
# Prerequisites:
#
# - An Amazon S3 bucket.
# - An object in the bucket.
# - The owner's canonical ID.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The name of the bucket.
# @param object_key [String] The name of the object.
# @param permission [String] The object permission level. Allowed values
# include READ, READ_ACP, WRITE, WRITE_ACP, and FULL_CONTROL.
# @param owner_id [String] The canonical ID of the owner.
# @return [Boolean] true if the ACL was set; otherwise, false.
# @example
# exit 1 unless object_acl_set_for_owner_id?(
# Aws::S3::Client.new(region: 'us-east-1'),
# 'doc-example-bucket',
# 'my-file.txt',
# 'READ',
# 'b380d412791d395dbcdc1fb1728b32a7cd07edae6467220ac4b7c0769EXAMPLE'
# )
def object_acl_set_for_owner_id?(
s3_client,
bucket_name,
object_key,
permission,
owner_id
)
s3_client.put_object_acl(
access_control_policy: {
grants: [
{
grantee: {
id: owner_id,
type: 'CanonicalUser'
},
permission: permission
}
],
owner: {
id: owner_id
}
},
bucket: bucket_name,
key: object_key
)
return true
rescue StandardError => e
puts "Error setting object ACL: #{e.message}"
return false
end
# Full example call:
def run_me
bucket_name = 'doc-example-bucket'
object_key = 'my-file-1.txt'
permission = 'READ'
owner_id = 'b380d412791d395dbcdc1fb1728b32a7cd07edae6467220ac4b7c0769EXAMPLE'
region = 'us-east-1'
s3_client = Aws::S3::Client.new(region: region)
if object_acl_set_for_owner_id?(
s3_client,
bucket_name,
object_key,
permission,
owner_id
)
puts 'Object ACL set.'
else
puts 'Object ACL not set.'
end
end
run_me if $PROGRAM_NAME == __FILE__
| 1 | 20,551 | Simple Storage **Service** | awsdocs-aws-doc-sdk-examples | rb |
@@ -35,8 +35,13 @@ namespace OpenTelemetry.Instrumentation.AspNet
});
/// <summary>
- /// Gets or sets a hook to exclude calls based on domain or other per-request criterion.
+ /// Gets or sets a Filter function to filter instrumentation for requests on a per request basis.
+ /// The functions gets the HttpContext, and should return a boolean.
+ /// If functions returns true, the request is collected.
+ /// If functions returns false, the request is filtered out.
+ /// If filter throws exception, then this is considered as no filter being configured.
+ /// and requested is collected.
/// </summary>
- internal Predicate<HttpContext> RequestFilter { get; set; }
+ public Func<HttpContext, bool> Filter { get; set; }
}
} | 1 | // <copyright file="AspNetInstrumentationOptions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Web;
using OpenTelemetry.Context.Propagation;
namespace OpenTelemetry.Instrumentation.AspNet
{
/// <summary>
/// Options for requests instrumentation.
/// </summary>
public class AspNetInstrumentationOptions
{
/// <summary>
/// Gets or sets <see cref="ITextFormat"/> for context propagation. Default value: <see cref="CompositePropagator"/> with <see cref="TraceContextFormat"/> & <see cref="BaggageFormat"/>.
/// </summary>
public ITextFormat TextFormat { get; set; } = new CompositePropagator(new ITextFormat[]
{
new TraceContextFormat(),
new BaggageFormat(),
});
/// <summary>
/// Gets or sets a hook to exclude calls based on domain or other per-request criterion.
/// </summary>
internal Predicate<HttpContext> RequestFilter { get; set; }
}
}
| 1 | 16,710 | This line doesn't seem to be right? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -271,6 +271,7 @@ func (c *CVCController) createVolumeOperation(cvc *apis.CStorVolumeClaim) (*apis
// update the cstorvolume reference, phase as "Bound" and desired
// capacity
cvc.Spec.CStorVolumeRef = volumeRef
+ cvc.Spec.Policy = volumePolicy.Spec
cvc.Status.Phase = apis.CStorVolumeClaimPhaseBound
cvc.Status.Capacity = cvc.Spec.Capacity
| 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cstorvolumeclaim
import (
"encoding/json"
"fmt"
"time"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
errors "github.com/pkg/errors"
"k8s.io/klog"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/util/slice"
)
const (
// SuccessSynced is used as part of the Event 'reason' when a
// cstorvolumeclaim is synced
SuccessSynced = "Synced"
// Provisioning is used as part of the Event 'reason' when a
// cstorvolumeclaim is in provisioning stage
Provisioning = "Provisioning"
// ErrResourceExists is used as part of the Event 'reason' when a
// cstorvolumeclaim fails to sync due to a cstorvolumeclaim of the same
// name already existing.
ErrResourceExists = "ErrResourceExists"
// MessageResourceExists is the message used for Events when a resource
// fails to sync due to a cstorvolumeclaim already existing
MessageResourceExists = "Resource %q already exists and is not managed by CVC"
// MessageResourceSynced is the message used for an Event fired when a
// cstorvolumeclaim is synced successfully
MessageResourceSynced = "cstorvolumeclaim synced successfully"
// MessageResourceCreated msg used for cstor volume provisioning success event
MessageResourceCreated = "cstorvolumeclaim created successfully"
// MessageCVCPublished msg used for cstor volume provisioning publish events
MessageCVCPublished = "cstorvolumeclaim %q must be published/attached on node"
// CStorVolumeClaimFinalizer name of finalizer on CStorVolumeClaim that
// are bound by CStorVolume
CStorVolumeClaimFinalizer = "cvc.openebs.io/finalizer"
)
var knownResizeConditions = map[apis.CStorVolumeClaimConditionType]bool{
apis.CStorVolumeClaimResizing: true,
apis.CStorVolumeClaimResizePending: true,
}
// Patch represents a single JSON patch operation used to patch
// the cstorvolumeclaim object.
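// For illustration only (not part of the original file): removeClaimFinalizer
// below marshals []Patch{{Op: "remove", Path: "/metadata/finalizers"}} into the
// JSON document [{"op":"remove","path":"/metadata/finalizers","value":""}] and
// applies it with types.JSONPatchType.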
type Patch struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}
// syncHandler compares the actual state with the desired state and attempts to
// converge the two. It then updates the Status block of the CStorVolumeClaim
// resource to reflect its current status.
func (c *CVCController) syncHandler(key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing cstorvolumeclaim %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing cstorvolumeclaim %q (%v)", key, time.Since(startTime))
}()
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the cvc resource with this namespace/name
cvc, err := c.cvcLister.CStorVolumeClaims(namespace).Get(name)
if k8serror.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("cstorvolumeclaim '%s' has been deleted", key))
return nil
}
if err != nil {
return err
}
cvcCopy := cvc.DeepCopy()
err = c.syncCVC(cvcCopy)
return err
}
// enqueueCVC takes a CVC resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than CStorVolumeClaims.
func (c *CVCController) enqueueCVC(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
runtime.HandleError(err)
return
}
c.workqueue.Add(key)
/* if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if cvc, ok := obj.(*apis.CStorVolumeClaim); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(cvc)
if err != nil {
klog.Errorf("failed to get key from object: %v, %v", err, cvc)
return
}
klog.V(5).Infof("enqueued %q for sync", objName)
c.workqueue.Add(objName)
}
*/
}
// syncCVC is the function which tries to converge to a desired state for the
// CStorVolumeClaim.
func (c *CVCController) syncCVC(cvc *apis.CStorVolumeClaim) error {
var err error
// CStor Volume Claim should be deleted. Check if deletion timestamp is set
// and remove finalizer.
if c.isClaimDeletionCandidate(cvc) {
klog.Infof("syncClaim: remove finalizer for CStorVolumeClaimVolume [%s]", cvc.Name)
return c.removeClaimFinalizer(cvc)
}
volName := cvc.Name
if volName == "" {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
runtime.HandleError(fmt.Errorf("%+v: cvc name must be specified", cvc))
return nil
}
nodeID := cvc.Publish.NodeID
if nodeID == "" {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
runtime.HandleError(fmt.Errorf("cvc must be publish/attached to Node: %+v", cvc))
c.recorder.Event(cvc, corev1.EventTypeWarning,
Provisioning,
fmt.Sprintf(MessageCVCPublished, cvc.Name),
)
return nil
}
if cvc.Status.Phase == apis.CStorVolumeClaimPhasePending {
klog.V(2).Infof("provisioning cstor volume %+v", cvc)
_, err = c.createVolumeOperation(cvc)
if err != nil {
//Record an event to indicate that any provisioning operation is failed.
c.recorder.Eventf(cvc, corev1.EventTypeWarning, Provisioning, err.Error())
}
}
// If an error occurs during Get/Create, we'll requeue the item so we can
// attempt processing again later. This could have been caused by a
// temporary network failure, or any other transient reason.
if err != nil {
return err
}
if c.cvcNeedResize(cvc) {
err = c.resizeCVC(cvc)
}
// If an error occurs during Get/Create, we'll requeue the item so we can
// attempt processing again later. This could have been caused by a
// temporary network failure, or any other transient reason.
if err != nil {
return err
}
return nil
}
// updateCVCObj updates the cstorvolumeclaim object resource to reflect the
// current state of the world.
func (c *CVCController) updateCVCObj(
cvc *apis.CStorVolumeClaim,
cv *apis.CStorVolume,
) error {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
cvcCopy := cvc.DeepCopy()
if cvc.Name != cv.Name {
return fmt.
Errorf("could not bind cstorvolumeclaim %s and cstorvolume %s, name does not match",
cvc.Name,
cv.Name)
}
_, err := c.clientset.OpenebsV1alpha1().CStorVolumeClaims(cvc.Namespace).Update(cvcCopy)
if err == nil {
c.recorder.Event(cvc, corev1.EventTypeNormal,
SuccessSynced,
MessageResourceCreated,
)
}
return err
}
// createVolumeOperation triggers all the required resource create operations:
// 1. Create volume service.
// 2. Create cstorvolume resource with required iscsi information.
// 3. Create target deployment.
// 4. Create cstorvolumeclaim resource.
// 5. Update the cstorvolumeclaim with claimRef info and bound with cstorvolume.
func (c *CVCController) createVolumeOperation(cvc *apis.CStorVolumeClaim) (*apis.CStorVolumeClaim, error) {
policyName := cvc.Annotations[string(apis.VolumePolicyKey)]
volumePolicy, err := c.getVolumePolicy(policyName, cvc)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume service resource")
svcObj, err := getOrCreateTargetService(cvc)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume resource")
cvObj, err := getOrCreateCStorVolumeResource(svcObj, cvc)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume target deployment")
_, err = getOrCreateCStorTargetDeployment(cvObj, volumePolicy)
if err != nil {
return nil, err
}
klog.V(2).Infof("creating cstorvolume replica resource")
err = c.distributePendingCVRs(cvc, cvObj, svcObj)
if err != nil {
return nil, err
}
volumeRef, err := ref.GetReference(scheme.Scheme, cvObj)
if err != nil {
return nil, err
}
// update the cstorvolume reference, phase as "Bound" and desired
// capacity
cvc.Spec.CStorVolumeRef = volumeRef
cvc.Status.Phase = apis.CStorVolumeClaimPhaseBound
cvc.Status.Capacity = cvc.Spec.Capacity
err = c.updateCVCObj(cvc, cvObj)
if err != nil {
return nil, err
}
return cvc, nil
}
func (c *CVCController) getVolumePolicy(
policyName string,
cvc *apis.CStorVolumeClaim,
) (*apis.CStorVolumePolicy, error) {
volumePolicy := &apis.CStorVolumePolicy{}
var err error
if policyName != "" {
klog.Infof("uses cstorvolume policy %q to configure volume %q", policyName, cvc.Name)
volumePolicy, err = c.clientset.OpenebsV1alpha1().CStorVolumePolicies(getNamespace()).Get(policyName, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrapf(
err,
"failed to get volume policy %q of volume %q",
policyName,
cvc.Name,
)
}
}
return volumePolicy, nil
}
// distributePendingCVRs triggers creation and distribution of the pending
// cstorvolumereplica resources among the available cstor pools.
func (c *CVCController) distributePendingCVRs(
cvc *apis.CStorVolumeClaim,
cv *apis.CStorVolume,
service *corev1.Service,
) error {
pendingReplicaCount, err := c.getPendingCVRCount(cvc)
if err != nil {
return err
}
err = distributeCVRs(pendingReplicaCount, cvc, service, cv)
if err != nil {
return err
}
return nil
}
// isClaimDeletionCandidate checks if a cstorvolumeclaim is a deletion candidate.
func (c *CVCController) isClaimDeletionCandidate(cvc *apis.CStorVolumeClaim) bool {
return cvc.ObjectMeta.DeletionTimestamp != nil &&
slice.ContainsString(cvc.ObjectMeta.Finalizers, CStorVolumeClaimFinalizer, nil)
}
// removeClaimFinalizer removes the finalizers present in the CStorVolumeClaim resource.
// TODO Avoid removing clone finalizer
func (c *CVCController) removeClaimFinalizer(
cvc *apis.CStorVolumeClaim,
) error {
cvcPatch := []Patch{
Patch{
Op: "remove",
Path: "/metadata/finalizers",
},
}
cvcPatchBytes, err := json.Marshal(cvcPatch)
if err != nil {
return errors.Wrapf(
err,
"failed to remove finalizers from cstorvolumeclaim {%s}",
cvc.Name,
)
}
_, err = c.clientset.
OpenebsV1alpha1().
CStorVolumeClaims(cvc.Namespace).
Patch(cvc.Name, types.JSONPatchType, cvcPatchBytes)
if err != nil {
return errors.Wrapf(
err,
"failed to remove finalizers from cstorvolumeclaim {%s}",
cvc.Name,
)
}
klog.Infof("finalizers removed successfully from cstorvolumeclaim {%s}", cvc.Name)
return nil
}
// getPendingCVRCount gets the pending replica count to be created
// in case of any failures
func (c *CVCController) getPendingCVRCount(
cvc *apis.CStorVolumeClaim,
) (int, error) {
currentReplicaCount, err := c.getCurrentReplicaCount(cvc)
if err != nil {
runtime.HandleError(err)
return 0, err
}
return cvc.Spec.ReplicaCount - currentReplicaCount, nil
}
// getCurrentReplicaCount give the current cstorvolumereplicas count for the
// given volume.
func (c *CVCController) getCurrentReplicaCount(cvc *apis.CStorVolumeClaim) (int, error) {
// TODO use lister
// CVRs, err := c.cvrLister.CStorVolumeReplicas(cvc.Namespace).
// List(klabels.Set(pvLabel).AsSelector())
pvLabel := pvSelector + "=" + cvc.Name
cvrList, err := c.clientset.
OpenebsV1alpha1().
CStorVolumeReplicas(cvc.Namespace).
List(metav1.ListOptions{LabelSelector: pvLabel})
if err != nil {
return 0, errors.Errorf("unable to get current replica count: %v", err)
}
return len(cvrList.Items), nil
}
// IsCVRPending looks for pending cstorvolume replicas compared to the desired
// replica count. It returns true if the counts don't match.
func (c *CVCController) IsCVRPending(cvc *apis.CStorVolumeClaim) (bool, error) {
selector := klabels.SelectorFromSet(BaseLabels(cvc))
CVRs, err := c.cvrLister.CStorVolumeReplicas(cvc.Namespace).
List(selector)
if err != nil {
return false, errors.Errorf("failed to list cvr : %v", err)
}
// TODO: check for greater values
return cvc.Spec.ReplicaCount != len(CVRs), nil
}
// BaseLabels returns the base labels we apply to the cstorvolumereplicas we create.
func BaseLabels(cvc *apis.CStorVolumeClaim) map[string]string {
base := map[string]string{
pvSelector: cvc.Name,
}
return base
}
// cvcNeedResize returns true if a cvc desires a resize operation.
func (c *CVCController) cvcNeedResize(cvc *apis.CStorVolumeClaim) bool {
desiredCVCSize := cvc.Spec.Capacity[corev1.ResourceStorage]
actualCVCSize := cvc.Status.Capacity[corev1.ResourceStorage]
return desiredCVCSize.Cmp(actualCVCSize) > 0
}
// resizeCVC will:
// 1. Mark cvc as resizing.
// 2. Resize the cstorvolume object.
// 3. Mark cvc as resizing finished
func (c *CVCController) resizeCVC(cvc *apis.CStorVolumeClaim) error {
var updatedCVC *apis.CStorVolumeClaim
var err error
cv, err := c.clientset.OpenebsV1alpha1().CStorVolumes(cvc.Namespace).
Get(cvc.Name, metav1.GetOptions{})
if err != nil {
		runtime.HandleError(fmt.Errorf("failed to get cv %s: %v", cvc.Name, err))
return err
}
desiredCVCSize := cvc.Spec.Capacity[corev1.ResourceStorage]
if (cv.Spec.Capacity).Cmp(cv.Status.Capacity) > 0 {
c.recorder.Event(cvc, corev1.EventTypeNormal, string(apis.CStorVolumeClaimResizing),
fmt.Sprintf("Resize already in progress %s", cvc.Name))
klog.Warningf("Resize already in progress on %q from: %v to: %v",
cvc.Name, cv.Status.Capacity.String(), cv.Spec.Capacity.String())
return nil
}
	// Mark the CVC resize as finished.
if desiredCVCSize.Cmp(cv.Status.Capacity) == 0 {
// Resize volume succeeded mark it as resizing finished.
return c.markCVCResizeFinished(cvc)
}
//if desiredCVCSize.Cmp(cv.Spec.Capacity) > 0 {
if updatedCVC, err = c.markCVCResizeInProgress(cvc); err != nil {
klog.Errorf("failed to mark cvc %q as resizing: %v", cvc.Name, err)
return err
}
cvc = updatedCVC
// Record an event to indicate that cvc-controller is resizing this volume.
c.recorder.Event(cvc, corev1.EventTypeNormal, string(apis.CStorVolumeClaimResizing),
fmt.Sprintf("CVCController is resizing volume %s", cvc.Name))
err = c.resizeCV(cv, desiredCVCSize)
if err != nil {
// Record an event to indicate that resize operation is failed.
c.recorder.Eventf(cvc, corev1.EventTypeWarning, string(apis.CStorVolumeClaimResizeFailed), err.Error())
return err
}
return nil
}
func (c *CVCController) markCVCResizeInProgress(cvc *apis.CStorVolumeClaim) (*apis.CStorVolumeClaim, error) {
// Mark CVC as Resize Started
progressCondition := apis.CStorVolumeClaimCondition{
Type: apis.CStorVolumeClaimResizing,
LastTransitionTime: metav1.Now(),
}
newCVC := cvc.DeepCopy()
newCVC.Status.Conditions = MergeResizeConditionsOfCVC(newCVC.Status.Conditions,
[]apis.CStorVolumeClaimCondition{progressCondition})
return c.PatchCVCStatus(cvc, newCVC)
}
type resizeProcessStatus struct {
condition apis.CStorVolumeClaimCondition
processed bool
}
// MergeResizeConditionsOfCVC updates cvc with desired resize conditions
// leaving other conditions untouched.
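// Worked example (illustrative): with oldConditions = [Resizing] and an empty
// resizeConditions slice, as passed from markCVCResizeFinished, the Resizing
// condition is a known resize type with no replacement and is therefore dropped,
// leaving only the non-resize conditions (none in this case).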
func MergeResizeConditionsOfCVC(oldConditions, resizeConditions []apis.CStorVolumeClaimCondition) []apis.CStorVolumeClaimCondition {
resizeConditionMap := map[apis.CStorVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
newConditions := []apis.CStorVolumeClaimCondition{}
for _, condition := range oldConditions {
// If Condition is of not resize type, we keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
newConditions = append(newConditions, newCondition.condition)
newCondition.processed = true
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
return newConditions
}
func (c *CVCController) markCVCResizeFinished(cvc *apis.CStorVolumeClaim) error {
newCVC := cvc.DeepCopy()
newCVC.Status.Capacity = cvc.Spec.Capacity
newCVC.Status.Conditions = MergeResizeConditionsOfCVC(cvc.Status.Conditions, []apis.CStorVolumeClaimCondition{})
_, err := c.PatchCVCStatus(cvc, newCVC)
if err != nil {
klog.Errorf("Mark CVC %q as resize finished failed: %v", cvc.Name, err)
return err
}
klog.V(4).Infof("Resize CVC %q finished", cvc.Name)
c.recorder.Eventf(cvc, corev1.EventTypeNormal, string(apis.CStorVolumeClaimResizeSuccess), "Resize volume succeeded")
return nil
}
// PatchCVCStatus updates CVC status using patch api
func (c *CVCController) PatchCVCStatus(oldCVC,
newCVC *apis.CStorVolumeClaim,
) (*apis.CStorVolumeClaim, error) {
patchBytes, err := getPatchData(oldCVC, newCVC)
if err != nil {
return nil, fmt.Errorf("can't patch status of CVC %s as generate path data failed: %v", oldCVC.Name, err)
}
updatedClaim, updateErr := c.clientset.OpenebsV1alpha1().CStorVolumeClaims(oldCVC.Namespace).
Patch(oldCVC.Name, types.MergePatchType, patchBytes)
if updateErr != nil {
return nil, fmt.Errorf("can't patch status of CVC %s with %v", oldCVC.Name, updateErr)
}
return updatedClaim, nil
}
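// getPatchData computes a two-way (strategic) merge patch between the old and new
// objects. As a rough illustration (field names are indicative only): when just
// the capacity of a CStorVolume changes, the resulting patch is of the form
// {"spec":{"capacity":"10Gi"}}, which callers such as resizeCV apply with
// types.MergePatchType.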
func getPatchData(oldObj, newObj interface{}) ([]byte, error) {
oldData, err := json.Marshal(oldObj)
if err != nil {
return nil, fmt.Errorf("marshal old object failed: %v", err)
}
newData, err := json.Marshal(newObj)
if err != nil {
return nil, fmt.Errorf("mashal new object failed: %v", err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldObj)
if err != nil {
return nil, fmt.Errorf("CreateTwoWayMergePatch failed: %v", err)
}
return patchBytes, nil
}
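// Illustrative sketch (hypothetical helper, not upstream code) showing the
// usage pattern getPatchData is written for, the same one followed by
// PatchCVCStatus above and resizeCV below: deep-copy the object, mutate the
// copy, and diff the two into a merge patch. The "10Gi" value is purely an
// example.
func exampleCapacityPatch(cv *apis.CStorVolume) ([]byte, error) {
	updated := cv.DeepCopy()
	updated.Spec.Capacity = resource.MustParse("10Gi")
	return getPatchData(cv, updated)
}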
// resizeCV resizes the cstor volume to the desired size and updates the CV's capacity
func (c *CVCController) resizeCV(cv *apis.CStorVolume, newCapacity resource.Quantity) error {
newCV := cv.DeepCopy()
newCV.Spec.Capacity = newCapacity
patchBytes, err := getPatchData(cv, newCV)
if err != nil {
return fmt.Errorf("can't update capacity of CV %s as generate patch data failed: %v", cv.Name, err)
}
_, updateErr := c.clientset.OpenebsV1alpha1().CStorVolumes(getNamespace()).
Patch(cv.Name, types.MergePatchType, patchBytes)
if updateErr != nil {
return updateErr
}
return nil
}
| 1 | 17,966 | this need to be moved up after getting policy.. | openebs-maya | go |
@@ -1363,7 +1363,7 @@ data:
# GCP Specific
- name: GCPInvalidProjectID
searchRegexStrings:
- - "platform\.gcp\.project.* invalid project ID"
+ - "platform.gcp.project.* invalid project ID"
installFailingReason: GCPInvalidProjectID
installFailingMessage: Invalid GCP project ID
- name: GCPInstanceTypeNotFound | 1 | // Code generated for package assets by go-bindata DO NOT EDIT. (@generated)
// sources:
// config/hiveadmission/apiservice.yaml
// config/hiveadmission/clusterdeployment-webhook.yaml
// config/hiveadmission/clusterimageset-webhook.yaml
// config/hiveadmission/clusterprovision-webhook.yaml
// config/hiveadmission/deployment.yaml
// config/hiveadmission/dnszones-webhook.yaml
// config/hiveadmission/hiveadmission_rbac_role.yaml
// config/hiveadmission/hiveadmission_rbac_role_binding.yaml
// config/hiveadmission/machinepool-webhook.yaml
// config/hiveadmission/selectorsyncset-webhook.yaml
// config/hiveadmission/service-account.yaml
// config/hiveadmission/service.yaml
// config/hiveadmission/syncset-webhook.yaml
// config/controllers/deployment.yaml
// config/controllers/hive_controllers_role.yaml
// config/controllers/hive_controllers_role_binding.yaml
// config/controllers/hive_controllers_serviceaccount.yaml
// config/controllers/service.yaml
// config/rbac/hive_admin_role.yaml
// config/rbac/hive_admin_role_binding.yaml
// config/rbac/hive_frontend_role.yaml
// config/rbac/hive_frontend_role_binding.yaml
// config/rbac/hive_frontend_serviceaccount.yaml
// config/rbac/hive_reader_role.yaml
// config/rbac/hive_reader_role_binding.yaml
// config/configmaps/install-log-regexes-configmap.yaml
package assets
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
// Name returns the file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size returns the file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode returns the file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// ModTime returns the file modification time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir returns whether the file is a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
}
// Sys returns the underlying data source (always nil for bindata files)
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _configHiveadmissionApiserviceYaml = []byte(`---
# register as aggregated apiserver; this has a number of benefits:
#
# - allows other kubernetes components to talk to the admission webhook using the ` + "`" + `kubernetes.default.svc` + "`" + ` service
# - allows other kubernetes components to use their in-cluster credentials to communicate with the webhook
# - allows you to test the webhook using kubectl
# - allows you to govern access to the webhook using RBAC
# - prevents other extension API servers from leaking their service account tokens to the webhook
#
# for more information, see: https://kubernetes.io/blog/2018/01/extensible-admission-is-beta
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1.admission.hive.openshift.io
annotations:
service.alpha.openshift.io/inject-cabundle: "true"
spec:
group: admission.hive.openshift.io
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: hiveadmission
namespace: hive
version: v1
`)
func configHiveadmissionApiserviceYamlBytes() ([]byte, error) {
return _configHiveadmissionApiserviceYaml, nil
}
func configHiveadmissionApiserviceYaml() (*asset, error) {
bytes, err := configHiveadmissionApiserviceYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/apiservice.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionClusterdeploymentWebhookYaml = []byte(`---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: clusterdeploymentvalidators.admission.hive.openshift.io
webhooks:
- name: clusterdeploymentvalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/clusterdeploymentvalidators
rules:
- operations:
- CREATE
- UPDATE
- DELETE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- clusterdeployments
failurePolicy: Fail
`)
func configHiveadmissionClusterdeploymentWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionClusterdeploymentWebhookYaml, nil
}
func configHiveadmissionClusterdeploymentWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionClusterdeploymentWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/clusterdeployment-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionClusterimagesetWebhookYaml = []byte(`---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: clusterimagesetvalidators.admission.hive.openshift.io
webhooks:
- name: clusterimagesetvalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/clusterimagesetvalidators
rules:
- operations:
- CREATE
- UPDATE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- clusterimagesets
failurePolicy: Fail
`)
func configHiveadmissionClusterimagesetWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionClusterimagesetWebhookYaml, nil
}
func configHiveadmissionClusterimagesetWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionClusterimagesetWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/clusterimageset-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionClusterprovisionWebhookYaml = []byte(`---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: clusterprovisionvalidators.admission.hive.openshift.io
webhooks:
- name: clusterprovisionvalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/clusterprovisionvalidators
rules:
- operations:
- CREATE
- UPDATE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- clusterprovisions
failurePolicy: Fail
`)
func configHiveadmissionClusterprovisionWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionClusterprovisionWebhookYaml, nil
}
func configHiveadmissionClusterprovisionWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionClusterprovisionWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/clusterprovision-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionDeploymentYaml = []byte(`---
# to create the hiveadmission server
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: hive
name: hiveadmission
labels:
app: hiveadmission
hiveadmission: "true"
spec:
replicas: 2
selector:
matchLabels:
app: hiveadmission
hiveadmission: "true"
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
name: hiveadmission
labels:
app: hiveadmission
hiveadmission: "true"
spec:
serviceAccountName: hiveadmission
containers:
- name: hiveadmission
image: registry.svc.ci.openshift.org/openshift/hive-v4.0:hive
imagePullPolicy: Always
command:
- "/opt/services/hiveadmission"
- "--secure-port=9443"
- "--audit-log-path=-"
- "--tls-cert-file=/var/serving-cert/tls.crt"
- "--tls-private-key-file=/var/serving-cert/tls.key"
- "--v=2"
ports:
- containerPort: 9443
protocol: TCP
volumeMounts:
- mountPath: /var/serving-cert
name: serving-cert
readinessProbe:
httpGet:
path: /healthz
port: 9443
scheme: HTTPS
volumes:
- name: serving-cert
secret:
defaultMode: 420
secretName: hiveadmission-serving-cert
`)
func configHiveadmissionDeploymentYamlBytes() ([]byte, error) {
return _configHiveadmissionDeploymentYaml, nil
}
func configHiveadmissionDeploymentYaml() (*asset, error) {
bytes, err := configHiveadmissionDeploymentYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/deployment.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionDnszonesWebhookYaml = []byte(`---
# register to intercept DNSZone object creates and updates
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: dnszonevalidators.admission.hive.openshift.io
webhooks:
- name: dnszonevalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/dnszonevalidators
rules:
- operations:
- CREATE
- UPDATE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- dnszones
failurePolicy: Fail
`)
func configHiveadmissionDnszonesWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionDnszonesWebhookYaml, nil
}
func configHiveadmissionDnszonesWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionDnszonesWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/dnszones-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionHiveadmission_rbac_roleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
name: system:openshift:hive:hiveadmission
rules:
- apiGroups:
- admission.hive.openshift.io
resources:
- dnszones
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
`)
func configHiveadmissionHiveadmission_rbac_roleYamlBytes() ([]byte, error) {
return _configHiveadmissionHiveadmission_rbac_roleYaml, nil
}
func configHiveadmissionHiveadmission_rbac_roleYaml() (*asset, error) {
bytes, err := configHiveadmissionHiveadmission_rbac_roleYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/hiveadmission_rbac_role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionHiveadmission_rbac_role_bindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hiveadmission-hive-hiveadmission
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: system:openshift:hive:hiveadmission
subjects:
- kind: ServiceAccount
namespace: hive
name: hiveadmission
`)
func configHiveadmissionHiveadmission_rbac_role_bindingYamlBytes() ([]byte, error) {
return _configHiveadmissionHiveadmission_rbac_role_bindingYaml, nil
}
func configHiveadmissionHiveadmission_rbac_role_bindingYaml() (*asset, error) {
bytes, err := configHiveadmissionHiveadmission_rbac_role_bindingYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/hiveadmission_rbac_role_binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionMachinepoolWebhookYaml = []byte(`---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: machinepoolvalidators.admission.hive.openshift.io
webhooks:
- name: machinepoolvalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/machinepoolvalidators
rules:
- operations:
- CREATE
- UPDATE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- machinepools
failurePolicy: Fail
`)
func configHiveadmissionMachinepoolWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionMachinepoolWebhookYaml, nil
}
func configHiveadmissionMachinepoolWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionMachinepoolWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/machinepool-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionSelectorsyncsetWebhookYaml = []byte(`---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: selectorsyncsetvalidators.admission.hive.openshift.io
webhooks:
- name: selectorsyncsetvalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/selectorsyncsetvalidators
rules:
- operations:
- CREATE
- UPDATE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- selectorsyncsets
failurePolicy: Fail
`)
func configHiveadmissionSelectorsyncsetWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionSelectorsyncsetWebhookYaml, nil
}
func configHiveadmissionSelectorsyncsetWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionSelectorsyncsetWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/selectorsyncset-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionServiceAccountYaml = []byte(`---
# to be able to assign powers to the hiveadmission process
apiVersion: v1
kind: ServiceAccount
metadata:
name: hiveadmission
`)
func configHiveadmissionServiceAccountYamlBytes() ([]byte, error) {
return _configHiveadmissionServiceAccountYaml, nil
}
func configHiveadmissionServiceAccountYaml() (*asset, error) {
bytes, err := configHiveadmissionServiceAccountYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionServiceYaml = []byte(`---
apiVersion: v1
kind: Service
metadata:
namespace: hive
name: hiveadmission
annotations:
service.alpha.openshift.io/serving-cert-secret-name: hiveadmission-serving-cert
spec:
selector:
app: hiveadmission
ports:
- port: 443
targetPort: 9443
protocol: TCP
`)
func configHiveadmissionServiceYamlBytes() ([]byte, error) {
return _configHiveadmissionServiceYaml, nil
}
func configHiveadmissionServiceYaml() (*asset, error) {
bytes, err := configHiveadmissionServiceYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/service.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configHiveadmissionSyncsetWebhookYaml = []byte(`---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: syncsetvalidators.admission.hive.openshift.io
webhooks:
- name: syncsetvalidators.admission.hive.openshift.io
clientConfig:
service:
# reach the webhook via the registered aggregated API
namespace: default
name: kubernetes
path: /apis/admission.hive.openshift.io/v1/syncsetvalidators
rules:
- operations:
- CREATE
- UPDATE
apiGroups:
- hive.openshift.io
apiVersions:
- v1
resources:
- syncsets
failurePolicy: Fail
`)
func configHiveadmissionSyncsetWebhookYamlBytes() ([]byte, error) {
return _configHiveadmissionSyncsetWebhookYaml, nil
}
func configHiveadmissionSyncsetWebhookYaml() (*asset, error) {
bytes, err := configHiveadmissionSyncsetWebhookYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/hiveadmission/syncset-webhook.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configControllersDeploymentYaml = []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
name: hive-controllers
namespace: hive
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
spec:
selector:
matchLabels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
replicas: 1
revisionHistoryLimit: 4
strategy:
type: Recreate
template:
metadata:
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
spec:
serviceAccountName: hive-controllers
volumes:
- name: kubectl-cache
emptyDir: {}
containers:
# By default we will use the latest CI images published from hive master:
- image: registry.svc.ci.openshift.org/openshift/hive-v4.0:hive
imagePullPolicy: Always
name: manager
resources:
requests:
cpu: 50m
memory: 512Mi
command:
- /opt/services/manager
envFrom:
- configMapRef:
name: hive-controllers-config
volumeMounts:
- name: kubectl-cache
mountPath: /var/cache/kubectl
env:
- name: CLI_CACHE_DIR
value: /var/cache/kubectl
- name: HIVE_NS
valueFrom:
fieldRef:
fieldPath: metadata.namespace
readinessProbe:
httpGet:
path: /readyz
port: 8080
livenessProbe:
httpGet:
path: /healthz
port: 8080
terminationGracePeriodSeconds: 10
`)
func configControllersDeploymentYamlBytes() ([]byte, error) {
return _configControllersDeploymentYaml, nil
}
func configControllersDeploymentYaml() (*asset, error) {
bytes, err := configControllersDeploymentYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/controllers/deployment.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configControllersHive_controllers_roleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: hive-controllers
rules:
- apiGroups:
- hive.openshift.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- hiveinternal.openshift.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- serviceaccounts
- secrets
- configmaps
- events
- persistentvolumeclaims
- namespaces
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- velero.io
resources:
- backups
verbs:
- create
`)
func configControllersHive_controllers_roleYamlBytes() ([]byte, error) {
return _configControllersHive_controllers_roleYaml, nil
}
func configControllersHive_controllers_roleYaml() (*asset, error) {
bytes, err := configControllersHive_controllers_roleYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/controllers/hive_controllers_role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configControllersHive_controllers_role_bindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: hive-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hive-controllers
subjects:
- kind: ServiceAccount
name: hive-controllers
namespace: system
`)
func configControllersHive_controllers_role_bindingYamlBytes() ([]byte, error) {
return _configControllersHive_controllers_role_bindingYaml, nil
}
func configControllersHive_controllers_role_bindingYaml() (*asset, error) {
bytes, err := configControllersHive_controllers_role_bindingYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/controllers/hive_controllers_role_binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configControllersHive_controllers_serviceaccountYaml = []byte(`apiVersion: v1
kind: ServiceAccount
metadata:
name: hive-controllers
namespace: hive
`)
func configControllersHive_controllers_serviceaccountYamlBytes() ([]byte, error) {
return _configControllersHive_controllers_serviceaccountYaml, nil
}
func configControllersHive_controllers_serviceaccountYaml() (*asset, error) {
bytes, err := configControllersHive_controllers_serviceaccountYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/controllers/hive_controllers_serviceaccount.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configControllersServiceYaml = []byte(`apiVersion: v1
kind: Service
metadata:
name: hive-controllers
namespace: hive
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
spec:
selector:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
ports:
- name: metrics
port: 2112
protocol: TCP
# Expose 6060 for pprof data. Normally nothing listening here unless a developer has
# compiled in pprof support. See Hive developer documentation for how to use.
- name: profiling
port: 6060
protocol: TCP
`)
func configControllersServiceYamlBytes() ([]byte, error) {
return _configControllersServiceYaml, nil
}
func configControllersServiceYaml() (*asset, error) {
bytes, err := configControllersServiceYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/controllers/service.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_admin_roleYaml = []byte(`# hive-admin is a role intended for hive administrators who need to be able to debug
# cluster installations, and modify hive configuration.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hive-admin
rules:
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- hive.openshift.io
resources:
- clusterdeployments
- clusterprovisions
- dnszones
- machinepools
- machinepoolnameleases
- selectorsyncidentityproviders
- syncidentityproviders
- syncsets
- syncsetinstances
- clusterdeprovisions
# TODO: remove once v1alpha1 compat removed
- clusterdeprovisionrequests
- clusterstates
verbs:
- get
- list
- watch
- apiGroups:
- hive.openshift.io
resources:
- clusterimagesets
- hiveconfigs
- selectorsyncsets
- selectorsyncidentityproviders
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- hiveinternal.openshift.io
resources:
- clustersyncs
- clustersyncleases
verbs:
- get
- list
- watch
- apiGroups:
- admission.hive.openshift.io
resources:
- clusterdeployments
- clusterimagesets
- clusterprovisions
- dnszones
- machinepools
- selectorsyncsets
- syncsets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- apiregistration.k8s.io
resources:
- apiservices
verbs:
- get
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- get
- list
- watch
`)
func configRbacHive_admin_roleYamlBytes() ([]byte, error) {
return _configRbacHive_admin_roleYaml, nil
}
func configRbacHive_admin_roleYaml() (*asset, error) {
bytes, err := configRbacHive_admin_roleYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_admin_role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_admin_role_bindingYaml = []byte(`# NOTE: This binding uses the openshift apigroup as it is the only way to link
# to an openshift user group. This will not work if running hive on vanilla Kube,
# but the Hive operator will detect this and skip creation of the binding.
apiVersion: authorization.openshift.io/v1
kind: ClusterRoleBinding
metadata:
name: hive-admin
roleRef:
name: hive-admin
groupNames:
- hive-admins
subjects:
- kind: Group
name: hive-admins
`)
func configRbacHive_admin_role_bindingYamlBytes() ([]byte, error) {
return _configRbacHive_admin_role_bindingYaml, nil
}
func configRbacHive_admin_role_bindingYaml() (*asset, error) {
bytes, err := configRbacHive_admin_role_bindingYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_admin_role_binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_frontend_roleYaml = []byte(`# hive-frontend is a role intended for integrating applications acting as a frontend
# to Hive. These applications will need quite powerful permissions in the Hive cluster
# to create namespaces to organize clusters, as well as all the required objects in those
# clusters.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hive-frontend
rules:
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
- configmaps
- events
- namespaces
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- hive.openshift.io
resources:
- clusterdeployments
- clusterprovisions
- dnszones
- machinepools
- selectorsyncidentityproviders
- syncidentityproviders
- selectorsyncsets
- syncsets
- clusterdeprovisions
# TODO: remove once v1alpha1 compat removed
- clusterdeprovisionrequests
- clusterstates
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- hive.openshift.io
resources:
- clusterimagesets
- hiveconfigs
verbs:
- get
- list
- watch
- apiGroups:
- hiveinternal.openshift.io
resources:
- clustersyncs
- clustersyncleases
verbs:
- get
- list
- watch
`)
func configRbacHive_frontend_roleYamlBytes() ([]byte, error) {
return _configRbacHive_frontend_roleYaml, nil
}
func configRbacHive_frontend_roleYaml() (*asset, error) {
bytes, err := configRbacHive_frontend_roleYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_frontend_role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_frontend_role_bindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: hive-frontend
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hive-frontend
subjects:
- kind: ServiceAccount
name: hive-frontend
namespace: hive
- kind: Group
name: hive-frontend
`)
func configRbacHive_frontend_role_bindingYamlBytes() ([]byte, error) {
return _configRbacHive_frontend_role_bindingYaml, nil
}
func configRbacHive_frontend_role_bindingYaml() (*asset, error) {
bytes, err := configRbacHive_frontend_role_bindingYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_frontend_role_binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_frontend_serviceaccountYaml = []byte(`apiVersion: v1
kind: ServiceAccount
metadata:
name: hive-frontend
namespace: hive
`)
func configRbacHive_frontend_serviceaccountYamlBytes() ([]byte, error) {
return _configRbacHive_frontend_serviceaccountYaml, nil
}
func configRbacHive_frontend_serviceaccountYaml() (*asset, error) {
bytes, err := configRbacHive_frontend_serviceaccountYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_frontend_serviceaccount.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_reader_roleYaml = []byte(`# hive-reader is a role intended for users who need read-only access to debug
# cluster installations and inspect hive configuration.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hive-reader
rules:
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- hive.openshift.io
resources:
- clusterdeployments
- clusterprovisions
- dnszones
- machinepools
- selectorsyncidentityproviders
- selectorsyncsets
- syncidentityproviders
- syncsets
- syncsetinstances
- clusterdeprovisions
# TODO: remove once v1alpha1 compat removed
- clusterdeprovisionrequests
- clusterstates
verbs:
- get
- list
- watch
- apiGroups:
- hive.openshift.io
resources:
- clusterimagesets
- hiveconfigs
verbs:
- get
- list
- watch
- apiGroups:
- hiveinternal.openshift.io
resources:
- clustersyncs
- clustersyncleases
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
`)
func configRbacHive_reader_roleYamlBytes() ([]byte, error) {
return _configRbacHive_reader_roleYaml, nil
}
func configRbacHive_reader_roleYaml() (*asset, error) {
bytes, err := configRbacHive_reader_roleYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_reader_role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configRbacHive_reader_role_bindingYaml = []byte(`# NOTE: This binding uses the openshift apigroup as it is the only way to link
# to an openshift user group. This will not work if running hive on vanilla Kube,
# but the Hive operator will detect this and skip creation of the binding.
apiVersion: authorization.openshift.io/v1
kind: ClusterRoleBinding
metadata:
name: hive-reader
roleRef:
name: hive-reader
groupNames:
- hive-readers
subjects:
- kind: Group
name: hive-readers
`)
func configRbacHive_reader_role_bindingYamlBytes() ([]byte, error) {
return _configRbacHive_reader_role_bindingYaml, nil
}
func configRbacHive_reader_role_bindingYaml() (*asset, error) {
bytes, err := configRbacHive_reader_role_bindingYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/rbac/hive_reader_role_binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configConfigmapsInstallLogRegexesConfigmapYaml = []byte(`apiVersion: v1
kind: ConfigMap
metadata:
name: install-log-regexes
namespace: hive
data:
regexes: |
# AWS Specific
# https://bugzilla.redhat.com/show_bug.cgi?id=1844320
- name: AWSUnableToFindMatchingRouteTable
searchRegexStrings:
- "Error: Unable to find matching route for Route Table"
installFailingReason: AWSUnableToFindMatchingRouteTable
installFailingMessage: Unable to find matching route for route table
- name: AWSNATGatewayLimitExceeded
searchRegexStrings:
- "NatGatewayLimitExceeded"
installFailingReason: AWSNATGatewayLimitExceeded
installFailingMessage: AWS NAT gateway limit exceeded
- name: DNSAlreadyExists
searchRegexStrings:
- "aws_route53_record.*Error building changeset:.*Tried to create resource record set.*but it already exists"
installFailingReason: DNSAlreadyExists
installFailingMessage: DNS record already exists
- name: PendingVerification
searchRegexStrings:
- "PendingVerification: Your request for accessing resources in this region is being validated"
installFailingReason: PendingVerification
installFailingMessage: Account pending verification for region
- name: NoMatchingRoute53Zone
searchRegexStrings:
- "data.aws_route53_zone.public: no matching Route53Zone found"
installFailingReason: NoMatchingRoute53Zone
installFailingMessage: No matching Route53Zone found
- name: SimulatorThrottling
searchRegexStrings:
- "validate AWS credentials: checking install permissions: error simulating policy: Throttling: Rate exceeded"
installFailingReason: AWSAPIRateLimitExceeded
installFailingMessage: AWS API rate limit exceeded while simulating policy
- name: GeneralThrottling
searchRegexStrings:
- "Throttling: Rate exceeded"
installFailingReason: AWSAPIRateLimitExceeded
installFailingMessage: AWS API rate limit exceeded
# GCP Specific
- name: GCPInvalidProjectID
searchRegexStrings:
- "platform\.gcp\.project.* invalid project ID"
installFailingReason: GCPInvalidProjectID
installFailingMessage: Invalid GCP project ID
- name: GCPInstanceTypeNotFound
searchRegexStrings:
- "platform.gcp.type: Invalid value:.* instance type.* not found]"
installFailingReason: GCPInstanceTypeNotFound
installFailingMessage: GCP instance type not found
# Bare Metal
- name: LibvirtSSHKeyPermissionDenied
searchRegexStrings:
- "platform.baremetal.libvirtURI: Internal error: could not connect to libvirt: virError.Code=38, Domain=7, Message=.Cannot recv data: Permission denied"
installFailingReason: LibvirtSSHKeyPermissionDenied
installFailingMessage: "Permission denied connecting to libvirt host, check SSH key configuration and pass phrase"
# Generic OpenShift Install
- name: KubeAPIWaitTimeout
searchRegexStrings:
- "waiting for Kubernetes API: context deadline exceeded"
installFailingReason: KubeAPIWaitTimeout
installFailingMessage: Timeout waiting for the Kubernetes API to begin responding
- name: MonitoringOperatorStillUpdating
searchRegexStrings:
- "failed to initialize the cluster: Cluster operator monitoring is still updating"
installFailingReason: MonitoringOperatorStillUpdating
installFailingMessage: Timeout waiting for the monitoring operator to become ready
# Processing stops at the first match, so this more generic
# message about the connection failure must always come after the
# more specific message for LibvirtSSHKeyPermissionDenied.
- name: LibvirtConnectionFailed
searchRegexStrings:
- "could not connect to libvirt"
installFailingReason: LibvirtConnectionFailed
installFailingMessage: "Could not connect to libvirt host"
`)
func configConfigmapsInstallLogRegexesConfigmapYamlBytes() ([]byte, error) {
return _configConfigmapsInstallLogRegexesConfigmapYaml, nil
}
func configConfigmapsInstallLogRegexesConfigmapYaml() (*asset, error) {
bytes, err := configConfigmapsInstallLogRegexesConfigmapYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/configmaps/install-log-regexes-configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"config/hiveadmission/apiservice.yaml": configHiveadmissionApiserviceYaml,
"config/hiveadmission/clusterdeployment-webhook.yaml": configHiveadmissionClusterdeploymentWebhookYaml,
"config/hiveadmission/clusterimageset-webhook.yaml": configHiveadmissionClusterimagesetWebhookYaml,
"config/hiveadmission/clusterprovision-webhook.yaml": configHiveadmissionClusterprovisionWebhookYaml,
"config/hiveadmission/deployment.yaml": configHiveadmissionDeploymentYaml,
"config/hiveadmission/dnszones-webhook.yaml": configHiveadmissionDnszonesWebhookYaml,
"config/hiveadmission/hiveadmission_rbac_role.yaml": configHiveadmissionHiveadmission_rbac_roleYaml,
"config/hiveadmission/hiveadmission_rbac_role_binding.yaml": configHiveadmissionHiveadmission_rbac_role_bindingYaml,
"config/hiveadmission/machinepool-webhook.yaml": configHiveadmissionMachinepoolWebhookYaml,
"config/hiveadmission/selectorsyncset-webhook.yaml": configHiveadmissionSelectorsyncsetWebhookYaml,
"config/hiveadmission/service-account.yaml": configHiveadmissionServiceAccountYaml,
"config/hiveadmission/service.yaml": configHiveadmissionServiceYaml,
"config/hiveadmission/syncset-webhook.yaml": configHiveadmissionSyncsetWebhookYaml,
"config/controllers/deployment.yaml": configControllersDeploymentYaml,
"config/controllers/hive_controllers_role.yaml": configControllersHive_controllers_roleYaml,
"config/controllers/hive_controllers_role_binding.yaml": configControllersHive_controllers_role_bindingYaml,
"config/controllers/hive_controllers_serviceaccount.yaml": configControllersHive_controllers_serviceaccountYaml,
"config/controllers/service.yaml": configControllersServiceYaml,
"config/rbac/hive_admin_role.yaml": configRbacHive_admin_roleYaml,
"config/rbac/hive_admin_role_binding.yaml": configRbacHive_admin_role_bindingYaml,
"config/rbac/hive_frontend_role.yaml": configRbacHive_frontend_roleYaml,
"config/rbac/hive_frontend_role_binding.yaml": configRbacHive_frontend_role_bindingYaml,
"config/rbac/hive_frontend_serviceaccount.yaml": configRbacHive_frontend_serviceaccountYaml,
"config/rbac/hive_reader_role.yaml": configRbacHive_reader_roleYaml,
"config/rbac/hive_reader_role_binding.yaml": configRbacHive_reader_role_bindingYaml,
"config/configmaps/install-log-regexes-configmap.yaml": configConfigmapsInstallLogRegexesConfigmapYaml,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
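// Illustrative sketch (hypothetical helper, not part of the generated output)
// showing how Asset, MustAsset and AssetDir above are typically combined:
// enumerate the embedded hiveadmission manifests and load each one into memory.
func exampleLoadHiveadmissionAssets() (map[string][]byte, error) {
	names, err := AssetDir("config/hiveadmission")
	if err != nil {
		return nil, err
	}
	loaded := map[string][]byte{}
	for _, name := range names {
		loaded[name] = MustAsset("config/hiveadmission/" + name)
	}
	return loaded, nil
}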
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"config": {nil, map[string]*bintree{
"configmaps": {nil, map[string]*bintree{
"install-log-regexes-configmap.yaml": {configConfigmapsInstallLogRegexesConfigmapYaml, map[string]*bintree{}},
}},
"controllers": {nil, map[string]*bintree{
"deployment.yaml": {configControllersDeploymentYaml, map[string]*bintree{}},
"hive_controllers_role.yaml": {configControllersHive_controllers_roleYaml, map[string]*bintree{}},
"hive_controllers_role_binding.yaml": {configControllersHive_controllers_role_bindingYaml, map[string]*bintree{}},
"hive_controllers_serviceaccount.yaml": {configControllersHive_controllers_serviceaccountYaml, map[string]*bintree{}},
"service.yaml": {configControllersServiceYaml, map[string]*bintree{}},
}},
"hiveadmission": {nil, map[string]*bintree{
"apiservice.yaml": {configHiveadmissionApiserviceYaml, map[string]*bintree{}},
"clusterdeployment-webhook.yaml": {configHiveadmissionClusterdeploymentWebhookYaml, map[string]*bintree{}},
"clusterimageset-webhook.yaml": {configHiveadmissionClusterimagesetWebhookYaml, map[string]*bintree{}},
"clusterprovision-webhook.yaml": {configHiveadmissionClusterprovisionWebhookYaml, map[string]*bintree{}},
"deployment.yaml": {configHiveadmissionDeploymentYaml, map[string]*bintree{}},
"dnszones-webhook.yaml": {configHiveadmissionDnszonesWebhookYaml, map[string]*bintree{}},
"hiveadmission_rbac_role.yaml": {configHiveadmissionHiveadmission_rbac_roleYaml, map[string]*bintree{}},
"hiveadmission_rbac_role_binding.yaml": {configHiveadmissionHiveadmission_rbac_role_bindingYaml, map[string]*bintree{}},
"machinepool-webhook.yaml": {configHiveadmissionMachinepoolWebhookYaml, map[string]*bintree{}},
"selectorsyncset-webhook.yaml": {configHiveadmissionSelectorsyncsetWebhookYaml, map[string]*bintree{}},
"service-account.yaml": {configHiveadmissionServiceAccountYaml, map[string]*bintree{}},
"service.yaml": {configHiveadmissionServiceYaml, map[string]*bintree{}},
"syncset-webhook.yaml": {configHiveadmissionSyncsetWebhookYaml, map[string]*bintree{}},
}},
"rbac": {nil, map[string]*bintree{
"hive_admin_role.yaml": {configRbacHive_admin_roleYaml, map[string]*bintree{}},
"hive_admin_role_binding.yaml": {configRbacHive_admin_role_bindingYaml, map[string]*bintree{}},
"hive_frontend_role.yaml": {configRbacHive_frontend_roleYaml, map[string]*bintree{}},
"hive_frontend_role_binding.yaml": {configRbacHive_frontend_role_bindingYaml, map[string]*bintree{}},
"hive_frontend_serviceaccount.yaml": {configRbacHive_frontend_serviceaccountYaml, map[string]*bintree{}},
"hive_reader_role.yaml": {configRbacHive_reader_roleYaml, map[string]*bintree{}},
"hive_reader_role_binding.yaml": {configRbacHive_reader_role_bindingYaml, map[string]*bintree{}},
}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
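// Illustrative sketch (hypothetical helper, not part of the generated output);
// the target directory is an arbitrary example value. It uses RestoreAssets
// above to write the whole embedded config/ tree to disk.
func exampleRestoreConfigTree() error {
	return RestoreAssets("/tmp/hive-assets", "config")
}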
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
| 1 | 15,911 | I'm curious - is there a way to test these? | openshift-hive | go |
@@ -59,10 +59,10 @@ public class MetadataTableUtils {
}
public static Table createMetadataTableInstance(TableOperations originTableOps,
+ String catalogName,
TableIdentifier originTableIdentifier,
MetadataTableType type) {
- return createMetadataTableInstance(originTableOps,
- BaseMetastoreCatalog.fullTableName(type.name(), originTableIdentifier),
- type);
+ String fullTableName = BaseMetastoreCatalog.fullTableName(catalogName, originTableIdentifier);
+ return createMetadataTableInstance(originTableOps, fullTableName, type);
}
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NoSuchTableException;
public class MetadataTableUtils {
private MetadataTableUtils() {
}
public static boolean hasMetadataTableName(TableIdentifier identifier) {
return MetadataTableType.from(identifier.name()) != null;
}
public static Table createMetadataTableInstance(TableOperations originTableOps,
String fullTableName,
MetadataTableType type) {
Table baseTableForMetadata = new BaseTable(originTableOps, fullTableName);
switch (type) {
case ENTRIES:
return new ManifestEntriesTable(originTableOps, baseTableForMetadata);
case FILES:
return new DataFilesTable(originTableOps, baseTableForMetadata);
case HISTORY:
return new HistoryTable(originTableOps, baseTableForMetadata);
case SNAPSHOTS:
return new SnapshotsTable(originTableOps, baseTableForMetadata);
case MANIFESTS:
return new ManifestsTable(originTableOps, baseTableForMetadata);
case PARTITIONS:
return new PartitionsTable(originTableOps, baseTableForMetadata);
case ALL_DATA_FILES:
return new AllDataFilesTable(originTableOps, baseTableForMetadata);
case ALL_MANIFESTS:
return new AllManifestsTable(originTableOps, baseTableForMetadata);
case ALL_ENTRIES:
return new AllEntriesTable(originTableOps, baseTableForMetadata);
default:
throw new NoSuchTableException("Unknown metadata table type: %s for %s", type, fullTableName);
}
}
public static Table createMetadataTableInstance(TableOperations originTableOps,
TableIdentifier originTableIdentifier,
MetadataTableType type) {
return createMetadataTableInstance(originTableOps,
BaseMetastoreCatalog.fullTableName(type.name(), originTableIdentifier),
type);
}
}
| 1 | 25,823 | This was broken before as the name of the metadata table started with its type, not catalog. | apache-iceberg | java |
@@ -173,6 +173,7 @@ func (di *Dependencies) Bootstrap(nodeOptions node.Options) error {
nats_discovery.Bootstrap()
log.Infof("Starting Mysterium Node (%s)", metadata.VersionAsString())
+ log.Infof("Build information (%s)", metadata.BuildAsString())
if err := nodeOptions.Directories.Check(); err != nil {
return err | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cmd
import (
"time"
"fmt"
"path/filepath"
log "github.com/cihub/seelog"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/mysteriumnetwork/node/blockchain"
"github.com/mysteriumnetwork/node/communication"
nats_dialog "github.com/mysteriumnetwork/node/communication/nats/dialog"
nats_discovery "github.com/mysteriumnetwork/node/communication/nats/discovery"
consumer_session "github.com/mysteriumnetwork/node/consumer/session"
"github.com/mysteriumnetwork/node/consumer/statistics"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/location"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/core/port"
"github.com/mysteriumnetwork/node/core/service"
"github.com/mysteriumnetwork/node/core/storage/boltdb"
"github.com/mysteriumnetwork/node/core/storage/boltdb/migrations/history"
"github.com/mysteriumnetwork/node/eventbus"
"github.com/mysteriumnetwork/node/identity"
identity_registry "github.com/mysteriumnetwork/node/identity/registry"
"github.com/mysteriumnetwork/node/logconfig"
"github.com/mysteriumnetwork/node/market"
market_metrics "github.com/mysteriumnetwork/node/market/metrics"
"github.com/mysteriumnetwork/node/market/metrics/oracle"
"github.com/mysteriumnetwork/node/market/mysterium"
"github.com/mysteriumnetwork/node/metadata"
"github.com/mysteriumnetwork/node/metrics"
"github.com/mysteriumnetwork/node/money"
"github.com/mysteriumnetwork/node/nat"
"github.com/mysteriumnetwork/node/nat/event"
"github.com/mysteriumnetwork/node/nat/mapping"
"github.com/mysteriumnetwork/node/nat/traversal"
"github.com/mysteriumnetwork/node/nat/traversal/config"
"github.com/mysteriumnetwork/node/nat/upnp"
"github.com/mysteriumnetwork/node/services"
service_noop "github.com/mysteriumnetwork/node/services/noop"
"github.com/mysteriumnetwork/node/services/openvpn"
service_openvpn "github.com/mysteriumnetwork/node/services/openvpn"
"github.com/mysteriumnetwork/node/services/openvpn/discovery/dto"
"github.com/mysteriumnetwork/node/session"
"github.com/mysteriumnetwork/node/session/balance"
session_payment "github.com/mysteriumnetwork/node/session/payment"
payment_factory "github.com/mysteriumnetwork/node/session/payment/factory"
payments_noop "github.com/mysteriumnetwork/node/session/payment/noop"
"github.com/mysteriumnetwork/node/session/promise"
"github.com/mysteriumnetwork/node/session/promise/validators"
"github.com/mysteriumnetwork/node/tequilapi"
tequilapi_endpoints "github.com/mysteriumnetwork/node/tequilapi/endpoints"
"github.com/mysteriumnetwork/node/utils"
)
const logPrefix = "[service bootstrap] "
// Storage stores persistent objects for future usage
type Storage interface {
Store(issuer string, data interface{}) error
Delete(issuer string, data interface{}) error
Update(bucket string, object interface{}) error
GetAllFrom(bucket string, data interface{}) error
GetOneByField(bucket string, fieldName string, key interface{}, to interface{}) error
GetLast(bucket string, to interface{}) error
GetBuckets() []string
Close() error
}
// NatPinger is responsible for pinging nat holes
type NatPinger interface {
PingProvider(ip string, port int, stop <-chan struct{}) error
PingTarget(*traversal.Params)
BindConsumerPort(port int)
BindServicePort(serviceType services.ServiceType, port int)
Start()
Stop()
}
// NatEventTracker is responsible for tracking NAT events
type NatEventTracker interface {
ConsumeNATEvent(event event.Event)
LastEvent() *event.Event
WaitForEvent() event.Event
}
// NatEventSender is responsible for sending NAT events to metrics server
type NatEventSender interface {
ConsumeNATEvent(event event.Event)
}
// NATStatusTracker tracks status of NAT traversal by consuming NAT events
type NATStatusTracker interface {
Status() nat.Status
ConsumeNATEvent(event event.Event)
}
// CacheResolver caches the location resolution results
type CacheResolver interface {
location.Resolver
HandleConnectionEvent(connection.StateEvent)
}
// Dependencies is DI container for top level components which is reused in several places
type Dependencies struct {
Node *node.Node
NetworkDefinition metadata.NetworkDefinition
MysteriumAPI *mysterium.MysteriumAPI
MysteriumMorqaClient market_metrics.QualityOracle
EtherClient *ethclient.Client
NATService nat.NATService
Storage Storage
Keystore *keystore.KeyStore
PromiseStorage *promise.Storage
IdentityManager identity.Manager
SignerFactory identity.SignerFactory
IdentityRegistry identity_registry.IdentityRegistry
IdentityRegistration identity_registry.RegistrationDataProvider
IPResolver ip.Resolver
LocationResolver CacheResolver
StatisticsTracker *statistics.SessionStatisticsTracker
StatisticsReporter *statistics.SessionStatisticsReporter
SessionStorage *consumer_session.Storage
EventBus eventbus.EventBus
ConnectionManager connection.Manager
ConnectionRegistry *connection.Registry
ServicesManager *service.Manager
ServiceRegistry *service.Registry
ServiceSessionStorage *session.StorageMemory
NATPinger NatPinger
NATTracker NatEventTracker
NATEventSender NatEventSender
NATStatusTracker NATStatusTracker
PortPool *port.Pool
MetricsSender *metrics.Sender
}
// Bootstrap initiates all container dependencies
func (di *Dependencies) Bootstrap(nodeOptions node.Options) error {
logconfig.Bootstrap()
nats_discovery.Bootstrap()
log.Infof("Starting Mysterium Node (%s)", metadata.VersionAsString())
if err := nodeOptions.Directories.Check(); err != nil {
return err
}
if err := nodeOptions.Openvpn.Check(); err != nil {
return err
}
if err := di.bootstrapNetworkComponents(nodeOptions.OptionsNetwork); err != nil {
return err
}
if err := di.bootstrapStorage(nodeOptions.Directories.Storage); err != nil {
return err
}
di.bootstrapEventBus()
di.bootstrapIdentityComponents(nodeOptions)
if err := di.bootstrapLocationComponents(nodeOptions.Location, nodeOptions.Directories.Config); err != nil {
return err
}
di.bootstrapMetrics(nodeOptions)
di.PortPool = port.NewPool()
go upnp.ReportNetworkGateways()
di.bootstrapNATComponents(nodeOptions)
di.bootstrapServices(nodeOptions)
di.bootstrapNodeComponents(nodeOptions)
di.registerConnections(nodeOptions)
err := di.subscribeEventConsumers()
if err != nil {
return err
}
if err := di.Node.Start(); err != nil {
return err
}
return nil
}
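// Illustrative sketch (hypothetical helper, not upstream code) showing the
// container lifecycle from a caller's point of view: node.Options is assumed
// to be built elsewhere (for example from CLI flags), Bootstrap wires and
// starts everything, and Shutdown is expected to be called once the node
// should stop.
func exampleRunNode(options node.Options) (*Dependencies, error) {
	di := &Dependencies{}
	if err := di.Bootstrap(options); err != nil {
		return nil, err
	}
	// The caller is responsible for invoking di.Shutdown() on exit.
	return di, nil
}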
func (di *Dependencies) registerOpenvpnConnection(nodeOptions node.Options) {
service_openvpn.Bootstrap()
connectionFactory := service_openvpn.NewProcessBasedConnectionFactory(
// TODO instead of passing binary path here, Openvpn from node options could represent abstract vpn factory itself
nodeOptions.Openvpn.BinaryPath(),
nodeOptions.Directories.Config,
nodeOptions.Directories.Runtime,
di.SignerFactory,
di.IPResolver,
di.NATPinger,
)
di.ConnectionRegistry.Register(service_openvpn.ServiceType, connectionFactory)
}
func (di *Dependencies) registerNoopConnection() {
service_noop.Bootstrap()
di.ConnectionRegistry.Register(service_noop.ServiceType, service_noop.NewConnectionCreator())
}
// Shutdown stops container
func (di *Dependencies) Shutdown() (err error) {
var errs []error
defer func() {
for i := range errs {
log.Error("Dependencies shutdown failed: ", errs[i])
if err == nil {
err = errs[i]
}
}
}()
if di.ServicesManager != nil {
if err := di.ServicesManager.Kill(); err != nil {
errs = append(errs, err)
}
}
if di.NATService != nil {
if err := di.NATService.Disable(); err != nil {
errs = append(errs, err)
}
}
if di.Node != nil {
if err := di.Node.Kill(); err != nil {
errs = append(errs, err)
}
}
if di.Storage != nil {
if err := di.Storage.Close(); err != nil {
errs = append(errs, err)
}
}
log.Flush()
return nil
}
func (di *Dependencies) bootstrapStorage(path string) error {
localStorage, err := boltdb.NewStorage(path)
if err != nil {
return err
}
migrator := boltdb.NewMigrator(localStorage)
err = migrator.RunMigrations(history.Sequence)
if err != nil {
return err
}
di.Storage = localStorage
return nil
}
func (di *Dependencies) subscribeEventConsumers() error {
// state events
err := di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsTracker.ConsumeSessionEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsReporter.ConsumeSessionEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(connection.SessionEventTopic, di.SessionStorage.ConsumeSessionEvent)
if err != nil {
return err
}
// statistics events
err = di.EventBus.Subscribe(connection.StatisticsEventTopic, di.StatisticsTracker.ConsumeStatisticsEvent)
if err != nil {
return err
}
err = di.EventBus.SubscribeAsync(connection.StateEventTopic, di.LocationResolver.HandleConnectionEvent)
if err != nil {
return err
}
// NAT events
err = di.EventBus.Subscribe(event.Topic, di.NATEventSender.ConsumeNATEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(event.Topic, di.NATTracker.ConsumeNATEvent)
if err != nil {
return err
}
return di.EventBus.Subscribe(event.Topic, di.NATStatusTracker.ConsumeNATEvent)
}
func (di *Dependencies) bootstrapNodeComponents(nodeOptions node.Options) {
dialogFactory := func(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error) {
dialogEstablisher := nats_dialog.NewDialogEstablisher(consumerID, di.SignerFactory(consumerID))
return dialogEstablisher.EstablishDialog(providerID, contact)
}
di.StatisticsTracker = statistics.NewSessionStatisticsTracker(time.Now)
di.StatisticsReporter = statistics.NewSessionStatisticsReporter(
di.StatisticsTracker,
di.MysteriumAPI,
di.SignerFactory,
di.LocationResolver,
time.Minute,
)
di.SessionStorage = consumer_session.NewSessionStorage(di.Storage, di.StatisticsTracker)
di.PromiseStorage = promise.NewStorage(di.Storage)
di.ConnectionRegistry = connection.NewRegistry()
di.ConnectionManager = connection.NewManager(
dialogFactory,
payment_factory.PaymentIssuerFactoryFunc(nodeOptions, di.SignerFactory),
di.ConnectionRegistry.CreateConnection,
di.EventBus,
di.IPResolver,
)
router := tequilapi.NewAPIRouter()
tequilapi_endpoints.AddRouteForStop(router, utils.SoftKiller(di.Shutdown))
tequilapi_endpoints.AddRoutesForIdentities(router, di.IdentityManager)
tequilapi_endpoints.AddRoutesForConnection(router, di.ConnectionManager, di.IPResolver, di.StatisticsTracker, di.MysteriumAPI)
tequilapi_endpoints.AddRoutesForConnectionSessions(router, di.SessionStorage)
tequilapi_endpoints.AddRoutesForConnectionLocation(router, di.ConnectionManager, di.LocationResolver)
tequilapi_endpoints.AddRoutesForLocation(router, di.LocationResolver)
tequilapi_endpoints.AddRoutesForProposals(router, di.MysteriumAPI, di.MysteriumMorqaClient)
tequilapi_endpoints.AddRoutesForService(router, di.ServicesManager, serviceTypesRequestParser, nodeOptions.AccessPolicyEndpointAddress)
tequilapi_endpoints.AddRoutesForServiceSessions(router, di.ServiceSessionStorage)
tequilapi_endpoints.AddRoutesForPayout(router, di.IdentityManager, di.SignerFactory, di.MysteriumAPI)
tequilapi_endpoints.AddRoutesForAccessPolicies(router, nodeOptions.AccessPolicyEndpointAddress)
tequilapi_endpoints.AddRoutesForNAT(router, di.NATStatusTracker.Status)
identity_registry.AddIdentityRegistrationEndpoint(router, di.IdentityRegistration, di.IdentityRegistry)
corsPolicy := tequilapi.NewMysteriumCorsPolicy()
httpAPIServer := tequilapi.NewServer(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort, router, corsPolicy)
di.Node = node.NewNode(di.ConnectionManager, httpAPIServer, di.LocationResolver, di.MetricsSender, di.NATPinger)
}
func newSessionManagerFactory(
proposal market.ServiceProposal,
sessionStorage *session.StorageMemory,
promiseStorage session_payment.PromiseStorage,
natPingerChan func(*traversal.Params),
natTracker NatEventTracker,
serviceID string,
) session.ManagerFactory {
return func(dialog communication.Dialog) *session.Manager {
providerBalanceTrackerFactory := func(consumerID, receiverID, issuerID identity.Identity) (session.BalanceTracker, error) {
// We want backwards compatibility for openvpn on desktop providers, so no payments for them.
// Splitting this as a separate case just for that reason.
// TODO: remove this one day.
if proposal.ServiceType == openvpn.ServiceType {
return payments_noop.NewSessionBalance(), nil
}
timeTracker := session.NewTracker(time.Now)
// TODO: set the time and proper payment info
payment := dto.PaymentPerTime{
Price: money.Money{
Currency: money.CurrencyMyst,
Amount: uint64(0),
},
Duration: time.Minute,
}
amountCalc := session.AmountCalc{PaymentDef: payment}
sender := balance.NewBalanceSender(dialog)
promiseChan := make(chan promise.Message, 1)
listener := promise.NewListener(promiseChan)
err := dialog.Receive(listener.GetConsumer())
if err != nil {
return nil, err
}
// TODO: the ints and times here need to be passed in as well, or defined as constants
tracker := balance.NewBalanceTracker(&timeTracker, amountCalc, 0)
validator := validators.NewIssuedPromiseValidator(consumerID, receiverID, issuerID)
return session_payment.NewSessionBalance(sender, tracker, promiseChan, payment_factory.BalanceSendPeriod, payment_factory.PromiseWaitTimeout, validator, promiseStorage, consumerID, receiverID, issuerID), nil
}
return session.NewManager(
proposal,
session.GenerateUUID,
sessionStorage,
providerBalanceTrackerFactory,
natPingerChan,
natTracker,
serviceID,
)
}
}
// bootstrapNetworkComponents decides on the network definition, combining the testnet/localnet flags with any overrides from the options.
func (di *Dependencies) bootstrapNetworkComponents(options node.OptionsNetwork) (err error) {
network := metadata.DefaultNetwork
switch {
case options.Testnet:
network = metadata.TestnetDefinition
case options.Localnet:
network = metadata.LocalnetDefinition
}
	// override defined values one by one from options
if options.DiscoveryAPIAddress != metadata.DefaultNetwork.DiscoveryAPIAddress {
network.DiscoveryAPIAddress = options.DiscoveryAPIAddress
}
if options.BrokerAddress != metadata.DefaultNetwork.BrokerAddress {
network.BrokerAddress = options.BrokerAddress
}
normalizedAddress := common.HexToAddress(options.EtherPaymentsAddress)
if normalizedAddress != metadata.DefaultNetwork.PaymentsContractAddress {
network.PaymentsContractAddress = normalizedAddress
}
if options.EtherClientRPC != metadata.DefaultNetwork.EtherClientRPC {
network.EtherClientRPC = options.EtherClientRPC
}
di.NetworkDefinition = network
di.MysteriumAPI = mysterium.NewClient(network.DiscoveryAPIAddress)
di.MysteriumMorqaClient = oracle.NewMorqaClient(network.QualityOracle)
log.Info("Using Eth endpoint: ", network.EtherClientRPC)
if di.EtherClient, err = blockchain.NewClient(network.EtherClientRPC); err != nil {
return err
}
log.Info("Using Eth contract at address: ", network.PaymentsContractAddress.String())
if options.ExperimentIdentityCheck {
if di.IdentityRegistry, err = identity_registry.NewIdentityRegistryContract(di.EtherClient, network.PaymentsContractAddress); err != nil {
return err
}
} else {
di.IdentityRegistry = &identity_registry.FakeRegistry{Registered: true, RegistrationEventExists: true}
}
return nil
}
func (di *Dependencies) bootstrapEventBus() {
di.EventBus = eventbus.New()
}
func (di *Dependencies) bootstrapIdentityComponents(options node.Options) {
di.Keystore = identity.NewKeystoreFilesystem(options.Directories.Keystore, options.Keystore.UseLightweight)
di.IdentityManager = identity.NewIdentityManager(di.Keystore)
di.SignerFactory = func(id identity.Identity) identity.Signer {
return identity.NewSigner(di.Keystore, id)
}
di.IdentityRegistration = identity_registry.NewRegistrationDataProvider(di.Keystore)
}
func (di *Dependencies) bootstrapLocationComponents(options node.OptionsLocation, configDirectory string) (err error) {
di.IPResolver = ip.NewResolver(options.IPDetectorURL)
var resolver location.Resolver
switch options.Type {
case node.LocationTypeManual:
resolver = location.NewStaticResolver(options.Country, options.City, options.NodeType, di.IPResolver)
case node.LocationTypeBuiltin:
resolver, err = location.NewBuiltInResolver(di.IPResolver)
case node.LocationTypeMMDB:
resolver, err = location.NewExternalDBResolver(filepath.Join(configDirectory, options.Address), di.IPResolver)
case node.LocationTypeOracle:
resolver, err = location.NewOracleResolver(options.Address), nil
default:
err = fmt.Errorf("unknown location detector type: %s", options.Type)
}
if err != nil {
return err
}
di.LocationResolver = location.NewCache(resolver, time.Minute*5)
return
}
func (di *Dependencies) bootstrapMetrics(options node.Options) {
appVersion := metadata.VersionAsString()
di.MetricsSender = metrics.NewSender(options.DisableMetrics, options.MetricsAddress, appVersion)
}
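// bootstrapNATComponents selects either a hole-punching NAT pinger (when the ExperimentNATPunching option is set) or a no-op pinger, and sets up the event tracker, event sender and status tracker used to report NAT traversal progress.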
func (di *Dependencies) bootstrapNATComponents(options node.Options) {
di.NATTracker = event.NewTracker()
if options.ExperimentNATPunching {
log.Trace(logPrefix + "experimental NAT punching enabled, creating a pinger")
di.NATPinger = traversal.NewPingerFactory(
di.NATTracker,
config.NewConfigParser(),
traversal.NewNATProxy(),
di.PortPool,
mapping.StageName,
di.EventBus,
)
} else {
di.NATPinger = &traversal.NoopPinger{}
}
di.NATEventSender = event.NewSender(di.MetricsSender, di.IPResolver.GetPublicIP)
var lastStageName string
if options.ExperimentNATPunching {
lastStageName = traversal.StageName
} else {
lastStageName = mapping.StageName
}
di.NATStatusTracker = nat.NewStatusTracker(lastStageName)
}
 | 1 | 14,327 | Maybe remake `VersionAsString()` function, so that we would have build info in all places | mysteriumnetwork-node | go 
@@ -249,6 +249,17 @@ TSSLSocket::~TSSLSocket() {
close();
}
+bool TSSLSocket::hasPendingDataToRead() {
+ if (!isOpen()) {
+ return false;
+ }
+ initializeHandshake();
+ if (!checkHandshake())
+ throw TSSLException("SSL_peek: Handshake is not completed");
+ // data may be available in SSL buffers (note: SSL_pending does not have a failure mode)
+ return TSocket::hasPendingDataToRead() || SSL_pending(ssl_) > 0;
+}
+
void TSSLSocket::init() {
handshakeCompleted_ = false;
readRetryCount_ = 0; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/thrift-config.h>
#include <errno.h>
#include <string>
#include <cstring>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#define OPENSSL_VERSION_NO_THREAD_ID_BEFORE 0x10000000L
#define OPENSSL_ENGINE_CLEANUP_REQUIRED_BEFORE 0x10100000L
#include <boost/shared_array.hpp>
#include <openssl/opensslv.h>
#if (OPENSSL_VERSION_NUMBER < OPENSSL_ENGINE_CLEANUP_REQUIRED_BEFORE)
#include <openssl/engine.h>
#endif
#include <openssl/err.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include <openssl/x509v3.h>
#include <thrift/concurrency/Mutex.h>
#include <thrift/transport/TSSLSocket.h>
#include <thrift/transport/PlatformSocket.h>
#include <thrift/TToString.h>
using namespace apache::thrift::concurrency;
using std::string;
struct CRYPTO_dynlock_value {
Mutex mutex;
};
namespace apache {
namespace thrift {
namespace transport {
// OpenSSL initialization/cleanup
static bool openSSLInitialized = false;
static boost::shared_array<Mutex> mutexes;
static void callbackLocking(int mode, int n, const char*, int) {
if (mode & CRYPTO_LOCK) {
// assertion of (px != 0) here typically means that a TSSLSocket's lifetime
// exceeded the lifetime of the TSSLSocketFactory that created it, and the
// TSSLSocketFactory already ran cleanupOpenSSL(), which deleted "mutexes".
mutexes[n].lock();
} else {
mutexes[n].unlock();
}
}
#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_NO_THREAD_ID_BEFORE)
static unsigned long callbackThreadID() {
#ifdef _WIN32
return (unsigned long)GetCurrentThreadId();
#else
return (unsigned long)pthread_self();
#endif
}
#endif
static CRYPTO_dynlock_value* dyn_create(const char*, int) {
return new CRYPTO_dynlock_value;
}
static void dyn_lock(int mode, struct CRYPTO_dynlock_value* lock, const char*, int) {
if (lock != NULL) {
if (mode & CRYPTO_LOCK) {
lock->mutex.lock();
} else {
lock->mutex.unlock();
}
}
}
static void dyn_destroy(struct CRYPTO_dynlock_value* lock, const char*, int) {
delete lock;
}
void initializeOpenSSL() {
if (openSSLInitialized) {
return;
}
openSSLInitialized = true;
SSL_library_init();
SSL_load_error_strings();
ERR_load_crypto_strings();
// static locking
// newer versions of OpenSSL changed CRYPTO_num_locks - see THRIFT-3878
#ifdef CRYPTO_num_locks
mutexes = boost::shared_array<Mutex>(new Mutex[CRYPTO_num_locks()]);
#else
mutexes = boost::shared_array<Mutex>(new Mutex[ ::CRYPTO_num_locks()]);
#endif
#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_NO_THREAD_ID_BEFORE)
CRYPTO_set_id_callback(callbackThreadID);
#endif
CRYPTO_set_locking_callback(callbackLocking);
// dynamic locking
CRYPTO_set_dynlock_create_callback(dyn_create);
CRYPTO_set_dynlock_lock_callback(dyn_lock);
CRYPTO_set_dynlock_destroy_callback(dyn_destroy);
}
void cleanupOpenSSL() {
if (!openSSLInitialized) {
return;
}
openSSLInitialized = false;
// https://wiki.openssl.org/index.php/Library_Initialization#Cleanup
// we purposefully do NOT call FIPS_mode_set(0) and leave it up to the enclosing application to manage FIPS entirely
#if (OPENSSL_VERSION_NUMBER < OPENSSL_ENGINE_CLEANUP_REQUIRED_BEFORE)
ENGINE_cleanup(); // https://www.openssl.org/docs/man1.1.0/crypto/ENGINE_cleanup.html - cleanup call is needed before 1.1.0
#endif
CONF_modules_unload(1);
EVP_cleanup();
CRYPTO_cleanup_all_ex_data();
ERR_remove_state(0);
ERR_free_strings();
mutexes.reset();
}
static void buildErrors(string& message, int errno_copy = 0, int sslerrno = 0);
static bool matchName(const char* host, const char* pattern, int size);
static char uppercase(char c);
// SSLContext implementation
SSLContext::SSLContext(const SSLProtocol& protocol) {
if (protocol == SSLTLS) {
ctx_ = SSL_CTX_new(SSLv23_method());
#ifndef OPENSSL_NO_SSL3
} else if (protocol == SSLv3) {
ctx_ = SSL_CTX_new(SSLv3_method());
#endif
} else if (protocol == TLSv1_0) {
ctx_ = SSL_CTX_new(TLSv1_method());
} else if (protocol == TLSv1_1) {
ctx_ = SSL_CTX_new(TLSv1_1_method());
} else if (protocol == TLSv1_2) {
ctx_ = SSL_CTX_new(TLSv1_2_method());
} else {
/// UNKNOWN PROTOCOL!
throw TSSLException("SSL_CTX_new: Unknown protocol");
}
if (ctx_ == NULL) {
string errors;
buildErrors(errors);
throw TSSLException("SSL_CTX_new: " + errors);
}
SSL_CTX_set_mode(ctx_, SSL_MODE_AUTO_RETRY);
// Disable horribly insecure SSLv2 and SSLv3 protocols but allow a handshake
// with older clients so they get a graceful denial.
if (protocol == SSLTLS) {
SSL_CTX_set_options(ctx_, SSL_OP_NO_SSLv2);
SSL_CTX_set_options(ctx_, SSL_OP_NO_SSLv3); // THRIFT-3164
}
}
SSLContext::~SSLContext() {
if (ctx_ != NULL) {
SSL_CTX_free(ctx_);
ctx_ = NULL;
}
}
SSL* SSLContext::createSSL() {
SSL* ssl = SSL_new(ctx_);
if (ssl == NULL) {
string errors;
buildErrors(errors);
throw TSSLException("SSL_new: " + errors);
}
return ssl;
}
// TSSLSocket implementation
TSSLSocket::TSSLSocket(stdcxx::shared_ptr<SSLContext> ctx)
: TSocket(), server_(false), ssl_(NULL), ctx_(ctx) {
init();
}
TSSLSocket::TSSLSocket(stdcxx::shared_ptr<SSLContext> ctx, stdcxx::shared_ptr<THRIFT_SOCKET> interruptListener)
: TSocket(), server_(false), ssl_(NULL), ctx_(ctx) {
init();
interruptListener_ = interruptListener;
}
TSSLSocket::TSSLSocket(stdcxx::shared_ptr<SSLContext> ctx, THRIFT_SOCKET socket)
: TSocket(socket), server_(false), ssl_(NULL), ctx_(ctx) {
init();
}
TSSLSocket::TSSLSocket(stdcxx::shared_ptr<SSLContext> ctx, THRIFT_SOCKET socket, stdcxx::shared_ptr<THRIFT_SOCKET> interruptListener)
: TSocket(socket, interruptListener), server_(false), ssl_(NULL), ctx_(ctx) {
init();
}
TSSLSocket::TSSLSocket(stdcxx::shared_ptr<SSLContext> ctx, string host, int port)
: TSocket(host, port), server_(false), ssl_(NULL), ctx_(ctx) {
init();
}
TSSLSocket::TSSLSocket(stdcxx::shared_ptr<SSLContext> ctx, string host, int port, stdcxx::shared_ptr<THRIFT_SOCKET> interruptListener)
: TSocket(host, port), server_(false), ssl_(NULL), ctx_(ctx) {
init();
interruptListener_ = interruptListener;
}
TSSLSocket::~TSSLSocket() {
close();
}
void TSSLSocket::init() {
handshakeCompleted_ = false;
readRetryCount_ = 0;
eventSafe_ = false;
}
bool TSSLSocket::isOpen() {
if (ssl_ == NULL || !TSocket::isOpen()) {
return false;
}
int shutdown = SSL_get_shutdown(ssl_);
// "!!" is squelching C4800 "forcing bool -> true or false" performance warning
bool shutdownReceived = !!(shutdown & SSL_RECEIVED_SHUTDOWN);
bool shutdownSent = !!(shutdown & SSL_SENT_SHUTDOWN);
if (shutdownReceived && shutdownSent) {
return false;
}
return true;
}
/*
* Note: This method is not libevent safe.
*/
bool TSSLSocket::peek() {
if (!isOpen()) {
return false;
}
initializeHandshake();
if (!checkHandshake())
throw TSSLException("SSL_peek: Handshake is not completed");
int rc;
uint8_t byte;
do {
rc = SSL_peek(ssl_, &byte, 1);
if (rc < 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
        // in the case of SSL_ERROR_SYSCALL we want to wait for a read event again
waitForEvent(error != SSL_ERROR_WANT_WRITE);
continue;
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy, error);
throw TSSLException("SSL_peek: " + errors);
} else if (rc == 0) {
ERR_clear_error();
break;
}
} while (true);
return (rc > 0);
}
void TSSLSocket::open() {
if (isOpen() || server()) {
throw TTransportException(TTransportException::BAD_ARGS);
}
TSocket::open();
}
/*
* Note: This method is not libevent safe.
*/
void TSSLSocket::close() {
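  // SSL_shutdown is retried here (rc is forced to the local sentinel value 2) whenever
  // OpenSSL reports that it needs to read or write first; waitForEvent blocks until the
  // underlying socket is ready before the next attempt.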
if (ssl_ != NULL) {
try {
int rc;
int errno_copy = 0;
int error = 0;
do {
rc = SSL_shutdown(ssl_);
if (rc <= 0) {
errno_copy = THRIFT_GET_SOCKET_ERROR;
error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
          // in the case of SSL_ERROR_SYSCALL we want to wait for a write/read event again
waitForEvent(error == SSL_ERROR_WANT_READ);
rc = 2;
default:;// do nothing
}
}
} while (rc == 2);
if (rc < 0) {
string errors;
buildErrors(errors, errno_copy, error);
GlobalOutput(("SSL_shutdown: " + errors).c_str());
}
} catch (TTransportException& te) {
// Don't emit an exception because this method is called by the
// destructor. There's also not much that a user can do to recover, so
// just clean up as much as possible without throwing, similar to the rc
// < 0 case above.
GlobalOutput.printf("SSL_shutdown: %s", te.what());
}
SSL_free(ssl_);
ssl_ = NULL;
handshakeCompleted_ = false;
ERR_remove_state(0);
}
TSocket::close();
}
/*
 * Returns the number of bytes read from the SSL socket.
 * If eventSafe is set, this may return 0 bytes; the read method then
 * needs to be called again until it is successful or it throws an
 * exception in case of failure.
*/
uint32_t TSSLSocket::read(uint8_t* buf, uint32_t len) {
initializeHandshake();
if (!checkHandshake())
throw TTransportException(TTransportException::UNKNOWN, "retry again");
int32_t bytes = 0;
while (readRetryCount_ < maxRecvRetries_) {
bytes = SSL_read(ssl_, buf, len);
int32_t errno_copy = THRIFT_GET_SOCKET_ERROR;
int32_t error = SSL_get_error(ssl_, bytes);
readRetryCount_++;
if (error == SSL_ERROR_NONE) {
readRetryCount_ = 0;
break;
}
unsigned int waitEventReturn;
switch (error) {
case SSL_ERROR_ZERO_RETURN:
throw TTransportException(TTransportException::END_OF_FILE, "client disconnected");
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
if (readRetryCount_ >= maxRecvRetries_) {
// THRIFT_EINTR needs to be handled manually and we can tolerate
// a certain number
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
if (isLibeventSafe()) {
if (readRetryCount_ < maxRecvRetries_) {
// THRIFT_EINTR needs to be handled manually and we can tolerate
// a certain number
throw TTransportException(TTransportException::UNKNOWN, "retry again");
}
          throw TTransportException(TTransportException::INTERNAL_ERROR, "too many recv retries");
}
        // in the case of SSL_ERROR_SYSCALL we want to wait for a read event again
else if ((waitEventReturn = waitForEvent(error != SSL_ERROR_WANT_WRITE)) == TSSL_EINTR ) {
// repeat operation
if (readRetryCount_ < maxRecvRetries_) {
// THRIFT_EINTR needs to be handled manually and we can tolerate
// a certain number
continue;
}
          throw TTransportException(TTransportException::INTERNAL_ERROR, "too many recv retries");
}
else if (waitEventReturn == TSSL_DATA) {
// in case of SSL and huge thrift packets, there may be a number of
// socket operations, before any data becomes available by SSL_read().
// Therefore the number of retries should not be increased and
// the operation should be repeated.
readRetryCount_--;
continue;
}
        throw TTransportException(TTransportException::INTERNAL_ERROR, "unknown waitForEvent return value");
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy, error);
throw TSSLException("SSL_read: " + errors);
}
return bytes;
}
void TSSLSocket::write(const uint8_t* buf, uint32_t len) {
initializeHandshake();
if (!checkHandshake())
return;
// loop in case SSL_MODE_ENABLE_PARTIAL_WRITE is set in SSL_CTX.
uint32_t written = 0;
while (written < len) {
ERR_clear_error();
int32_t bytes = SSL_write(ssl_, &buf[written], len - written);
if (bytes <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, bytes);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
if (isLibeventSafe()) {
return;
}
else {
          // in the case of SSL_ERROR_SYSCALL we want to wait for a write event again
waitForEvent(error == SSL_ERROR_WANT_READ);
continue;
}
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy, error);
throw TSSLException("SSL_write: " + errors);
}
written += bytes;
}
}
/*
 * Returns the number of bytes written to the SSL socket.
 * If eventSafe is set, this may return 0 bytes; the write method then
 * needs to be called again until it is successful or it throws an
 * exception in case of failure.
*/
uint32_t TSSLSocket::write_partial(const uint8_t* buf, uint32_t len) {
initializeHandshake();
if (!checkHandshake())
return 0;
// loop in case SSL_MODE_ENABLE_PARTIAL_WRITE is set in SSL_CTX.
uint32_t written = 0;
while (written < len) {
ERR_clear_error();
int32_t bytes = SSL_write(ssl_, &buf[written], len - written);
if (bytes <= 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
int error = SSL_get_error(ssl_, bytes);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
if (isLibeventSafe()) {
return 0;
}
else {
          // in the case of SSL_ERROR_SYSCALL we want to wait for a write event again
waitForEvent(error == SSL_ERROR_WANT_READ);
continue;
}
default:;// do nothing
}
string errors;
buildErrors(errors, errno_copy, error);
throw TSSLException("SSL_write: " + errors);
}
written += bytes;
}
return written;
}
void TSSLSocket::flush() {
// Don't throw exception if not open. Thrift servers close socket twice.
if (ssl_ == NULL) {
return;
}
initializeHandshake();
if (!checkHandshake())
throw TSSLException("BIO_flush: Handshake is not completed");
BIO* bio = SSL_get_wbio(ssl_);
if (bio == NULL) {
throw TSSLException("SSL_get_wbio returns NULL");
}
if (BIO_flush(bio) != 1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("BIO_flush: " + errors);
}
}
void TSSLSocket::initializeHandshakeParams() {
// set underlying socket to non-blocking
int flags;
if ((flags = THRIFT_FCNTL(socket_, THRIFT_F_GETFL, 0)) < 0
|| THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK) < 0) {
GlobalOutput.perror("thriftServerEventHandler: set THRIFT_O_NONBLOCK (THRIFT_FCNTL) ",
THRIFT_GET_SOCKET_ERROR);
::THRIFT_CLOSESOCKET(socket_);
return;
}
ssl_ = ctx_->createSSL();
SSL_set_fd(ssl_, static_cast<int>(socket_));
}
bool TSSLSocket::checkHandshake() {
return handshakeCompleted_;
}
void TSSLSocket::initializeHandshake() {
if (!TSocket::isOpen()) {
throw TTransportException(TTransportException::NOT_OPEN);
}
if (checkHandshake()) {
return;
}
if (ssl_ == NULL) {
initializeHandshakeParams();
}
int rc;
int errno_copy = 0;
int error = 0;
if (server()) {
do {
rc = SSL_accept(ssl_);
if (rc <= 0) {
errno_copy = THRIFT_GET_SOCKET_ERROR;
error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
if (isLibeventSafe()) {
return;
}
else {
// repeat operation
            // in the case of SSL_ERROR_SYSCALL we want to wait for a write/read event again
waitForEvent(error == SSL_ERROR_WANT_READ);
rc = 2;
}
default:;// do nothing
}
}
} while (rc == 2);
} else {
// OpenSSL < 0.9.8f does not have SSL_set_tlsext_host_name()
#if defined(SSL_set_tlsext_host_name)
// set the SNI hostname
SSL_set_tlsext_host_name(ssl_, getHost().c_str());
#endif
do {
rc = SSL_connect(ssl_);
if (rc <= 0) {
errno_copy = THRIFT_GET_SOCKET_ERROR;
error = SSL_get_error(ssl_, rc);
switch (error) {
case SSL_ERROR_SYSCALL:
if ((errno_copy != THRIFT_EINTR)
&& (errno_copy != THRIFT_EAGAIN)) {
break;
}
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
if (isLibeventSafe()) {
return;
}
else {
// repeat operation
            // in the case of SSL_ERROR_SYSCALL we want to wait for a write/read event again
waitForEvent(error == SSL_ERROR_WANT_READ);
rc = 2;
}
default:;// do nothing
}
}
} while (rc == 2);
}
if (rc <= 0) {
string fname(server() ? "SSL_accept" : "SSL_connect");
string errors;
buildErrors(errors, errno_copy, error);
throw TSSLException(fname + ": " + errors);
}
authorize();
handshakeCompleted_ = true;
}
void TSSLSocket::authorize() {
int rc = SSL_get_verify_result(ssl_);
if (rc != X509_V_OK) { // verify authentication result
throw TSSLException(string("SSL_get_verify_result(), ") + X509_verify_cert_error_string(rc));
}
X509* cert = SSL_get_peer_certificate(ssl_);
if (cert == NULL) {
// certificate is not present
if (SSL_get_verify_mode(ssl_) & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) {
throw TSSLException("authorize: required certificate not present");
}
// certificate was optional: didn't intend to authorize remote
if (server() && access_ != NULL) {
throw TSSLException("authorize: certificate required for authorization");
}
return;
}
// certificate is present
if (access_ == NULL) {
X509_free(cert);
return;
}
// both certificate and access manager are present
string host;
sockaddr_storage sa;
socklen_t saLength = sizeof(sa);
if (getpeername(socket_, (sockaddr*)&sa, &saLength) != 0) {
sa.ss_family = AF_UNSPEC;
}
AccessManager::Decision decision = access_->verify(sa);
if (decision != AccessManager::SKIP) {
X509_free(cert);
if (decision != AccessManager::ALLOW) {
throw TSSLException("authorize: access denied based on remote IP");
}
return;
}
// extract subjectAlternativeName
STACK_OF(GENERAL_NAME)* alternatives
= (STACK_OF(GENERAL_NAME)*)X509_get_ext_d2i(cert, NID_subject_alt_name, NULL, NULL);
if (alternatives != NULL) {
const int count = sk_GENERAL_NAME_num(alternatives);
for (int i = 0; decision == AccessManager::SKIP && i < count; i++) {
const GENERAL_NAME* name = sk_GENERAL_NAME_value(alternatives, i);
if (name == NULL) {
continue;
}
char* data = (char*)ASN1_STRING_data(name->d.ia5);
int length = ASN1_STRING_length(name->d.ia5);
switch (name->type) {
case GEN_DNS:
if (host.empty()) {
host = (server() ? getPeerHost() : getHost());
}
decision = access_->verify(host, data, length);
break;
case GEN_IPADD:
decision = access_->verify(sa, data, length);
break;
}
}
sk_GENERAL_NAME_pop_free(alternatives, GENERAL_NAME_free);
}
if (decision != AccessManager::SKIP) {
X509_free(cert);
if (decision != AccessManager::ALLOW) {
throw TSSLException("authorize: access denied");
}
return;
}
// extract commonName
X509_NAME* name = X509_get_subject_name(cert);
if (name != NULL) {
X509_NAME_ENTRY* entry;
unsigned char* utf8;
int last = -1;
while (decision == AccessManager::SKIP) {
last = X509_NAME_get_index_by_NID(name, NID_commonName, last);
if (last == -1)
break;
entry = X509_NAME_get_entry(name, last);
if (entry == NULL)
continue;
ASN1_STRING* common = X509_NAME_ENTRY_get_data(entry);
int size = ASN1_STRING_to_UTF8(&utf8, common);
if (host.empty()) {
host = (server() ? getPeerHost() : getHost());
}
decision = access_->verify(host, (char*)utf8, size);
OPENSSL_free(utf8);
}
}
X509_free(cert);
if (decision != AccessManager::ALLOW) {
throw TSSLException("authorize: cannot authorize peer");
}
}
/*
* Note: This method is not libevent safe.
*/
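// Polls the socket underlying the SSL read or write BIO (plus the optional interrupt
// listener) until it becomes ready or the recv/send timeout expires. Returns TSSL_EINTR
// when the poll was interrupted so the caller can retry, or TSSL_DATA when the socket is
// ready; throws on poll errors, interrupt-listener wakeups and timeouts.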
unsigned int TSSLSocket::waitForEvent(bool wantRead) {
int fdSocket;
BIO* bio;
if (wantRead) {
bio = SSL_get_rbio(ssl_);
} else {
bio = SSL_get_wbio(ssl_);
}
if (bio == NULL) {
throw TSSLException("SSL_get_?bio returned NULL");
}
if (BIO_get_fd(bio, &fdSocket) <= 0) {
throw TSSLException("BIO_get_fd failed");
}
struct THRIFT_POLLFD fds[2];
memset(fds, 0, sizeof(fds));
fds[0].fd = fdSocket;
  // use POLLIN on write operations too; this is needed for operations
  // which require both read and write on the socket.
fds[0].events = wantRead ? THRIFT_POLLIN : THRIFT_POLLIN | THRIFT_POLLOUT;
if (interruptListener_) {
fds[1].fd = *(interruptListener_.get());
fds[1].events = THRIFT_POLLIN;
}
int timeout = -1;
if (wantRead && recvTimeout_) {
timeout = recvTimeout_;
}
if (!wantRead && sendTimeout_) {
timeout = sendTimeout_;
}
int ret = THRIFT_POLL(fds, interruptListener_ ? 2 : 1, timeout);
if (ret < 0) {
// error cases
if (THRIFT_GET_SOCKET_ERROR == THRIFT_EINTR) {
return TSSL_EINTR; // repeat operation
}
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSSLSocket::read THRIFT_POLL() ", errno_copy);
throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy);
} else if (ret > 0){
if (fds[1].revents & THRIFT_POLLIN) {
throw TTransportException(TTransportException::INTERRUPTED, "Interrupted");
}
return TSSL_DATA;
} else {
throw TTransportException(TTransportException::TIMED_OUT, "THRIFT_POLL (timed out)");
}
}
// TSSLSocketFactory implementation
uint64_t TSSLSocketFactory::count_ = 0;
Mutex TSSLSocketFactory::mutex_;
bool TSSLSocketFactory::manualOpenSSLInitialization_ = false;
TSSLSocketFactory::TSSLSocketFactory(SSLProtocol protocol) : server_(false) {
Guard guard(mutex_);
if (count_ == 0) {
if (!manualOpenSSLInitialization_) {
initializeOpenSSL();
}
randomize();
}
count_++;
ctx_ = stdcxx::shared_ptr<SSLContext>(new SSLContext(protocol));
}
TSSLSocketFactory::~TSSLSocketFactory() {
Guard guard(mutex_);
ctx_.reset();
count_--;
if (count_ == 0 && !manualOpenSSLInitialization_) {
cleanupOpenSSL();
}
}
stdcxx::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket() {
stdcxx::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_));
setup(ssl);
return ssl;
}
stdcxx::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(stdcxx::shared_ptr<THRIFT_SOCKET> interruptListener) {
stdcxx::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, interruptListener));
setup(ssl);
return ssl;
}
stdcxx::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(THRIFT_SOCKET socket) {
stdcxx::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, socket));
setup(ssl);
return ssl;
}
stdcxx::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(THRIFT_SOCKET socket, stdcxx::shared_ptr<THRIFT_SOCKET> interruptListener) {
stdcxx::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, socket, interruptListener));
setup(ssl);
return ssl;
}
stdcxx::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(const string& host, int port) {
stdcxx::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, host, port));
setup(ssl);
return ssl;
}
stdcxx::shared_ptr<TSSLSocket> TSSLSocketFactory::createSocket(const string& host, int port, stdcxx::shared_ptr<THRIFT_SOCKET> interruptListener) {
stdcxx::shared_ptr<TSSLSocket> ssl(new TSSLSocket(ctx_, host, port, interruptListener));
setup(ssl);
return ssl;
}
void TSSLSocketFactory::setup(stdcxx::shared_ptr<TSSLSocket> ssl) {
ssl->server(server());
if (access_ == NULL && !server()) {
access_ = stdcxx::shared_ptr<AccessManager>(new DefaultClientAccessManager);
}
if (access_ != NULL) {
ssl->access(access_);
}
}
void TSSLSocketFactory::ciphers(const string& enable) {
int rc = SSL_CTX_set_cipher_list(ctx_->get(), enable.c_str());
if (ERR_peek_error() != 0) {
string errors;
buildErrors(errors);
throw TSSLException("SSL_CTX_set_cipher_list: " + errors);
}
if (rc == 0) {
throw TSSLException("None of specified ciphers are supported");
}
}
void TSSLSocketFactory::authenticate(bool required) {
int mode;
if (required) {
mode = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT | SSL_VERIFY_CLIENT_ONCE;
} else {
mode = SSL_VERIFY_NONE;
}
SSL_CTX_set_verify(ctx_->get(), mode, NULL);
}
void TSSLSocketFactory::loadCertificate(const char* path, const char* format) {
if (path == NULL || format == NULL) {
throw TTransportException(TTransportException::BAD_ARGS,
"loadCertificateChain: either <path> or <format> is NULL");
}
if (strcmp(format, "PEM") == 0) {
if (SSL_CTX_use_certificate_chain_file(ctx_->get(), path) == 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_CTX_use_certificate_chain_file: " + errors);
}
} else {
throw TSSLException("Unsupported certificate format: " + string(format));
}
}
void TSSLSocketFactory::loadPrivateKey(const char* path, const char* format) {
if (path == NULL || format == NULL) {
throw TTransportException(TTransportException::BAD_ARGS,
"loadPrivateKey: either <path> or <format> is NULL");
}
if (strcmp(format, "PEM") == 0) {
if (SSL_CTX_use_PrivateKey_file(ctx_->get(), path, SSL_FILETYPE_PEM) == 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_CTX_use_PrivateKey_file: " + errors);
}
}
}
void TSSLSocketFactory::loadTrustedCertificates(const char* path, const char* capath) {
if (path == NULL) {
throw TTransportException(TTransportException::BAD_ARGS,
"loadTrustedCertificates: <path> is NULL");
}
if (SSL_CTX_load_verify_locations(ctx_->get(), path, capath) == 0) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
string errors;
buildErrors(errors, errno_copy);
throw TSSLException("SSL_CTX_load_verify_locations: " + errors);
}
}
void TSSLSocketFactory::randomize() {
RAND_poll();
}
void TSSLSocketFactory::overrideDefaultPasswordCallback() {
SSL_CTX_set_default_passwd_cb(ctx_->get(), passwordCallback);
SSL_CTX_set_default_passwd_cb_userdata(ctx_->get(), this);
}
int TSSLSocketFactory::passwordCallback(char* password, int size, int, void* data) {
TSSLSocketFactory* factory = (TSSLSocketFactory*)data;
string userPassword;
factory->getPassword(userPassword, size);
int length = static_cast<int>(userPassword.size());
if (length > size) {
length = size;
}
strncpy(password, userPassword.c_str(), length);
userPassword.assign(userPassword.size(), '*');
return length;
}
// extract error messages from error queue
void buildErrors(string& errors, int errno_copy, int sslerrno) {
unsigned long errorCode;
char message[256];
errors.reserve(512);
while ((errorCode = ERR_get_error()) != 0) {
if (!errors.empty()) {
errors += "; ";
}
const char* reason = ERR_reason_error_string(errorCode);
if (reason == NULL) {
THRIFT_SNPRINTF(message, sizeof(message) - 1, "SSL error # %lu", errorCode);
reason = message;
}
errors += reason;
}
if (errors.empty()) {
if (errno_copy != 0) {
errors += TOutput::strerror_s(errno_copy);
}
}
if (errors.empty()) {
errors = "error code: " + to_string(errno_copy);
}
if (sslerrno) {
errors += " (SSL_error_code = " + to_string(sslerrno) + ")";
}
}
/**
* Default implementation of AccessManager
*/
Decision DefaultClientAccessManager::verify(const sockaddr_storage& sa) throw() {
(void)sa;
return SKIP;
}
Decision DefaultClientAccessManager::verify(const string& host,
const char* name,
int size) throw() {
if (host.empty() || name == NULL || size <= 0) {
return SKIP;
}
return (matchName(host.c_str(), name, size) ? ALLOW : SKIP);
}
Decision DefaultClientAccessManager::verify(const sockaddr_storage& sa,
const char* data,
int size) throw() {
bool match = false;
if (sa.ss_family == AF_INET && size == sizeof(in_addr)) {
match = (memcmp(&((sockaddr_in*)&sa)->sin_addr, data, size) == 0);
} else if (sa.ss_family == AF_INET6 && size == sizeof(in6_addr)) {
match = (memcmp(&((sockaddr_in6*)&sa)->sin6_addr, data, size) == 0);
}
return (match ? ALLOW : SKIP);
}
/**
 * Match a name with a pattern. The pattern may include a wildcard. A single
* wildcard "*" can match up to one component in the domain name.
*
* @param host Host name, typically the name of the remote host
* @param pattern Name retrieved from certificate
* @param size Size of "pattern"
* @return True, if "host" matches "pattern". False otherwise.
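 *
 * Example: matchName("foo.example.com", "*.example.com", 13) is true, while
 * matchName("a.b.example.com", "*.example.com", 13) is false because the wildcard
 * matches at most one domain component.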
*/
bool matchName(const char* host, const char* pattern, int size) {
bool match = false;
int i = 0, j = 0;
while (i < size && host[j] != '\0') {
if (uppercase(pattern[i]) == uppercase(host[j])) {
i++;
j++;
continue;
}
if (pattern[i] == '*') {
while (host[j] != '.' && host[j] != '\0') {
j++;
}
i++;
continue;
}
break;
}
if (i == size && host[j] == '\0') {
match = true;
}
return match;
}
// This is to work around the Turkish locale issue, i.e.,
// toupper('i') != toupper('I') if locale is "tr_TR"
char uppercase(char c) {
if ('a' <= c && c <= 'z') {
return c + ('A' - 'a');
}
return c;
}
}
}
}
| 1 | 14,086 | This should probably say something other than SSL_peek? | apache-thrift | c |
@@ -1,7 +1,7 @@
/**
- * Search Console module initialization.
+ * Search console module initialization.
*
- * Site Kit by Google, Copyright 2019 Google LLC
+ * Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. | 1 | /**
* Search Console module initialization.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Internal dependencies
*/
import SearchConsoleSettingStatus from './settings/search-console-settings-status';
/**
* WordPress dependencies
*/
import { addFilter } from '@wordpress/hooks';
/**
* Internal dependencies
*/
import { createAddToFilter } from '../../util/helpers';
import { fillFilterWithComponent } from '../../util';
const slug = 'search-console';
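// Each constant below wraps a component with createAddToFilter (imported from
// ../../util/helpers), which is assumed here to render the given element in addition to
// whatever the targeted `googlesitekit.*` filter already outputs; the addFilter calls
// further down attach these wrappers to specific dashboard areas.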
const addGoogleSitekitSearchConsoleDashboardWidget = createAddToFilter( <GoogleSitekitSearchConsoleDashboardWidget /> );
const addGoogleSitekitSearchConsoleAdminbarWidget = createAddToFilter( <GoogleSitekitSearchConsoleAdminbarWidget /> );
const addWPSearchConsoleDashboardWidget = createAddToFilter( <WPSearchConsoleDashboardWidget /> );
const addDashboardSearchFunnel = createAddToFilter( <DashboardSearchFunnel /> );
const addSearchConsoleDashboardWidgetTopLevel = createAddToFilter( <SearchConsoleDashboardWidgetTopLevel /> );
const addDashboardDetailsSearchFunnel = createAddToFilter( <DashboardDetailsWidgetSearchFunnel /> );
const addDashboardDetailsKeywords = createAddToFilter( <DashboardDetailsWidgetKeywordsTable /> );
const addDashboardPopularKeywords = createAddToFilter( <DashboardWidgetPopularKeywordsTable /> );
const addDashboardPopularity = createAddToFilter( <DashboardPopularity /> );
const addPostSearcher = createAddToFilter( <PostSearcher /> );
/**
* Internal dependencies
*/
import PostSearcher from '../../components/post-searcher';
import GoogleSitekitSearchConsoleDashboardWidget from './dashboard/dashboard-widget';
import GoogleSitekitSearchConsoleAdminbarWidget from './adminbar/adminbar-widget';
import WPSearchConsoleDashboardWidget from './wp-dashboard/wp-dashboard-widget';
import DashboardSearchFunnel from './dashboard/dashboard-widget-search-funnel.js';
import SearchConsoleDashboardWidgetTopLevel from './dashboard/dashboard-widget-top-level';
import DashboardDetailsWidgetKeywordsTable from './dashboard-details/dashboard-details-widget-keyword-table';
import DashboardWidgetPopularKeywordsTable from './dashboard/dashboard-widget-popular-keyword-table';
import DashboardDetailsWidgetSearchFunnel from './dashboard-details/dashboard-details-widget-search-funnel';
import DashboardPopularity from './dashboard/dashboard-widget-popularity';
/**
* Add components to the settings page.
*/
addFilter( `googlesitekit.ModuleSettingsDetails-${ slug }`,
'googlesitekit.SearchConsoleModuleSettingsDetails',
fillFilterWithComponent( SearchConsoleSettingStatus, {
onSettingsPage: true,
} ) );
/**
* Add components to the Site Kit Dashboard.
*/
addFilter( 'googlesitekit.DashboardModule',
'googlesitekit.SearchConsole',
addDashboardSearchFunnel, 11 );
addFilter( 'googlesitekit.DashboardModule',
'googlesitekit.DashboardPopularityModule',
addDashboardPopularity, 40 );
addFilter( 'googlesitekit.DashboardSearchFunnel',
'googlesitekit.SearchConsoleSearchFunnel',
addSearchConsoleDashboardWidgetTopLevel );
/**
* Add components to the Site Kit URL Details Dashboard.
*/
addFilter( 'googlesitekit.DashboardDetailsModule',
'googlesitekit.SearchConsole',
addDashboardDetailsSearchFunnel );
addFilter( 'googlesitekit.DashboardDetailsModule',
'googlesitekit.SearchConsole',
addDashboardDetailsKeywords, 40 );
addFilter( 'googlesitekit.DashboardPopularity',
'googlesitekit.SearchConsoleDashboardPopularity',
addDashboardPopularKeywords );
addFilter( 'googlesitekit.DashboardPopularity',
	'googlesitekit.DashboardPostSearcherModule',
addPostSearcher, 30 );
/**
* Add components to the WordPress Dashboard widget.
*/
addFilter( 'googlesitekit.WPDashboardHeader',
'googlesitekit.SearchConsole',
addWPSearchConsoleDashboardWidget, 11 );
/**
* Add components to the module detail page.
*/
addFilter( 'googlesitekit.ModuleApp-' + slug,
'googlesitekit.ModuleApp',
addGoogleSitekitSearchConsoleDashboardWidget );
addFilter( `googlesitekit.showDateRangeSelector-${ slug }`,
'googlesitekit.searchConsoleShowDateRangeSelector',
() => true );
/**
* Add components to the adminbar.
*/
addFilter( 'googlesitekit.AdminbarModules',
'googlesitekit.SearchConsole',
addGoogleSitekitSearchConsoleAdminbarWidget );
| 1 | 28,630 | Nitpicking, but this should be capitalized since it's a product name :) | google-site-kit-wp | js |
@@ -94,6 +94,16 @@ type TransactionMisc struct {
from atomic.Value
}
+type RawTransactions [][]byte
+
+func (t RawTransactions) Len() int {
+ return len(t)
+}
+
+func (t RawTransactions) EncodeIndex(i int, w *bytes.Buffer) {
+ w.Write(t[i])
+}
+
func (tm TransactionMisc) Time() time.Time {
return tm.time
} | 1 | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"bytes"
"container/heap"
"errors"
"fmt"
"io"
"math/big"
"sync/atomic"
"time"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/rlp"
)
var (
ErrInvalidSig = errors.New("invalid transaction v, r, s values")
ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures")
ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
)
// Transaction types.
const (
LegacyTxType = iota
AccessListTxType
DynamicFeeTxType
)
// Transaction is an Ethereum transaction.
type Transaction interface {
Type() byte
GetChainID() *uint256.Int
GetNonce() uint64
GetPrice() *uint256.Int
GetTip() *uint256.Int
GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int
GetFeeCap() *uint256.Int
Cost() *uint256.Int
GetGas() uint64
GetValue() *uint256.Int
Time() time.Time
GetTo() *common.Address
AsMessage(s Signer, baseFee *big.Int) (Message, error)
WithSignature(signer Signer, sig []byte) (Transaction, error)
FakeSign(address common.Address) (Transaction, error)
Hash() common.Hash
SigningHash(chainID *big.Int) common.Hash
Size() common.StorageSize
GetData() []byte
GetAccessList() AccessList
Protected() bool
RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int)
MarshalBinary(w io.Writer) error
// Sender returns the address derived from the signature (V, R, S) using secp256k1
// elliptic curve and an error if it failed deriving or upon an incorrect
// signature.
//
// Sender may cache the address, allowing it to be used regardless of
// signing method. The cache is invalidated if the cached signer does
// not match the signer used in the current call.
Sender(Signer) (common.Address, error)
GetSender() (common.Address, bool)
SetSender(common.Address)
}
// TransactionMisc is a collection of miscellaneous fields for a transaction that is supposed to be embedded into concrete
// implementations of different transaction types
type TransactionMisc struct {
time time.Time // Time first seen locally (spam avoidance)
// caches
hash atomic.Value //nolint:structcheck
size atomic.Value //nolint:structcheck
from atomic.Value
}
func (tm TransactionMisc) Time() time.Time {
return tm.time
}
func (tm TransactionMisc) From() *atomic.Value {
return &tm.from
}
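// DecodeTransaction decodes a single transaction from an RLP stream: an RLP list is
// decoded as a legacy transaction, while a typed (EIP-2718) transaction is accepted
// either as a byte-string envelope or as a bare type byte followed by the typed payload.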
func DecodeTransaction(s *rlp.Stream) (Transaction, error) {
kind, size, err := s.Kind()
if err != nil {
return nil, err
}
if rlp.List == kind {
tx := &LegacyTx{}
if err = tx.DecodeRLP(s, size); err != nil {
return nil, err
}
return tx, nil
}
if rlp.String == kind {
s.NewList(size) // Hack - convert String (envelope) into List
}
var b []byte
if b, err = s.Bytes(); err != nil {
return nil, err
}
if len(b) != 1 {
return nil, fmt.Errorf("%w, got %d bytes", rlp.ErrWrongTxTypePrefix, len(b))
}
var tx Transaction
switch b[0] {
case AccessListTxType:
t := &AccessListTx{}
if err = t.DecodeRLP(s); err != nil {
return nil, err
}
tx = t
case DynamicFeeTxType:
t := &DynamicFeeTransaction{}
if err = t.DecodeRLP(s); err != nil {
return nil, err
}
tx = t
default:
return nil, fmt.Errorf("%w, got: %d", rlp.ErrUnknownTxTypePrefix, b[0])
}
if kind == rlp.String {
if err = s.ListEnd(); err != nil {
return nil, err
}
}
return tx, nil
}
func UnmarshalTransactionFromBinary(data []byte) (Transaction, error) {
s := rlp.NewStream(bytes.NewReader(data), uint64(len(data)))
return DecodeTransaction(s)
}
func MarshalTransactionsBinary(txs Transactions) ([][]byte, error) {
var err error
var buf bytes.Buffer
result := make([][]byte, len(txs))
for i := range txs {
if txs[i] == nil {
result[i] = nil
continue
}
buf.Reset()
err = txs[i].MarshalBinary(&buf)
if err != nil {
return nil, err
}
result[i] = common.CopyBytes(buf.Bytes())
}
return result, nil
}
func DecodeTransactions(txs [][]byte) ([]Transaction, error) {
result := make([]Transaction, len(txs))
var err error
for i := range txs {
s := rlp.NewStream(bytes.NewReader(txs[i]), uint64(len(txs[i])))
result[i], err = DecodeTransaction(s)
if err != nil {
return nil, err
}
}
return result, nil
}
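// sanityCheckSignature recovers the plain recovery id from V (handling EIP-155 protected
// values via the derived chain id) and rejects signature components that fall outside the
// valid secp256k1 range.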
func sanityCheckSignature(v *uint256.Int, r *uint256.Int, s *uint256.Int, maybeProtected bool) error {
if isProtectedV(v) && !maybeProtected {
return ErrUnexpectedProtection
}
var plainV byte
if isProtectedV(v) {
chainID := DeriveChainId(v).Uint64()
plainV = byte(v.Uint64() - 35 - 2*chainID)
} else if maybeProtected {
// Only EIP-155 signatures can be optionally protected. Since
// we determined this v value is not protected, it must be a
// raw 27 or 28.
plainV = byte(v.Uint64() - 27)
} else {
// If the signature is not optionally protected, we assume it
// must already be equal to the recovery id.
plainV = byte(v.Uint64())
}
if !crypto.ValidateSignatureValues(plainV, r, s, false) {
return ErrInvalidSig
}
return nil
}
func isProtectedV(V *uint256.Int) bool {
if V.BitLen() <= 8 {
v := V.Uint64()
return v != 27 && v != 28 && v != 1 && v != 0
}
// anything not 27 or 28 is considered protected
return true
}
// Transactions implements DerivableList for transactions.
type Transactions []Transaction
// Len returns the length of s.
func (s Transactions) Len() int { return len(s) }
// EncodeIndex encodes the i'th transaction to w. Note that this does not check for errors
// because we assume that *Transaction will only ever contain valid txs that were either
// constructed by decoding or via public API in this package.
func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) {
if err := s[i].MarshalBinary(w); err != nil {
panic(err)
}
}
// TransactionsGroupedBySender - lists of transactions grouped by sender
type TransactionsGroupedBySender []Transactions
// TxDifference returns a new set which is the difference between a and b.
func TxDifference(a, b Transactions) Transactions {
keep := make(Transactions, 0, len(a))
remove := make(map[common.Hash]struct{})
for _, tx := range b {
remove[tx.Hash()] = struct{}{}
}
for _, tx := range a {
if _, ok := remove[tx.Hash()]; !ok {
keep = append(keep, tx)
}
}
return keep
}
// TxByNonce implements the sort interface to allow sorting a list of transactions
// by their nonces. This is usually only useful for sorting transactions from a
// single account, otherwise a nonce comparison doesn't make much sense.
type TxByNonce Transactions
func (s TxByNonce) Len() int { return len(s) }
func (s TxByNonce) Less(i, j int) bool { return s[i].GetNonce() < s[j].GetNonce() }
func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// TxByPriceAndTime implements both the sort and the heap interface, making it useful
// for all at once sorting as well as individually adding and removing elements.
type TxByPriceAndTime Transactions
func (s TxByPriceAndTime) Len() int { return len(s) }
func (s TxByPriceAndTime) Less(i, j int) bool {
// If the prices are equal, use the time the transaction was first seen for
// deterministic sorting
cmp := s[i].GetPrice().Cmp(s[j].GetPrice())
if cmp == 0 {
return s[i].Time().Before(s[j].Time())
}
return cmp > 0
}
func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *TxByPriceAndTime) Push(x interface{}) {
*s = append(*s, x.(Transaction))
}
func (s *TxByPriceAndTime) Pop() interface{} {
old := *s
n := len(old)
x := old[n-1]
*s = old[0 : n-1]
return x
}
type TransactionsStream interface {
Empty() bool
Peek() Transaction
Shift()
Pop()
}
// TransactionsByPriceAndNonce represents a set of transactions that can return
// transactions in a profit-maximizing sorted order, while supporting removing
// entire batches of transactions for non-executable accounts.
type TransactionsByPriceAndNonce struct {
	idx    map[common.Address]int      // Index into txs of each sender's nonce-sorted transaction list
txs TransactionsGroupedBySender // Per account nonce-sorted list of transactions
heads TxByPriceAndTime // Next transaction for each unique account (price heap)
signer Signer // Signer for the set of transactions
}
// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve
// price sorted transactions in a nonce-honouring way.
//
// Note, the input map is reowned so the caller should not interact any more with
// it after providing it to the constructor.
func NewTransactionsByPriceAndNonce(signer Signer, txs TransactionsGroupedBySender) *TransactionsByPriceAndNonce {
// Initialize a price and received time based heap with the head transactions
heads := make(TxByPriceAndTime, 0, len(txs))
idx := make(map[common.Address]int, len(txs))
for i, accTxs := range txs {
from, _ := accTxs[0].Sender(signer)
// Ensure the sender address is from the signer
//if acc != from {
// delete(txs, from)
//txs[i] = txs[len(txs)-1]
//txs = txs[:len(txs)-1]
//continue
//}
heads = append(heads, accTxs[0])
idx[from] = i
txs[i] = accTxs[1:]
}
heap.Init(&heads)
// Assemble and return the transaction set
return &TransactionsByPriceAndNonce{
idx: idx,
txs: txs,
heads: heads,
signer: signer,
}
}
func (t *TransactionsByPriceAndNonce) Empty() bool {
return len(t.idx) == 0
}
// Peek returns the next transaction by price.
func (t *TransactionsByPriceAndNonce) Peek() Transaction {
if len(t.heads) == 0 {
return nil
}
return t.heads[0]
}
// Shift replaces the current best head with the next one from the same account.
func (t *TransactionsByPriceAndNonce) Shift() {
acc, _ := t.heads[0].Sender(t.signer)
idx, ok := t.idx[acc]
if !ok {
heap.Pop(&t.heads)
return
}
txs := t.txs[idx]
if len(txs) == 0 {
heap.Pop(&t.heads)
return
}
t.heads[0], t.txs[idx] = txs[0], txs[1:]
heap.Fix(&t.heads, 0)
}
// Pop removes the best transaction, *not* replacing it with the next one from
// the same account. This should be used when a transaction cannot be executed
// and hence all subsequent ones should be discarded from the same account.
func (t *TransactionsByPriceAndNonce) Pop() {
heap.Pop(&t.heads)
}
// TransactionsFixedOrder represents a set of transactions that are returned in the
// fixed order in which they were provided, while still supporting removal of
// transactions that turn out to be non-executable.
type TransactionsFixedOrder struct {
Transactions
}
// NewTransactionsFixedOrder creates a transaction set that returns the transactions
// in exactly the order they were provided.
//
// Note, the input slice is reowned so the caller should not interact any more with
// it after providing it to the constructor.
func NewTransactionsFixedOrder(txs Transactions) *TransactionsFixedOrder {
return &TransactionsFixedOrder{txs}
}
func (t *TransactionsFixedOrder) Empty() bool {
return len(t.Transactions) == 0
}
// Peek returns the next transaction by price.
func (t *TransactionsFixedOrder) Peek() Transaction {
if len(t.Transactions) == 0 {
return nil
}
return t.Transactions[0]
}
// Shift replaces the current best head with the next one from the same account.
func (t *TransactionsFixedOrder) Shift() {
t.Transactions = t.Transactions[1:]
}
// Pop removes the best transaction, *not* replacing it with the next one from
// the same account. This should be used when a transaction cannot be executed
// and hence all subsequent ones should be discarded from the same account.
func (t *TransactionsFixedOrder) Pop() {
t.Transactions = t.Transactions[1:]
}
// Message is a fully derived transaction and implements core.Message
//
// NOTE: In a future PR this will be removed.
type Message struct {
to *common.Address
from common.Address
nonce uint64
amount uint256.Int
gasLimit uint64
gasPrice uint256.Int
feeCap uint256.Int
tip uint256.Int
data []byte
accessList AccessList
checkNonce bool
}
func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *uint256.Int, gasLimit uint64, gasPrice *uint256.Int, feeCap, tip *uint256.Int, data []byte, accessList AccessList, checkNonce bool) Message {
m := Message{
from: from,
to: to,
nonce: nonce,
amount: *amount,
gasLimit: gasLimit,
data: data,
accessList: accessList,
checkNonce: checkNonce,
}
if gasPrice != nil {
m.gasPrice.Set(gasPrice)
}
if tip != nil {
m.tip.Set(tip)
}
if feeCap != nil {
m.feeCap.Set(feeCap)
}
return m
}
func (m Message) From() common.Address { return m.from }
func (m Message) To() *common.Address { return m.to }
func (m Message) GasPrice() *uint256.Int { return &m.gasPrice }
func (m Message) FeeCap() *uint256.Int { return &m.feeCap }
func (m Message) Tip() *uint256.Int { return &m.tip }
func (m Message) Value() *uint256.Int { return &m.amount }
func (m Message) Gas() uint64 { return m.gasLimit }
func (m Message) Nonce() uint64 { return m.nonce }
func (m Message) Data() []byte { return m.data }
func (m Message) AccessList() AccessList { return m.accessList }
func (m Message) CheckNonce() bool { return m.checkNonce }
| 1 | 22,816 | Why do we need RawTransactions? | ledgerwatch-erigon | go |
@@ -37,6 +37,7 @@ import (
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
+ "github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
var ( | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"context"
"encoding/hex"
"math/big"
"net"
"strconv"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/golang/protobuf/proto"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/dispatcher"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/indexservice"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
)
var (
// ErrInternalServer indicates the internal server error
ErrInternalServer = errors.New("internal server error")
// ErrReceipt indicates the error of receipt
ErrReceipt = errors.New("invalid receipt")
// ErrAction indicates the error of action
ErrAction = errors.New("invalid action")
)
// BroadcastOutbound sends a broadcast message to the whole network
type BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error
// Config represents the config to setup api
type Config struct {
broadcastHandler BroadcastOutbound
}
// Option is the option to override the api config
type Option func(cfg *Config) error
// WithBroadcastOutbound is the option to broadcast msg outbound
func WithBroadcastOutbound(broadcastHandler BroadcastOutbound) Option {
return func(cfg *Config) error {
cfg.broadcastHandler = broadcastHandler
return nil
}
}
// Server provides api for user to query blockchain data
type Server struct {
bc blockchain.Blockchain
dp dispatcher.Dispatcher
ap actpool.ActPool
gs *gasstation.GasStation
broadcastHandler BroadcastOutbound
cfg config.API
idx *indexservice.Server
registry *protocol.Registry
grpcserver *grpc.Server
}
// NewServer creates a new server
func NewServer(
cfg config.API,
chain blockchain.Blockchain,
dispatcher dispatcher.Dispatcher,
actPool actpool.ActPool,
idx *indexservice.Server,
registry *protocol.Registry,
opts ...Option,
) (*Server, error) {
apiCfg := Config{}
for _, opt := range opts {
if err := opt(&apiCfg); err != nil {
return nil, err
}
}
if cfg == (config.API{}) {
log.L().Warn("API server is not configured.")
cfg = config.Default.API
}
svr := &Server{
bc: chain,
dp: dispatcher,
ap: actPool,
broadcastHandler: apiCfg.broadcastHandler,
cfg: cfg,
idx: idx,
registry: registry,
gs: gasstation.NewGasStation(chain, cfg),
}
svr.grpcserver = grpc.NewServer(
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
)
iotexapi.RegisterAPIServiceServer(svr.grpcserver, svr)
grpc_prometheus.Register(svr.grpcserver)
reflection.Register(svr.grpcserver)
return svr, nil
}
// GetAccount returns the metadata of an account
func (api *Server) GetAccount(ctx context.Context, in *iotexapi.GetAccountRequest) (*iotexapi.GetAccountResponse, error) {
state, err := api.bc.StateByAddr(in.Address)
if err != nil {
return nil, err
}
pendingNonce, err := api.ap.GetPendingNonce(in.Address)
if err != nil {
return nil, err
}
accountMeta := &iotextypes.AccountMeta{
Address: in.Address,
Balance: state.Balance.String(),
Nonce: state.Nonce,
PendingNonce: pendingNonce,
}
return &iotexapi.GetAccountResponse{AccountMeta: accountMeta}, nil
}
// GetActions returns actions
func (api *Server) GetActions(ctx context.Context, in *iotexapi.GetActionsRequest) (*iotexapi.GetActionsResponse, error) {
switch {
case in.GetByIndex() != nil:
request := in.GetByIndex()
return api.getActions(request.Start, request.Count)
case in.GetByHash() != nil:
request := in.GetByHash()
return api.getAction(request.ActionHash, request.CheckPending)
case in.GetByAddr() != nil:
request := in.GetByAddr()
return api.getActionsByAddress(request.Address, request.Start, request.Count)
case in.GetUnconfirmedByAddr() != nil:
request := in.GetUnconfirmedByAddr()
return api.getUnconfirmedActionsByAddress(request.Address, request.Start, request.Count)
case in.GetByBlk() != nil:
request := in.GetByBlk()
return api.getActionsByBlock(request.BlkHash, request.Start, request.Count)
default:
return nil, nil
}
}
// GetBlockMetas returns block metadata
func (api *Server) GetBlockMetas(ctx context.Context, in *iotexapi.GetBlockMetasRequest) (*iotexapi.GetBlockMetasResponse, error) {
switch {
case in.GetByIndex() != nil:
request := in.GetByIndex()
return api.getBlockMetas(request.Start, request.Count)
case in.GetByHash() != nil:
request := in.GetByHash()
return api.getBlockMeta(request.BlkHash)
default:
return nil, nil
}
}
// GetChainMeta returns blockchain metadata
func (api *Server) GetChainMeta(ctx context.Context, in *iotexapi.GetChainMetaRequest) (*iotexapi.GetChainMetaResponse, error) {
tipHeight := api.bc.TipHeight()
totalActions, err := api.bc.GetTotalActions()
if err != nil {
return nil, err
}
blockLimit := int64(api.cfg.TpsWindow)
if blockLimit <= 0 {
return nil, errors.Wrapf(ErrInternalServer, "block limit is %d", blockLimit)
}
// avoid genesis block
if int64(tipHeight) < blockLimit {
blockLimit = int64(tipHeight)
}
r, err := api.getBlockMetas(tipHeight, uint64(blockLimit))
if err != nil {
return nil, err
}
blks := r.BlkMetas
if len(blks) == 0 {
return nil, errors.New("get 0 blocks! not able to calculate aps")
}
p, ok := api.registry.Find(rolldpos.ProtocolID)
if !ok {
return nil, errors.New("rolldpos protocol is not registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
return nil, errors.New("fail to cast rolldpos protocol")
}
epochNum := rp.GetEpochNum(tipHeight)
epochHeight := rp.GetEpochHeight(epochNum)
timeDuration := blks[0].Timestamp - blks[len(blks)-1].Timestamp
// if time duration is less than 1 second, we set it to be 1 second
if timeDuration == 0 {
timeDuration = 1
}
tps := int64(totalActions) / timeDuration
chainMeta := &iotextypes.ChainMeta{
Height: tipHeight,
Epoch: &iotextypes.EpochData{
Num: epochNum,
Height: epochHeight,
},
Supply: blockchain.Gen.TotalSupply.String(),
NumActions: int64(totalActions),
Tps: tps,
}
return &iotexapi.GetChainMetaResponse{ChainMeta: chainMeta}, nil
}
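// Editor's note: exampleTpsWindow is an illustrative sketch added for clarity
// and is not part of the original file. It mirrors the arithmetic used by
// GetChainMeta above: the sampling window is the timestamp span (in seconds)
// between the newest and oldest sampled block metas, floored at one second to
// avoid dividing by zero, and TPS is the action count divided by that span.
// For example, 1200 actions over a 120-second window yields 10.
func exampleTpsWindow(numActions uint64, newestTs, oldestTs int64) int64 {
	timeDuration := newestTs - oldestTs
	if timeDuration == 0 {
		timeDuration = 1
	}
	return int64(numActions) / timeDuration
}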
// SendAction is the API to send an action to blockchain.
func (api *Server) SendAction(ctx context.Context, in *iotexapi.SendActionRequest) (res *iotexapi.SendActionResponse, err error) {
log.L().Debug("receive send action request")
// broadcast to the network
if err = api.broadcastHandler(context.Background(), api.bc.ChainID(), in.Action); err != nil {
log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err))
}
// send to actpool via dispatcher
api.dp.HandleBroadcast(context.Background(), api.bc.ChainID(), in.Action)
return &iotexapi.SendActionResponse{}, nil
}
// GetReceiptByAction gets receipt with corresponding action hash
func (api *Server) GetReceiptByAction(ctx context.Context, in *iotexapi.GetReceiptByActionRequest) (*iotexapi.GetReceiptByActionResponse, error) {
actHash, err := toHash256(in.ActionHash)
if err != nil {
return nil, err
}
receipt, err := api.bc.GetReceiptByActionHash(actHash)
if err != nil {
return nil, err
}
return &iotexapi.GetReceiptByActionResponse{Receipt: receipt.ConvertToReceiptPb()}, nil
}
// ReadContract reads the state in a contract address specified by the slot
func (api *Server) ReadContract(ctx context.Context, in *iotexapi.ReadContractRequest) (*iotexapi.ReadContractResponse, error) {
log.L().Debug("receive read smart contract request")
selp := &action.SealedEnvelope{}
if err := selp.LoadProto(in.Action); err != nil {
return nil, err
}
sc, ok := selp.Action().(*action.Execution)
if !ok {
return nil, errors.New("not execution")
}
callerPKHash := keypair.HashPubKey(selp.SrcPubkey())
callerAddr, err := address.FromBytes(callerPKHash[:])
if err != nil {
return nil, err
}
res, err := api.bc.ExecuteContractRead(callerAddr, sc)
if err != nil {
return nil, err
}
return &iotexapi.ReadContractResponse{Data: hex.EncodeToString(res.ReturnValue)}, nil
}
// SuggestGasPrice suggests gas price
func (api *Server) SuggestGasPrice(ctx context.Context, in *iotexapi.SuggestGasPriceRequest) (*iotexapi.SuggestGasPriceResponse, error) {
suggestPrice, err := api.gs.SuggestGasPrice()
if err != nil {
return nil, err
}
return &iotexapi.SuggestGasPriceResponse{GasPrice: suggestPrice}, nil
}
// EstimateGasForAction estimates gas for action
func (api *Server) EstimateGasForAction(ctx context.Context, in *iotexapi.EstimateGasForActionRequest) (*iotexapi.EstimateGasForActionResponse, error) {
estimateGas, err := api.gs.EstimateGasForAction(in.Action)
if err != nil {
return nil, err
}
return &iotexapi.EstimateGasForActionResponse{Gas: estimateGas}, nil
}
// Start starts the API server
func (api *Server) Start() error {
portStr := ":" + strconv.Itoa(api.cfg.Port)
lis, err := net.Listen("tcp", portStr)
if err != nil {
log.L().Error("API server failed to listen.", zap.Error(err))
return errors.Wrap(err, "API server failed to listen")
}
log.L().Info("API server is listening.", zap.String("addr", lis.Addr().String()))
go func() {
if err := api.grpcserver.Serve(lis); err != nil {
log.L().Fatal("Node failed to serve.", zap.Error(err))
}
}()
return nil
}
// Stop stops the API server
func (api *Server) Stop() error {
api.grpcserver.Stop()
log.L().Info("API server stops.")
return nil
}
// GetActions returns actions within the range
func (api *Server) getActions(start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
var res []*iotextypes.Action
var actionCount uint64
tipHeight := api.bc.TipHeight()
for height := int64(tipHeight); height >= 0; height-- {
blk, err := api.bc.GetBlockByHeight(uint64(height))
if err != nil {
return nil, err
}
selps := blk.Actions
for i := len(selps) - 1; i >= 0; i-- {
actionCount++
if actionCount <= start {
continue
}
if uint64(len(res)) >= count {
return &iotexapi.GetActionsResponse{Actions: res}, nil
}
res = append(res, selps[i].Proto())
}
}
return &iotexapi.GetActionsResponse{Actions: res}, nil
}
// getAction returns action by action hash
func (api *Server) getAction(actionHash string, checkPending bool) (*iotexapi.GetActionsResponse, error) {
actHash, err := toHash256(actionHash)
if err != nil {
return nil, err
}
actPb, err := getAction(api.bc, api.ap, actHash, checkPending)
if err != nil {
return nil, err
}
return &iotexapi.GetActionsResponse{Actions: []*iotextypes.Action{actPb}}, nil
}
// getActionsByAddress returns all actions associated with an address
func (api *Server) getActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
var res []*iotextypes.Action
var actions []hash.Hash256
if api.cfg.UseRDS {
actionHistory, err := api.idx.Indexer().GetIndexHistory(config.IndexAction, address)
if err != nil {
return nil, err
}
actions = append(actions, actionHistory...)
} else {
actionsFromAddress, err := api.bc.GetActionsFromAddress(address)
if err != nil {
return nil, err
}
actionsToAddress, err := api.bc.GetActionsToAddress(address)
if err != nil {
return nil, err
}
actionsFromAddress = append(actionsFromAddress, actionsToAddress...)
actions = append(actions, actionsFromAddress...)
}
var actionCount uint64
for i := len(actions) - 1; i >= 0; i-- {
actionCount++
if actionCount <= start {
continue
}
if uint64(len(res)) >= count {
break
}
actPb, err := getAction(api.bc, api.ap, actions[i], false)
if err != nil {
return nil, err
}
res = append(res, actPb)
}
return &iotexapi.GetActionsResponse{Actions: res}, nil
}
// getUnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address
func (api *Server) getUnconfirmedActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
var res []*iotextypes.Action
var actionCount uint64
selps := api.ap.GetUnconfirmedActs(address)
for i := len(selps) - 1; i >= 0; i-- {
actionCount++
if actionCount <= start {
continue
}
if uint64(len(res)) >= count {
break
}
res = append(res, selps[i].Proto())
}
return &iotexapi.GetActionsResponse{Actions: res}, nil
}
// getActionsByBlock returns all actions in a block
func (api *Server) getActionsByBlock(blkHash string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
var res []*iotextypes.Action
hash, err := toHash256(blkHash)
if err != nil {
return nil, err
}
blk, err := api.bc.GetBlockByHash(hash)
if err != nil {
return nil, err
}
selps := blk.Actions
var actionCount uint64
for i := len(selps) - 1; i >= 0; i-- {
actionCount++
if actionCount <= start {
continue
}
if uint64(len(res)) >= count {
break
}
res = append(res, selps[i].Proto())
}
return &iotexapi.GetActionsResponse{Actions: res}, nil
}
// getBlockMetas gets block within the height range
func (api *Server) getBlockMetas(start uint64, number uint64) (*iotexapi.GetBlockMetasResponse, error) {
var res []*iotextypes.BlockMeta
startHeight := api.bc.TipHeight()
var blkCount uint64
for height := int(startHeight); height >= 0; height-- {
blkCount++
if blkCount <= start {
continue
}
if uint64(len(res)) >= number {
break
}
blk, err := api.bc.GetBlockByHeight(uint64(height))
if err != nil {
return nil, err
}
blockHeaderPb := blk.ConvertToBlockHeaderPb()
hash := blk.HashBlock()
txRoot := blk.TxRoot()
receiptRoot := blk.ReceiptRoot()
deltaStateDigest := blk.DeltaStateDigest()
transferAmount := getTranferAmountInBlock(blk)
blockMeta := &iotextypes.BlockMeta{
Hash: hex.EncodeToString(hash[:]),
Height: blk.Height(),
Timestamp: blockHeaderPb.GetCore().GetTimestamp().GetSeconds(),
NumActions: int64(len(blk.Actions)),
ProducerAddress: blk.ProducerAddress(),
TransferAmount: transferAmount.String(),
TxRoot: hex.EncodeToString(txRoot[:]),
ReceiptRoot: hex.EncodeToString(receiptRoot[:]),
DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]),
}
res = append(res, blockMeta)
}
return &iotexapi.GetBlockMetasResponse{BlkMetas: res}, nil
}
// getBlockMeta returns block by block hash
func (api *Server) getBlockMeta(blkHash string) (*iotexapi.GetBlockMetasResponse, error) {
hash, err := toHash256(blkHash)
if err != nil {
return nil, err
}
blk, err := api.bc.GetBlockByHash(hash)
if err != nil {
return nil, err
}
blkHeaderPb := blk.ConvertToBlockHeaderPb()
txRoot := blk.TxRoot()
receiptRoot := blk.ReceiptRoot()
deltaStateDigest := blk.DeltaStateDigest()
transferAmount := getTranferAmountInBlock(blk)
blockMeta := &iotextypes.BlockMeta{
Hash: blkHash,
Height: blk.Height(),
Timestamp: blkHeaderPb.GetCore().GetTimestamp().GetSeconds(),
NumActions: int64(len(blk.Actions)),
ProducerAddress: blk.ProducerAddress(),
TransferAmount: transferAmount.String(),
TxRoot: hex.EncodeToString(txRoot[:]),
ReceiptRoot: hex.EncodeToString(receiptRoot[:]),
DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]),
}
return &iotexapi.GetBlockMetasResponse{BlkMetas: []*iotextypes.BlockMeta{blockMeta}}, nil
}
// ReadState reads state on blockchain
func (api *Server) ReadState(ctx context.Context, in *iotexapi.ReadStateRequest) (*iotexapi.ReadStateResponse, error) {
p, ok := api.registry.Find(string(in.ProtocolID))
if !ok {
return nil, errors.Errorf("protocol %s isn't registered", string(in.ProtocolID))
}
// TODO: need to complete the context
ctx = protocol.WithRunActionsCtx(ctx, protocol.RunActionsCtx{
BlockHeight: api.bc.TipHeight(),
Registry: api.registry,
})
ws, err := api.bc.GetFactory().NewWorkingSet()
if err != nil {
return nil, err
}
data, err := p.ReadState(ctx, ws, in.MethodName, in.Arguments...)
// TODO: need to distinguish user error and system error
if err != nil {
return nil, err
}
out := iotexapi.ReadStateResponse{
Data: data,
}
return &out, nil
}
func toHash256(hashString string) (hash.Hash256, error) {
bytes, err := hex.DecodeString(hashString)
if err != nil {
return hash.ZeroHash256, err
}
var hash hash.Hash256
copy(hash[:], bytes)
return hash, nil
}
func getAction(bc blockchain.Blockchain, ap actpool.ActPool, actHash hash.Hash256, checkPending bool) (*iotextypes.Action, error) {
var selp action.SealedEnvelope
var err error
if selp, err = bc.GetActionByActionHash(actHash); err != nil {
if checkPending {
// Try to fetch pending action from actpool
selp, err = ap.GetActionByHash(actHash)
}
}
if err != nil {
return nil, err
}
return selp.Proto(), nil
}
func getTranferAmountInBlock(blk *block.Block) *big.Int {
totalAmount := big.NewInt(0)
for _, selp := range blk.Actions {
transfer, ok := selp.Action().(*action.Transfer)
if !ok {
continue
}
totalAmount.Add(totalAmount, transfer.Amount())
}
return totalAmount
}
| 1 | 15,771 | File is not `gofmt`-ed with `-s` (from `gofmt`) | iotexproject-iotex-core | go |
@@ -632,7 +632,14 @@ public class NSClientService extends Service {
if (sgv.getMills() > latestDateInReceivedData)
latestDateInReceivedData = sgv.getMills();
}
- BroadcastSgvs.handleNewSgv(sgvs, MainApp.instance().getApplicationContext(), isDelta);
+ // Was that sgv more than 15 mins ago ?
+ boolean moreThan15MinAgo = false;
+ if((System.currentTimeMillis()-latestDateInReceivedData)/(60 * 1000L) < 15L )
+ moreThan15MinAgo = true;
+ if(Notification.isAlarmForStaleData() && moreThan15MinAgo){
+ MainApp.bus().post(new EventDismissNotification(Notification.NSALARM));
+ }
+ BroadcastSgvs.handleNewSgv(sgvs, MainApp.instance().getApplicationContext(), isDelta);
}
MainApp.bus().post(new EventNSClientNewLog("LAST", DateUtil.dateAndTimeString(latestDateInReceivedData)));
} catch (JSONException e) { | 1 | package info.nightscout.androidaps.plugins.NSClientInternal.services;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.os.Binder;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.IBinder;
import android.os.PowerManager;
import com.crashlytics.android.Crashlytics;
import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;
import com.j256.ormlite.dao.CloseableIterator;
import com.squareup.otto.Subscribe;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URISyntaxException;
import java.sql.SQLException;
import java.util.Date;
import info.nightscout.androidaps.Config;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.data.ProfileStore;
import info.nightscout.androidaps.db.DbRequest;
import info.nightscout.androidaps.events.EventAppExit;
import info.nightscout.androidaps.events.EventConfigBuilderChange;
import info.nightscout.androidaps.events.EventPreferenceChange;
import info.nightscout.androidaps.interfaces.PluginBase;
import info.nightscout.androidaps.plugins.NSClientInternal.NSClientInternalPlugin;
import info.nightscout.androidaps.plugins.NSClientInternal.UploadQueue;
import info.nightscout.androidaps.plugins.NSClientInternal.acks.NSAddAck;
import info.nightscout.androidaps.plugins.NSClientInternal.acks.NSAuthAck;
import info.nightscout.androidaps.plugins.NSClientInternal.acks.NSUpdateAck;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastAlarm;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastAnnouncement;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastCals;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastClearAlarm;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastDeviceStatus;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastFood;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastMbgs;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastProfile;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastSgvs;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastStatus;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastTreatment;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastUrgentAlarm;
import info.nightscout.androidaps.plugins.NSClientInternal.data.AlarmAck;
import info.nightscout.androidaps.plugins.NSClientInternal.data.NSSgv;
import info.nightscout.androidaps.plugins.NSClientInternal.data.NSSettingsStatus;
import info.nightscout.androidaps.plugins.NSClientInternal.data.NSTreatment;
import info.nightscout.androidaps.plugins.NSClientInternal.events.EventNSClientNewLog;
import info.nightscout.androidaps.plugins.NSClientInternal.events.EventNSClientRestart;
import info.nightscout.androidaps.plugins.NSClientInternal.events.EventNSClientStatus;
import info.nightscout.androidaps.plugins.Overview.Notification;
import info.nightscout.androidaps.plugins.Overview.events.EventDismissNotification;
import info.nightscout.androidaps.plugins.Overview.events.EventNewNotification;
import info.nightscout.utils.DateUtil;
import info.nightscout.utils.SP;
import io.socket.client.IO;
import io.socket.client.Socket;
import io.socket.emitter.Emitter;
public class NSClientService extends Service {
private static Logger log = LoggerFactory.getLogger(NSClientService.class);
static public PowerManager.WakeLock mWakeLock;
private IBinder mBinder = new NSClientService.LocalBinder();
static ProfileStore profileStore;
static public Handler handler;
public static Socket mSocket;
public static boolean isConnected = false;
public static boolean hasWriteAuth = false;
private static Integer dataCounter = 0;
private static Integer connectCounter = 0;
public static String nightscoutVersionName = "";
public static Integer nightscoutVersionCode = 0;
private boolean nsEnabled = false;
static public String nsURL = "";
private String nsAPISecret = "";
private String nsDevice = "";
private Integer nsHours = 48;
public long lastResendTime = 0;
public long latestDateInReceivedData = 0;
private String nsAPIhashCode = "";
public static UploadQueue uploadQueue = new UploadQueue();
public NSClientService() {
registerBus();
if (handler == null) {
HandlerThread handlerThread = new HandlerThread(NSClientService.class.getSimpleName() + "Handler");
handlerThread.start();
handler = new Handler(handlerThread.getLooper());
}
PowerManager powerManager = (PowerManager) MainApp.instance().getApplicationContext().getSystemService(Context.POWER_SERVICE);
mWakeLock = powerManager.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "NSClientService");
initialize();
}
public class LocalBinder extends Binder {
public NSClientService getServiceInstance() {
return NSClientService.this;
}
}
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
return START_STICKY;
}
private void registerBus() {
try {
MainApp.bus().unregister(this);
} catch (RuntimeException x) {
// Ignore
}
MainApp.bus().register(this);
}
@Subscribe
public void onStatusEvent(EventAppExit event) {
if (Config.logFunctionCalls)
log.debug("EventAppExit received");
destroy();
stopSelf();
if (Config.logFunctionCalls)
log.debug("EventAppExit finished");
}
@Subscribe
public void onStatusEvent(EventPreferenceChange ev) {
if (ev.isChanged(R.string.key_nsclientinternal_url) ||
ev.isChanged(R.string.key_nsclientinternal_api_secret) ||
ev.isChanged(R.string.key_nsclientinternal_paused)
) {
latestDateInReceivedData = 0;
destroy();
initialize();
}
}
@Subscribe
public void onStatusEvent(EventConfigBuilderChange ev) {
if (nsEnabled != MainApp.getSpecificPlugin(NSClientInternalPlugin.class).isEnabled(PluginBase.GENERAL)) {
latestDateInReceivedData = 0;
destroy();
initialize();
}
}
@Subscribe
public void onStatusEvent(final EventNSClientRestart ev) {
latestDateInReceivedData = 0;
restart();
}
public void initialize() {
dataCounter = 0;
NSClientService.mWakeLock.acquire();
readPreferences();
if (!nsAPISecret.equals(""))
nsAPIhashCode = Hashing.sha1().hashString(nsAPISecret, Charsets.UTF_8).toString();
MainApp.bus().post(new EventNSClientStatus("Initializing"));
if (MainApp.getSpecificPlugin(NSClientInternalPlugin.class).paused) {
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "paused"));
MainApp.bus().post(new EventNSClientStatus("Paused"));
} else if (!nsEnabled) {
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "disabled"));
MainApp.bus().post(new EventNSClientStatus("Disabled"));
} else if (!nsURL.equals("")) {
try {
MainApp.bus().post(new EventNSClientStatus("Connecting ..."));
IO.Options opt = new IO.Options();
opt.forceNew = true;
opt.reconnection = true;
mSocket = IO.socket(nsURL, opt);
mSocket.on(Socket.EVENT_CONNECT, onConnect);
mSocket.on(Socket.EVENT_DISCONNECT, onDisconnect);
mSocket.on(Socket.EVENT_PING, onPing);
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "do connect"));
mSocket.connect();
mSocket.on("dataUpdate", onDataUpdate);
mSocket.on("announcement", onAnnouncement);
mSocket.on("alarm", onAlarm);
mSocket.on("urgent_alarm", onUrgentAlarm);
mSocket.on("clear_alarm", onClearAlarm);
} catch (URISyntaxException | RuntimeException e) {
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "Wrong URL syntax"));
MainApp.bus().post(new EventNSClientStatus("Wrong URL syntax"));
}
} else {
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "No NS URL specified"));
MainApp.bus().post(new EventNSClientStatus("Not configured"));
}
NSClientService.mWakeLock.release();
}
private Emitter.Listener onConnect = new Emitter.Listener() {
@Override
public void call(Object... args) {
connectCounter++;
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "connect #" + connectCounter + " event. ID: " + mSocket.id()));
sendAuthMessage(new NSAuthAck());
}
};
private Emitter.Listener onDisconnect = new Emitter.Listener() {
@Override
public void call(Object... args) {
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "disconnect event"));
}
};
public void destroy() {
if (mSocket != null) {
MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "destroy"));
isConnected = false;
hasWriteAuth = false;
mSocket.disconnect();
mSocket = null;
}
}
public void sendAuthMessage(NSAuthAck ack) {
JSONObject authMessage = new JSONObject();
try {
authMessage.put("client", "Android_" + nsDevice);
authMessage.put("history", nsHours);
authMessage.put("status", true); // receive status
authMessage.put("from", latestDateInReceivedData); // send data newer than
authMessage.put("secret", nsAPIhashCode);
} catch (JSONException e) {
log.error("Unhandled exception", e);
return;
}
MainApp.bus().post(new EventNSClientNewLog("AUTH", "requesting auth"));
mSocket.emit("authorize", authMessage, ack);
}
@Subscribe
public void onStatusEvent(NSAuthAck ack) {
String connectionStatus = "Authenticated (";
if (ack.read) connectionStatus += "R";
if (ack.write) connectionStatus += "W";
if (ack.write_treatment) connectionStatus += "T";
connectionStatus += ')';
isConnected = true;
hasWriteAuth = ack.write && ack.write_treatment;
MainApp.bus().post(new EventNSClientStatus(connectionStatus));
MainApp.bus().post(new EventNSClientNewLog("AUTH", connectionStatus));
if (!ack.write) {
MainApp.bus().post(new EventNSClientNewLog("ERROR", "Write permission not granted !!!!"));
}
if (!ack.write_treatment) {
MainApp.bus().post(new EventNSClientNewLog("ERROR", "Write treatment permission not granted !!!!"));
}
if (!hasWriteAuth) {
Notification noperm = new Notification(Notification.NSCLIENT_NO_WRITE_PERMISSION, MainApp.sResources.getString(R.string.nowritepermission), Notification.URGENT);
MainApp.bus().post(new EventNewNotification(noperm));
} else {
MainApp.bus().post(new EventDismissNotification(Notification.NSCLIENT_NO_WRITE_PERMISSION));
}
}
public void readPreferences() {
nsEnabled = MainApp.getSpecificPlugin(NSClientInternalPlugin.class).isEnabled(PluginBase.GENERAL);
nsURL = SP.getString(R.string.key_nsclientinternal_url, "");
nsAPISecret = SP.getString(R.string.key_nsclientinternal_api_secret, "");
nsDevice = SP.getString("careportal_enteredby", "");
}
private Emitter.Listener onPing = new Emitter.Listener() {
@Override
public void call(final Object... args) {
if (Config.detailedLog)
MainApp.bus().post(new EventNSClientNewLog("PING", "received"));
// send data if there is something waiting
resend("Ping received");
}
};
private Emitter.Listener onAnnouncement = new Emitter.Listener() {
/*
{
"level":0,
"title":"Announcement",
"message":"test",
"plugin":{"name":"treatmentnotify","label":"Treatment Notifications","pluginType":"notification","enabled":true},
"group":"Announcement",
"isAnnouncement":true,
"key":"9ac46ad9a1dcda79dd87dae418fce0e7955c68da"
}
*/
@Override
public void call(final Object... args) {
JSONObject data;
try {
data = (JSONObject) args[0];
} catch (Exception e) {
Crashlytics.log("Wrong Announcement from NS: " + args[0]);
return;
}
if (Config.detailedLog)
try {
MainApp.bus().post(new EventNSClientNewLog("ANNOUNCEMENT", data.has("message") ? data.getString("message") : "received"));
} catch (Exception e) {
Crashlytics.logException(e);
}
BroadcastAnnouncement.handleAnnouncement(data, getApplicationContext());
log.debug(data.toString());
}
};
private Emitter.Listener onAlarm = new Emitter.Listener() {
/*
{
"level":1,
"title":"Warning HIGH",
"message":"BG Now: 5 -0.2 → mmol\/L\nRaw BG: 4.8 mmol\/L Čistý\nBG 15m: 4.8 mmol\/L\nIOB: -0.02U\nCOB: 0g",
"eventName":"high",
"plugin":{"name":"simplealarms","label":"Simple Alarms","pluginType":"notification","enabled":true},
"pushoverSound":"climb",
"debug":{"lastSGV":5,"thresholds":{"bgHigh":180,"bgTargetTop":75,"bgTargetBottom":72,"bgLow":70}},
"group":"default",
"key":"simplealarms_1"
}
*/
@Override
public void call(final Object... args) {
if (Config.detailedLog)
MainApp.bus().post(new EventNSClientNewLog("ALARM", "received"));
JSONObject data;
try {
data = (JSONObject) args[0];
} catch (Exception e) {
Crashlytics.log("Wrong alarm from NS: " + args[0]);
return;
}
BroadcastAlarm.handleAlarm(data, getApplicationContext());
log.debug(data.toString());
}
};
private Emitter.Listener onUrgentAlarm = new Emitter.Listener() {
/*
{
"level":2,
"title":"Urgent HIGH",
"message":"BG Now: 5.2 -0.1 → mmol\/L\nRaw BG: 5 mmol\/L Čistý\nBG 15m: 5 mmol\/L\nIOB: 0.00U\nCOB: 0g",
"eventName":"high",
"plugin":{"name":"simplealarms","label":"Simple Alarms","pluginType":"notification","enabled":true},
"pushoverSound":"persistent",
"debug":{"lastSGV":5.2,"thresholds":{"bgHigh":80,"bgTargetTop":75,"bgTargetBottom":72,"bgLow":70}},
"group":"default",
"key":"simplealarms_2"
}
*/
@Override
public void call(final Object... args) {
JSONObject data;
try {
data = (JSONObject) args[0];
} catch (Exception e) {
Crashlytics.log("Wrong Urgent alarm from NS: " + args[0]);
return;
}
if (Config.detailedLog)
MainApp.bus().post(new EventNSClientNewLog("URGENTALARM", "received"));
BroadcastUrgentAlarm.handleUrgentAlarm(data, getApplicationContext());
log.debug(data.toString());
}
};
private Emitter.Listener onClearAlarm = new Emitter.Listener() {
/*
{
"clear":true,
"title":"All Clear",
"message":"default - Urgent was ack'd",
"group":"default"
}
*/
@Override
public void call(final Object... args) {
JSONObject data;
try {
data = (JSONObject) args[0];
} catch (Exception e) {
Crashlytics.log("Wrong Urgent alarm from NS: " + args[0]);
return;
}
if (Config.detailedLog)
MainApp.bus().post(new EventNSClientNewLog("CLEARALARM", "received"));
BroadcastClearAlarm.handleClearAlarm(data, getApplicationContext());
log.debug(data.toString());
}
};
private Emitter.Listener onDataUpdate = new Emitter.Listener() {
@Override
public void call(final Object... args) {
NSClientService.handler.post(new Runnable() {
@Override
public void run() {
PowerManager powerManager = (PowerManager) MainApp.instance().getApplicationContext().getSystemService(Context.POWER_SERVICE);
PowerManager.WakeLock wakeLock = powerManager.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK,
"onDataUpdate");
wakeLock.acquire();
try {
JSONObject data = (JSONObject) args[0];
boolean broadcastProfile = false;
try {
                            // delta means only incremental changes are coming
boolean isDelta = data.has("delta");
boolean isFull = !isDelta;
MainApp.bus().post(new EventNSClientNewLog("DATA", "Data packet #" + dataCounter++ + (isDelta ? " delta" : " full")));
if (data.has("profiles")) {
JSONArray profiles = data.getJSONArray("profiles");
if (profiles.length() > 0) {
JSONObject profile = (JSONObject) profiles.get(profiles.length() - 1);
profileStore = new ProfileStore(profile);
broadcastProfile = true;
MainApp.bus().post(new EventNSClientNewLog("PROFILE", "profile received"));
}
}
if (data.has("status")) {
JSONObject status = data.getJSONObject("status");
NSSettingsStatus nsSettingsStatus = NSSettingsStatus.getInstance().setData(status);
if (!status.has("versionNum")) {
if (status.getInt("versionNum") < Config.SUPPORTEDNSVERSION) {
MainApp.bus().post(new EventNSClientNewLog("ERROR", "Unsupported Nightscout version !!!!"));
}
} else {
nightscoutVersionName = nsSettingsStatus.getVersion();
nightscoutVersionCode = nsSettingsStatus.getVersionNum();
}
BroadcastStatus.handleNewStatus(nsSettingsStatus, MainApp.instance().getApplicationContext(), isDelta);
/* Other received data to 2016/02/10
{
status: 'ok'
, name: env.name
, version: env.version
, versionNum: versionNum (for ver 1.2.3 contains 10203)
, serverTime: new Date().toISOString()
, apiEnabled: apiEnabled
, careportalEnabled: apiEnabled && env.settings.enable.indexOf('careportal') > -1
, boluscalcEnabled: apiEnabled && env.settings.enable.indexOf('boluscalc') > -1
, head: env.head
, settings: env.settings
, extendedSettings: ctx.plugins && ctx.plugins.extendedClientSettings ? ctx.plugins.extendedClientSettings(env.extendedSettings) : {}
, activeProfile ..... calculated from treatments or missing
}
*/
} else if (!isDelta) {
MainApp.bus().post(new EventNSClientNewLog("ERROR", "Unsupported Nightscout version !!!!"));
}
// If new profile received or change detected broadcast it
if (broadcastProfile && profileStore != null) {
BroadcastProfile.handleNewTreatment(profileStore, MainApp.instance().getApplicationContext(), isDelta);
MainApp.bus().post(new EventNSClientNewLog("PROFILE", "broadcasting"));
}
if (data.has("treatments")) {
JSONArray treatments = data.getJSONArray("treatments");
JSONArray removedTreatments = new JSONArray();
JSONArray updatedTreatments = new JSONArray();
JSONArray addedTreatments = new JSONArray();
if (treatments.length() > 0)
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + treatments.length() + " treatments"));
for (Integer index = 0; index < treatments.length(); index++) {
JSONObject jsonTreatment = treatments.getJSONObject(index);
NSTreatment treatment = new NSTreatment(jsonTreatment);
// remove from upload queue if Ack is failing
UploadQueue.removeID(jsonTreatment);
//Find latest date in treatment
if (treatment.getMills() != null && treatment.getMills() < System.currentTimeMillis())
if (treatment.getMills() > latestDateInReceivedData)
latestDateInReceivedData = treatment.getMills();
if (treatment.getAction() == null) {
addedTreatments.put(jsonTreatment);
} else if (treatment.getAction().equals("update")) {
updatedTreatments.put(jsonTreatment);
} else if (treatment.getAction().equals("remove")) {
if (treatment.getMills() != null && treatment.getMills() > System.currentTimeMillis() - 24 * 60 * 60 * 1000L) // handle 1 day old deletions only
removedTreatments.put(jsonTreatment);
}
}
if (removedTreatments.length() > 0) {
BroadcastTreatment.handleRemovedTreatment(removedTreatments, MainApp.instance().getApplicationContext(), isDelta);
}
if (updatedTreatments.length() > 0) {
BroadcastTreatment.handleChangedTreatment(updatedTreatments, MainApp.instance().getApplicationContext(), isDelta);
}
if (addedTreatments.length() > 0) {
BroadcastTreatment.handleNewTreatment(addedTreatments, MainApp.instance().getApplicationContext(), isDelta);
}
}
if (data.has("devicestatus")) {
JSONArray devicestatuses = data.getJSONArray("devicestatus");
if (devicestatuses.length() > 0) {
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + devicestatuses.length() + " devicestatuses"));
for (Integer index = 0; index < devicestatuses.length(); index++) {
JSONObject jsonStatus = devicestatuses.getJSONObject(index);
// remove from upload queue if Ack is failing
UploadQueue.removeID(jsonStatus);
}
BroadcastDeviceStatus.handleNewDeviceStatus(devicestatuses, MainApp.instance().getApplicationContext(), isDelta);
}
}
if (data.has("food")) {
JSONArray foods = data.getJSONArray("food");
JSONArray removedFoods = new JSONArray();
JSONArray updatedFoods = new JSONArray();
JSONArray addedFoods = new JSONArray();
if (foods.length() > 0)
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + foods.length() + " foods"));
for (Integer index = 0; index < foods.length(); index++) {
JSONObject jsonFood = foods.getJSONObject(index);
NSTreatment treatment = new NSTreatment(jsonFood);
// remove from upload queue if Ack is failing
UploadQueue.removeID(jsonFood);
//Find latest date in treatment
if (treatment.getMills() != null && treatment.getMills() < System.currentTimeMillis())
if (treatment.getMills() > latestDateInReceivedData)
latestDateInReceivedData = treatment.getMills();
if (treatment.getAction() == null) {
addedFoods.put(jsonFood);
} else if (treatment.getAction().equals("update")) {
updatedFoods.put(jsonFood);
} else if (treatment.getAction().equals("remove")) {
if (treatment.getMills() != null && treatment.getMills() > System.currentTimeMillis() - 24 * 60 * 60 * 1000L) // handle 1 day old deletions only
removedFoods.put(jsonFood);
}
}
if (removedFoods.length() > 0) {
BroadcastFood.handleRemovedFood(removedFoods, MainApp.instance().getApplicationContext(), isDelta);
}
if (updatedFoods.length() > 0) {
BroadcastFood.handleChangedFood(updatedFoods, MainApp.instance().getApplicationContext(), isDelta);
}
if (addedFoods.length() > 0) {
BroadcastFood.handleNewFood(addedFoods, MainApp.instance().getApplicationContext(), isDelta);
}
}
if (data.has("")) {
JSONArray foods = data.getJSONArray("food");
if (foods.length() > 0) {
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + foods.length() + " foods"));
for (Integer index = 0; index < foods.length(); index++) {
JSONObject jsonFood = foods.getJSONObject(index);
// remove from upload queue if Ack is failing
UploadQueue.removeID(jsonFood);
}
BroadcastDeviceStatus.handleNewFoods(foods, MainApp.instance().getApplicationContext(), isDelta);
}
}
if (data.has("mbgs")) {
JSONArray mbgs = data.getJSONArray("mbgs");
if (mbgs.length() > 0)
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + mbgs.length() + " mbgs"));
for (Integer index = 0; index < mbgs.length(); index++) {
JSONObject jsonMbg = mbgs.getJSONObject(index);
// remove from upload queue if Ack is failing
UploadQueue.removeID(jsonMbg);
}
BroadcastMbgs.handleNewMbg(mbgs, MainApp.instance().getApplicationContext(), isDelta);
}
if (data.has("cals")) {
JSONArray cals = data.getJSONArray("cals");
if (cals.length() > 0)
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + cals.length() + " cals"));
                                // Retrieve actual calibration
for (Integer index = 0; index < cals.length(); index++) {
// remove from upload queue if Ack is failing
UploadQueue.removeID(cals.optJSONObject(index));
}
BroadcastCals.handleNewCal(cals, MainApp.instance().getApplicationContext(), isDelta);
}
if (data.has("sgvs")) {
JSONArray sgvs = data.getJSONArray("sgvs");
if (sgvs.length() > 0)
MainApp.bus().post(new EventNSClientNewLog("DATA", "received " + sgvs.length() + " sgvs"));
for (Integer index = 0; index < sgvs.length(); index++) {
JSONObject jsonSgv = sgvs.getJSONObject(index);
// MainApp.bus().post(new EventNSClientNewLog("DATA", "svg " + sgvs.getJSONObject(index).toString());
NSSgv sgv = new NSSgv(jsonSgv);
// Handle new sgv here
// remove from upload queue if Ack is failing
UploadQueue.removeID(jsonSgv);
//Find latest date in sgv
if (sgv.getMills() != null && sgv.getMills() < System.currentTimeMillis())
if (sgv.getMills() > latestDateInReceivedData)
latestDateInReceivedData = sgv.getMills();
}
BroadcastSgvs.handleNewSgv(sgvs, MainApp.instance().getApplicationContext(), isDelta);
}
MainApp.bus().post(new EventNSClientNewLog("LAST", DateUtil.dateAndTimeString(latestDateInReceivedData)));
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
//MainApp.bus().post(new EventNSClientNewLog("NSCLIENT", "onDataUpdate end");
} finally {
wakeLock.release();
}
}
});
}
};
public void dbUpdate(DbRequest dbr, NSUpdateAck ack) {
try {
if (!isConnected || !hasWriteAuth) return;
JSONObject message = new JSONObject();
message.put("collection", dbr.collection);
message.put("_id", dbr._id);
message.put("data", new JSONObject(dbr.data));
mSocket.emit("dbUpdate", message, ack);
MainApp.bus().post(new EventNSClientNewLog("DBUPDATE " + dbr.collection, "Sent " + dbr._id));
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
}
public void dbUpdateUnset(DbRequest dbr, NSUpdateAck ack) {
try {
if (!isConnected || !hasWriteAuth) return;
JSONObject message = new JSONObject();
message.put("collection", dbr.collection);
message.put("_id", dbr._id);
message.put("data", new JSONObject(dbr.data));
mSocket.emit("dbUpdateUnset", message, ack);
MainApp.bus().post(new EventNSClientNewLog("DBUPDATEUNSET " + dbr.collection, "Sent " + dbr._id));
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
}
public void dbRemove(DbRequest dbr, NSUpdateAck ack) {
try {
if (!isConnected || !hasWriteAuth) return;
JSONObject message = new JSONObject();
message.put("collection", dbr.collection);
message.put("_id", dbr._id);
mSocket.emit("dbRemove", message, ack);
MainApp.bus().post(new EventNSClientNewLog("DBREMOVE " + dbr.collection, "Sent " + dbr._id));
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
}
@Subscribe
public void onStatusEvent(NSUpdateAck ack) {
if (ack.result) {
uploadQueue.removeID(ack.action, ack._id);
MainApp.bus().post(new EventNSClientNewLog("DBUPDATE/DBREMOVE", "Acked " + ack._id));
} else {
MainApp.bus().post(new EventNSClientNewLog("ERROR", "DBUPDATE/DBREMOVE Unknown response"));
}
}
public void dbAdd(DbRequest dbr, NSAddAck ack) {
try {
if (!isConnected || !hasWriteAuth) return;
JSONObject message = new JSONObject();
message.put("collection", dbr.collection);
message.put("data", new JSONObject(dbr.data));
mSocket.emit("dbAdd", message, ack);
MainApp.bus().post(new EventNSClientNewLog("DBADD " + dbr.collection, "Sent " + dbr.nsClientID));
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
}
public void sendAlarmAck(AlarmAck alarmAck) {
if (!isConnected || !hasWriteAuth) return;
mSocket.emit("ack", alarmAck.level, alarmAck.group, alarmAck.silenceTime);
MainApp.bus().post(new EventNSClientNewLog("ALARMACK ", alarmAck.level + " " + alarmAck.group + " " + alarmAck.silenceTime));
}
@Subscribe
public void onStatusEvent(NSAddAck ack) {
if (ack.nsClientID != null) {
uploadQueue.removeID(ack.json);
MainApp.bus().post(new EventNSClientNewLog("DBADD", "Acked " + ack.nsClientID));
} else {
MainApp.bus().post(new EventNSClientNewLog("ERROR", "DBADD Unknown response"));
}
}
private boolean isCurrent(NSTreatment treatment) {
long now = (new Date()).getTime();
long minPast = now - nsHours * 60L * 60 * 1000;
if (treatment.getMills() == null) {
log.debug("treatment.getMills() == null " + treatment.getData().toString());
return false;
}
if (treatment.getMills() > minPast) return true;
return false;
}
public void resend(final String reason) {
if (UploadQueue.size() == 0)
return;
if (!isConnected || !hasWriteAuth) return;
handler.post(new Runnable() {
@Override
public void run() {
if (mSocket == null || !mSocket.connected()) return;
if (lastResendTime > System.currentTimeMillis() - 10 * 1000L) {
log.debug("Skipping resend by lastResendTime: " + ((System.currentTimeMillis() - lastResendTime) / 1000L) + " sec");
return;
}
lastResendTime = System.currentTimeMillis();
MainApp.bus().post(new EventNSClientNewLog("QUEUE", "Resend started: " + reason));
CloseableIterator<DbRequest> iterator = null;
int maxcount = 30;
try {
iterator = MainApp.getDbHelper().getDbRequestInterator();
try {
while (iterator.hasNext() && maxcount > 0) {
DbRequest dbr = iterator.next();
if (dbr.action.equals("dbAdd")) {
NSAddAck addAck = new NSAddAck();
dbAdd(dbr, addAck);
} else if (dbr.action.equals("dbRemove")) {
NSUpdateAck removeAck = new NSUpdateAck(dbr.action, dbr._id);
dbRemove(dbr, removeAck);
} else if (dbr.action.equals("dbUpdate")) {
NSUpdateAck updateAck = new NSUpdateAck(dbr.action, dbr._id);
dbUpdate(dbr, updateAck);
} else if (dbr.action.equals("dbUpdateUnset")) {
NSUpdateAck updateUnsetAck = new NSUpdateAck(dbr.action, dbr._id);
dbUpdateUnset(dbr, updateUnsetAck);
}
maxcount--;
}
} finally {
iterator.close();
}
} catch (SQLException e) {
log.error("Unhandled exception", e);
}
MainApp.bus().post(new EventNSClientNewLog("QUEUE", "Resend ended: " + reason));
}
});
}
public void restart() {
destroy();
initialize();
}
}
| 1 | 29,688 | should not be this lessThan15MinAgo ? | MilosKozak-AndroidAPS | java |
@@ -1192,6 +1192,13 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
return RequestLineStatus.Done;
}
+ catch (DecodingException)
+ {
+ RejectRequest(RequestRejectionReason.NonAsciiOrNullCharactersInRequestLine,
+ Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
+ // won't run but compiler complains
+ return RequestLineStatus.Done;
+ }
finally
{
input.ConsumingComplete(consumed, end); | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Net;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Adapter;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Infrastructure;
using Microsoft.Extensions.Internal;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Primitives;
// ReSharper disable AccessToModifiedClosure
namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
{
public abstract partial class Frame : IFrameControl
{
        // byte types don't have a data type annotation, so we pre-cast them to avoid in-place casts
private const byte ByteCR = (byte)'\r';
private const byte ByteLF = (byte)'\n';
private const byte ByteColon = (byte)':';
private const byte ByteSpace = (byte)' ';
private const byte ByteTab = (byte)'\t';
private const byte ByteQuestionMark = (byte)'?';
private const byte BytePercentage = (byte)'%';
private static readonly ArraySegment<byte> _endChunkedResponseBytes = CreateAsciiByteArraySegment("0\r\n\r\n");
private static readonly ArraySegment<byte> _continueBytes = CreateAsciiByteArraySegment("HTTP/1.1 100 Continue\r\n\r\n");
private static readonly byte[] _bytesConnectionClose = Encoding.ASCII.GetBytes("\r\nConnection: close");
private static readonly byte[] _bytesConnectionKeepAlive = Encoding.ASCII.GetBytes("\r\nConnection: keep-alive");
private static readonly byte[] _bytesTransferEncodingChunked = Encoding.ASCII.GetBytes("\r\nTransfer-Encoding: chunked");
private static readonly byte[] _bytesHttpVersion11 = Encoding.ASCII.GetBytes("HTTP/1.1 ");
private static readonly byte[] _bytesEndHeaders = Encoding.ASCII.GetBytes("\r\n\r\n");
private static readonly byte[] _bytesServer = Encoding.ASCII.GetBytes("\r\nServer: Kestrel");
private readonly object _onStartingSync = new Object();
private readonly object _onCompletedSync = new Object();
private Streams _frameStreams;
protected Stack<KeyValuePair<Func<object, Task>, object>> _onStarting;
protected Stack<KeyValuePair<Func<object, Task>, object>> _onCompleted;
private TaskCompletionSource<object> _frameStartedTcs = new TaskCompletionSource<object>();
private Task _requestProcessingTask;
protected volatile bool _requestProcessingStopping; // volatile, see: https://msdn.microsoft.com/en-us/library/x13ttww7.aspx
protected int _requestAborted;
private CancellationTokenSource _abortedCts;
private CancellationToken? _manuallySetRequestAbortToken;
private RequestProcessingStatus _requestProcessingStatus;
protected bool _keepAlive;
protected bool _upgrade;
private bool _canHaveBody;
private bool _autoChunk;
protected Exception _applicationException;
private BadHttpRequestException _requestRejectedException;
protected HttpVersion _httpVersion;
private readonly string _pathBase;
private int _remainingRequestHeadersBytesAllowed;
private int _requestHeadersParsed;
protected readonly long _keepAliveMilliseconds;
private readonly long _requestHeadersTimeoutMilliseconds;
protected long _responseBytesWritten;
public Frame(ConnectionContext context)
{
ConnectionContext = context;
Input = context.Input;
Output = context.Output;
ServerOptions = context.ListenerContext.ServiceContext.ServerOptions;
_pathBase = context.ListenerContext.ListenOptions.PathBase;
FrameControl = this;
_keepAliveMilliseconds = (long)ServerOptions.Limits.KeepAliveTimeout.TotalMilliseconds;
_requestHeadersTimeoutMilliseconds = (long)ServerOptions.Limits.RequestHeadersTimeout.TotalMilliseconds;
}
public ConnectionContext ConnectionContext { get; }
public SocketInput Input { get; set; }
public ISocketOutput Output { get; set; }
public IEnumerable<IAdaptedConnection> AdaptedConnections { get; set; }
protected IConnectionControl ConnectionControl => ConnectionContext.ConnectionControl;
protected IKestrelTrace Log => ConnectionContext.ListenerContext.ServiceContext.Log;
private DateHeaderValueManager DateHeaderValueManager => ConnectionContext.ListenerContext.ServiceContext.DateHeaderValueManager;
// Hold direct reference to ServerOptions since this is used very often in the request processing path
private KestrelServerOptions ServerOptions { get; }
private IPEndPoint LocalEndPoint => ConnectionContext.LocalEndPoint;
private IPEndPoint RemoteEndPoint => ConnectionContext.RemoteEndPoint;
protected string ConnectionId => ConnectionContext.ConnectionId;
public string ConnectionIdFeature { get; set; }
public IPAddress RemoteIpAddress { get; set; }
public int RemotePort { get; set; }
public IPAddress LocalIpAddress { get; set; }
public int LocalPort { get; set; }
public string Scheme { get; set; }
public string Method { get; set; }
public string PathBase { get; set; }
public string Path { get; set; }
public string QueryString { get; set; }
public string RawTarget { get; set; }
public string HttpVersion
{
get
{
if (_httpVersion == Http.HttpVersion.Http11)
{
return "HTTP/1.1";
}
if (_httpVersion == Http.HttpVersion.Http10)
{
return "HTTP/1.0";
}
return string.Empty;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
set
{
// GetKnownVersion returns versions which ReferenceEquals interned string
// As most common path, check for this only in fast-path and inline
if (ReferenceEquals(value, "HTTP/1.1"))
{
_httpVersion = Http.HttpVersion.Http11;
}
else if (ReferenceEquals(value, "HTTP/1.0"))
{
_httpVersion = Http.HttpVersion.Http10;
}
else
{
HttpVersionSetSlow(value);
}
}
}
[MethodImpl(MethodImplOptions.NoInlining)]
private void HttpVersionSetSlow(string value)
{
if (value == "HTTP/1.1")
{
_httpVersion = Http.HttpVersion.Http11;
}
else if (value == "HTTP/1.0")
{
_httpVersion = Http.HttpVersion.Http10;
}
else
{
_httpVersion = Http.HttpVersion.Unset;
}
}
public IHeaderDictionary RequestHeaders { get; set; }
public Stream RequestBody { get; set; }
private int _statusCode;
public int StatusCode
{
get
{
return _statusCode;
}
set
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(StatusCode));
}
_statusCode = value;
}
}
private string _reasonPhrase;
public string ReasonPhrase
{
get
{
return _reasonPhrase;
}
set
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(ReasonPhrase));
}
_reasonPhrase = value;
}
}
public IHeaderDictionary ResponseHeaders { get; set; }
public Stream ResponseBody { get; set; }
public Stream DuplexStream { get; set; }
public Task FrameStartedTask => _frameStartedTcs.Task;
public CancellationToken RequestAborted
{
get
{
// If a request abort token was previously explicitly set, return it.
if (_manuallySetRequestAbortToken.HasValue)
{
return _manuallySetRequestAbortToken.Value;
}
// Otherwise, get the abort CTS. If we have one, which would mean that someone previously
// asked for the RequestAborted token, simply return its token. If we don't,
// check to see whether we've already aborted, in which case just return an
// already canceled token. Finally, force a source into existence if we still
// don't have one, and return its token.
var cts = _abortedCts;
return
cts != null ? cts.Token :
(Volatile.Read(ref _requestAborted) == 1) ? new CancellationToken(true) :
RequestAbortedSource.Token;
}
set
{
// Set an abort token, overriding one we create internally. This setter and associated
// field exist purely to support IHttpRequestLifetimeFeature.set_RequestAborted.
_manuallySetRequestAbortToken = value;
}
}
private CancellationTokenSource RequestAbortedSource
{
get
{
// Get the abort token, lazily-initializing it if necessary.
// Make sure it's canceled if an abort request already came in.
// EnsureInitialized can return null since _abortedCts is reset to null
// after it's already been initialized to a non-null value.
// If EnsureInitialized does return null, this property was accessed between
// requests so it's safe to return an ephemeral CancellationTokenSource.
var cts = LazyInitializer.EnsureInitialized(ref _abortedCts, () => new CancellationTokenSource())
?? new CancellationTokenSource();
if (Volatile.Read(ref _requestAborted) == 1)
{
cts.Cancel();
}
return cts;
}
}
public bool HasResponseStarted => _requestProcessingStatus == RequestProcessingStatus.ResponseStarted;
protected FrameRequestHeaders FrameRequestHeaders { get; private set; }
protected FrameResponseHeaders FrameResponseHeaders { get; private set; }
public void InitializeHeaders()
{
if (FrameRequestHeaders == null)
{
FrameRequestHeaders = new FrameRequestHeaders();
}
RequestHeaders = FrameRequestHeaders;
if (FrameResponseHeaders == null)
{
FrameResponseHeaders = new FrameResponseHeaders();
}
ResponseHeaders = FrameResponseHeaders;
}
public void InitializeStreams(MessageBody messageBody)
{
if (_frameStreams == null)
{
_frameStreams = new Streams(this);
}
RequestBody = _frameStreams.RequestBody;
ResponseBody = _frameStreams.ResponseBody;
DuplexStream = _frameStreams.DuplexStream;
_frameStreams.RequestBody.StartAcceptingReads(messageBody);
_frameStreams.ResponseBody.StartAcceptingWrites();
}
public void PauseStreams()
{
_frameStreams.RequestBody.PauseAcceptingReads();
_frameStreams.ResponseBody.PauseAcceptingWrites();
}
public void ResumeStreams()
{
_frameStreams.RequestBody.ResumeAcceptingReads();
_frameStreams.ResponseBody.ResumeAcceptingWrites();
}
public void StopStreams()
{
_frameStreams.RequestBody.StopAcceptingReads();
_frameStreams.ResponseBody.StopAcceptingWrites();
}
public void Reset()
{
FrameRequestHeaders?.Reset();
FrameResponseHeaders?.Reset();
_onStarting = null;
_onCompleted = null;
_requestProcessingStatus = RequestProcessingStatus.RequestPending;
_keepAlive = false;
_autoChunk = false;
_applicationException = null;
ResetFeatureCollection();
Scheme = null;
Method = null;
PathBase = null;
Path = null;
QueryString = null;
_httpVersion = Http.HttpVersion.Unset;
StatusCode = StatusCodes.Status200OK;
ReasonPhrase = null;
RemoteIpAddress = RemoteEndPoint?.Address;
RemotePort = RemoteEndPoint?.Port ?? 0;
LocalIpAddress = LocalEndPoint?.Address;
LocalPort = LocalEndPoint?.Port ?? 0;
ConnectionIdFeature = ConnectionId;
if (AdaptedConnections != null)
{
try
{
foreach (var adaptedConnection in AdaptedConnections)
{
adaptedConnection.PrepareRequest(this);
}
}
catch (Exception ex)
{
Log.LogError(0, ex, $"Uncaught exception from the {nameof(IAdaptedConnection.PrepareRequest)} method of an {nameof(IAdaptedConnection)}.");
}
}
_manuallySetRequestAbortToken = null;
_abortedCts = null;
_remainingRequestHeadersBytesAllowed = ServerOptions.Limits.MaxRequestHeadersTotalSize;
_requestHeadersParsed = 0;
_responseBytesWritten = 0;
}
/// <summary>
/// Called once by the Connection class to begin the RequestProcessingAsync loop.

/// </summary>
public void Start()
{
Reset();
_requestProcessingTask =
Task.Factory.StartNew(
(o) => ((Frame)o).RequestProcessingAsync(),
this,
default(CancellationToken),
TaskCreationOptions.DenyChildAttach,
TaskScheduler.Default).Unwrap();
_frameStartedTcs.SetResult(null);
}
/// <summary>
/// Should be called when the server wants to initiate a shutdown. The returned Task
/// completes when the RequestProcessingAsync function has exited. It is expected that
/// Stop will be called on all active connections, and Task.WaitAll() will be called on
/// every return value.
/// </summary>
public Task StopAsync()
{
_requestProcessingStopping = true;
Input.CompleteAwaiting();
return _requestProcessingTask ?? TaskCache.CompletedTask;
}
/// <summary>
/// Immediately kill the connection and poison the request and response streams.
/// </summary>
public void Abort(Exception error = null)
{
if (Interlocked.Exchange(ref _requestAborted, 1) == 0)
{
_requestProcessingStopping = true;
_frameStreams?.RequestBody.Abort(error);
_frameStreams?.ResponseBody.Abort();
try
{
ConnectionControl.End(ProduceEndType.SocketDisconnect);
}
catch (Exception ex)
{
Log.LogError(0, ex, "Abort");
}
try
{
RequestAbortedSource.Cancel();
}
catch (Exception ex)
{
Log.LogError(0, ex, "Abort");
}
_abortedCts = null;
}
}
/// <summary>
/// Primary loop which consumes socket input, parses it for protocol framing, and invokes the
/// application delegate for as long as the socket is intended to remain open.
/// The resulting Task from this loop is preserved in a field which is used when the server needs
/// to drain and close all currently active connections.
/// </summary>
public abstract Task RequestProcessingAsync();
public void OnStarting(Func<object, Task> callback, object state)
{
lock (_onStartingSync)
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(OnStarting));
}
if (_onStarting == null)
{
_onStarting = new Stack<KeyValuePair<Func<object, Task>, object>>();
}
_onStarting.Push(new KeyValuePair<Func<object, Task>, object>(callback, state));
}
}
public void OnCompleted(Func<object, Task> callback, object state)
{
lock (_onCompletedSync)
{
if (_onCompleted == null)
{
_onCompleted = new Stack<KeyValuePair<Func<object, Task>, object>>();
}
_onCompleted.Push(new KeyValuePair<Func<object, Task>, object>(callback, state));
}
}
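// Runs the registered OnStarting callbacks (most recently registered first) before the
// response headers are sent; a callback failure is recorded as an application error.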
protected async Task FireOnStarting()
{
Stack<KeyValuePair<Func<object, Task>, object>> onStarting = null;
lock (_onStartingSync)
{
onStarting = _onStarting;
_onStarting = null;
}
if (onStarting != null)
{
try
{
foreach (var entry in onStarting)
{
await entry.Key.Invoke(entry.Value);
}
}
catch (Exception ex)
{
ReportApplicationError(ex);
}
}
}
protected async Task FireOnCompleted()
{
Stack<KeyValuePair<Func<object, Task>, object>> onCompleted = null;
lock (_onCompletedSync)
{
onCompleted = _onCompleted;
_onCompleted = null;
}
if (onCompleted != null)
{
foreach (var entry in onCompleted)
{
try
{
await entry.Key.Invoke(entry.Value);
}
catch (Exception ex)
{
ReportApplicationError(ex);
}
}
}
}
public void Flush()
{
InitializeResponse(0).GetAwaiter().GetResult();
Output.Flush();
}
public async Task FlushAsync(CancellationToken cancellationToken)
{
await InitializeResponse(0);
await Output.FlushAsync(cancellationToken);
}
public void Write(ArraySegment<byte> data)
{
// For the first write, ensure headers are flushed if Write(Chunked) isn't called.
var firstWrite = !HasResponseStarted;
if (firstWrite)
{
InitializeResponse(data.Count).GetAwaiter().GetResult();
}
else
{
VerifyAndUpdateWrite(data.Count);
}
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
if (firstWrite)
{
Flush();
}
return;
}
WriteChunked(data);
}
else
{
CheckLastWrite();
Output.Write(data);
}
}
else
{
HandleNonBodyResponseWrite();
if (firstWrite)
{
Flush();
}
}
}
public Task WriteAsync(ArraySegment<byte> data, CancellationToken cancellationToken)
{
if (!HasResponseStarted)
{
return WriteAsyncAwaited(data, cancellationToken);
}
VerifyAndUpdateWrite(data.Count);
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
return TaskCache.CompletedTask;
}
return WriteChunkedAsync(data, cancellationToken);
}
else
{
CheckLastWrite();
return Output.WriteAsync(data, cancellationToken: cancellationToken);
}
}
else
{
HandleNonBodyResponseWrite();
return TaskCache.CompletedTask;
}
}
public async Task WriteAsyncAwaited(ArraySegment<byte> data, CancellationToken cancellationToken)
{
await InitializeResponseAwaited(data.Count);
// WriteAsyncAwaited is only called for the first write to the body.
// Ensure headers are flushed if Write(Chunked)Async isn't called.
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
await FlushAsync(cancellationToken);
return;
}
await WriteChunkedAsync(data, cancellationToken);
}
else
{
CheckLastWrite();
await Output.WriteAsync(data, cancellationToken: cancellationToken);
}
}
else
{
HandleNonBodyResponseWrite();
await FlushAsync(cancellationToken);
}
}
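// Tracks the number of response body bytes written and fails fast if a write would
// exceed the declared Content-Length.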
private void VerifyAndUpdateWrite(int count)
{
var responseHeaders = FrameResponseHeaders;
if (responseHeaders != null &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten + count > responseHeaders.ContentLength.Value)
{
_keepAlive = false;
throw new InvalidOperationException(
$"Response Content-Length mismatch: too many bytes written ({_responseBytesWritten + count} of {responseHeaders.ContentLength.Value}).");
}
_responseBytesWritten += count;
}
private void CheckLastWrite()
{
var responseHeaders = FrameResponseHeaders;
// Prevent firing request aborted token if this is the last write, to avoid
// aborting the request if the app is still running when the client receives
// the final bytes of the response and gracefully closes the connection.
//
// Called after VerifyAndUpdateWrite(), so _responseBytesWritten has already been updated.
if (responseHeaders != null &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten == responseHeaders.ContentLength.Value)
{
_abortedCts = null;
}
}
protected void VerifyResponseContentLength()
{
var responseHeaders = FrameResponseHeaders;
if (!HttpMethods.IsHead(Method) &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten < responseHeaders.ContentLength.Value)
{
// We need to close the connection if any bytes were written since the client
// cannot be certain of how many bytes it will receive.
if (_responseBytesWritten > 0)
{
_keepAlive = false;
}
ReportApplicationError(new InvalidOperationException(
$"Response Content-Length mismatch: too few bytes written ({_responseBytesWritten} of {responseHeaders.ContentLength.Value})."));
}
}
private void WriteChunked(ArraySegment<byte> data)
{
Output.Write(data, chunk: true);
}
private Task WriteChunkedAsync(ArraySegment<byte> data, CancellationToken cancellationToken)
{
return Output.WriteAsync(data, chunk: true, cancellationToken: cancellationToken);
}
private Task WriteChunkedResponseSuffix()
{
return Output.WriteAsync(_endChunkedResponseBytes);
}
private static ArraySegment<byte> CreateAsciiByteArraySegment(string text)
{
var bytes = Encoding.ASCII.GetBytes(text);
return new ArraySegment<byte>(bytes);
}
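// Writes an interim "100 Continue" response when an HTTP/1.1 request carries an
// "Expect: 100-continue" header and the real response has not started yet.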
public void ProduceContinue()
{
if (HasResponseStarted)
{
return;
}
StringValues expect;
if (_httpVersion == Http.HttpVersion.Http11 &&
RequestHeaders.TryGetValue("Expect", out expect) &&
(expect.FirstOrDefault() ?? "").Equals("100-continue", StringComparison.OrdinalIgnoreCase))
{
Output.Write(_continueBytes);
}
}
public Task InitializeResponse(int firstWriteByteCount)
{
if (HasResponseStarted)
{
return TaskCache.CompletedTask;
}
if (_onStarting != null)
{
return InitializeResponseAwaited(firstWriteByteCount);
}
if (_applicationException != null)
{
ThrowResponseAbortedException();
}
VerifyAndUpdateWrite(firstWriteByteCount);
ProduceStart(appCompleted: false);
return TaskCache.CompletedTask;
}
private async Task InitializeResponseAwaited(int firstWriteByteCount)
{
await FireOnStarting();
if (_applicationException != null)
{
ThrowResponseAbortedException();
}
VerifyAndUpdateWrite(firstWriteByteCount);
ProduceStart(appCompleted: false);
}
private void ProduceStart(bool appCompleted)
{
if (HasResponseStarted)
{
return;
}
_requestProcessingStatus = RequestProcessingStatus.ResponseStarted;
var statusBytes = ReasonPhrases.ToStatusBytes(StatusCode, ReasonPhrase);
CreateResponseHeader(statusBytes, appCompleted);
}
protected Task TryProduceInvalidRequestResponse()
{
if (_requestRejectedException != null)
{
if (FrameRequestHeaders == null || FrameResponseHeaders == null)
{
InitializeHeaders();
}
return ProduceEnd();
}
return TaskCache.CompletedTask;
}
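// Finalizes the response: for rejected requests or application errors it emits the
// appropriate error status (when headers haven't been sent yet), then writes any
// required chunked terminator and signals keep-alive or connection close.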
protected Task ProduceEnd()
{
if (_requestRejectedException != null || _applicationException != null)
{
if (HasResponseStarted)
{
// We can no longer change the response, so we simply close the connection.
_requestProcessingStopping = true;
return TaskCache.CompletedTask;
}
// If the request was rejected, the error state has already been set by SetBadRequestState and
// that should take precedence.
if (_requestRejectedException != null)
{
SetErrorResponseHeaders(statusCode: _requestRejectedException.StatusCode);
}
else
{
// 500 Internal Server Error
SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError);
}
}
if (!HasResponseStarted)
{
return ProduceEndAwaited();
}
return WriteSuffix();
}
private async Task ProduceEndAwaited()
{
ProduceStart(appCompleted: true);
// Force flush
await Output.FlushAsync();
await WriteSuffix();
}
private Task WriteSuffix()
{
// _autoChunk should be checked after we are sure ProduceStart() has been called
// since ProduceStart() may set _autoChunk to true.
if (_autoChunk)
{
return WriteAutoChunkSuffixAwaited();
}
if (_keepAlive)
{
ConnectionControl.End(ProduceEndType.ConnectionKeepAlive);
}
if (HttpMethods.IsHead(Method) && _responseBytesWritten > 0)
{
Log.ConnectionHeadResponseBodyWrite(ConnectionId, _responseBytesWritten);
}
return TaskCache.CompletedTask;
}
private async Task WriteAutoChunkSuffixAwaited()
{
// For the same reason we call CheckLastWrite() in Content-Length responses.
_abortedCts = null;
await WriteChunkedResponseSuffix();
if (_keepAlive)
{
ConnectionControl.End(ProduceEndType.ConnectionKeepAlive);
}
}
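// Composes the status line and response headers, deciding on keep-alive, chunked
// transfer coding, and the default Content-Length/Connection/Server/Date headers
// before handing the buffer back to the output.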
private void CreateResponseHeader(
byte[] statusBytes,
bool appCompleted)
{
var responseHeaders = FrameResponseHeaders;
var hasConnection = responseHeaders.HasConnection;
var connectionOptions = FrameHeaders.ParseConnection(responseHeaders.HeaderConnection);
var hasTransferEncoding = responseHeaders.HasTransferEncoding;
var transferCoding = FrameHeaders.GetFinalTransferCoding(responseHeaders.HeaderTransferEncoding);
var end = Output.ProducingStart();
if (_keepAlive && hasConnection)
{
_keepAlive = (connectionOptions & ConnectionOptions.KeepAlive) == ConnectionOptions.KeepAlive;
}
// https://tools.ietf.org/html/rfc7230#section-3.3.1
// If any transfer coding other than
// chunked is applied to a response payload body, the sender MUST either
// apply chunked as the final transfer coding or terminate the message
// by closing the connection.
if (hasTransferEncoding && transferCoding != TransferCoding.Chunked)
{
_keepAlive = false;
}
// Set whether response can have body
_canHaveBody = StatusCanHaveBody(StatusCode) && Method != "HEAD";
// Don't set the Content-Length or Transfer-Encoding headers
// automatically for HEAD requests or 204, 205, 304 responses.
if (_canHaveBody)
{
if (!hasTransferEncoding && !responseHeaders.ContentLength.HasValue)
{
if (appCompleted && StatusCode != StatusCodes.Status101SwitchingProtocols)
{
// Since the app has completed and we are only now generating
// the headers we can safely set the Content-Length to 0.
responseHeaders.ContentLength = 0;
}
else
{
// Note for future reference: never change this to set _autoChunk to true on HTTP/1.0
// connections, even if we were to infer the client supports it because an HTTP/1.0 request
// was received that used chunked encoding. Sending a chunked response to an HTTP/1.0
// client would break compliance with RFC 7230 (section 3.3.1):
//
// A server MUST NOT send a response containing Transfer-Encoding unless the corresponding
// request indicates HTTP/1.1 (or later).
if (_httpVersion == Http.HttpVersion.Http11 && StatusCode != StatusCodes.Status101SwitchingProtocols)
{
_autoChunk = true;
responseHeaders.SetRawTransferEncoding("chunked", _bytesTransferEncodingChunked);
}
else
{
_keepAlive = false;
}
}
}
}
else if (hasTransferEncoding)
{
RejectNonBodyTransferEncodingResponse(appCompleted);
}
responseHeaders.SetReadOnly();
if (!hasConnection)
{
if (!_keepAlive)
{
responseHeaders.SetRawConnection("close", _bytesConnectionClose);
}
else if (_httpVersion == Http.HttpVersion.Http10)
{
responseHeaders.SetRawConnection("keep-alive", _bytesConnectionKeepAlive);
}
}
if (ServerOptions.AddServerHeader && !responseHeaders.HasServer)
{
responseHeaders.SetRawServer(Constants.ServerName, _bytesServer);
}
if (!responseHeaders.HasDate)
{
var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues();
responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes);
}
end.CopyFrom(_bytesHttpVersion11);
end.CopyFrom(statusBytes);
responseHeaders.CopyTo(ref end);
end.CopyFrom(_bytesEndHeaders, 0, _bytesEndHeaders.Length);
Output.ProducingComplete(end);
}
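// Parses the request line (method, target, HTTP version) from the socket input,
// rejecting malformed or oversized lines and populating Method, Path, QueryString,
// RawTarget and HttpVersion on success.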
public RequestLineStatus TakeStartLine(SocketInput input)
{
const int MaxInvalidRequestLineChars = 32;
var scan = input.ConsumingStart();
var start = scan;
var consumed = scan;
var end = scan;
try
{
// We may hit this when the client has stopped sending data but
// the connection hasn't closed yet, and therefore Frame.Stop()
// hasn't been called yet.
if (scan.Peek() == -1)
{
return RequestLineStatus.Empty;
}
if (_requestProcessingStatus == RequestProcessingStatus.RequestPending)
{
ConnectionControl.ResetTimeout(_requestHeadersTimeoutMilliseconds, TimeoutAction.SendTimeoutResponse);
}
_requestProcessingStatus = RequestProcessingStatus.RequestStarted;
int bytesScanned;
if (end.Seek(ByteLF, out bytesScanned, ServerOptions.Limits.MaxRequestLineSize) == -1)
{
if (bytesScanned >= ServerOptions.Limits.MaxRequestLineSize)
{
RejectRequest(RequestRejectionReason.RequestLineTooLong);
}
else
{
return RequestLineStatus.Incomplete;
}
}
end.Take();
string method;
var begin = scan;
if (!begin.GetKnownMethod(out method))
{
if (scan.Seek(ByteSpace, ref end) == -1)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
method = begin.GetAsciiString(ref scan);
if (method == null)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
// Note: We're not in the fast path any more (GetKnownMethod should have handled any HTTP Method we're aware of)
// So we can be a tiny bit slower and more careful here.
for (int i = 0; i < method.Length; i++)
{
if (!IsValidTokenChar(method[i]))
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
}
}
else
{
scan.Skip(method.Length);
}
scan.Take();
begin = scan;
var needDecode = false;
var chFound = scan.Seek(ByteSpace, ByteQuestionMark, BytePercentage, ref end);
if (chFound == -1)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
else if (chFound == BytePercentage)
{
needDecode = true;
chFound = scan.Seek(ByteSpace, ByteQuestionMark, ref end);
if (chFound == -1)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
}
var pathBegin = begin;
var pathEnd = scan;
var queryString = "";
if (chFound == ByteQuestionMark)
{
begin = scan;
if (scan.Seek(ByteSpace, ref end) == -1)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
queryString = begin.GetAsciiString(ref scan);
}
var queryEnd = scan;
var pathBeginCh = pathBegin.Peek();
if (pathBeginCh == ByteSpace || pathBeginCh == ByteQuestionMark || pathBeginCh == BytePercentage)
{
// Empty or malformed request line
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
scan.Take();
begin = scan;
if (scan.Seek(ByteCR, ref end) == -1)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
string httpVersion;
if (!begin.GetKnownVersion(out httpVersion))
{
httpVersion = begin.GetAsciiStringEscaped(scan, 9);
if (httpVersion == string.Empty)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
else
{
RejectRequest(RequestRejectionReason.UnrecognizedHTTPVersion, httpVersion);
}
}
scan.Take(); // consume CR
if (scan.Take() != ByteLF)
{
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxInvalidRequestLineChars) : string.Empty);
}
// URIs are always encoded/escaped to ASCII https://tools.ietf.org/html/rfc3986#page-11
// Multibyte Internationalized Resource Identifiers (IRIs) are first converted to utf8;
// then encoded/escaped to ASCII https://www.ietf.org/rfc/rfc3987.txt "Mapping of IRIs to URIs"
string requestUrlPath;
string rawTarget;
if (needDecode)
{
// Read raw target before mutating memory.
rawTarget = pathBegin.GetAsciiString(ref queryEnd);
// URI was encoded, unescape and then parse as utf8
pathEnd = UrlPathDecoder.Unescape(pathBegin, pathEnd);
requestUrlPath = pathBegin.GetUtf8String(ref pathEnd);
}
else
{
// URI wasn't encoded, parse as ASCII
requestUrlPath = pathBegin.GetAsciiString(ref pathEnd);
if (queryString.Length == 0)
{
// No need to allocate an extra string if the path didn't need
// decoding and there's no query string following it.
rawTarget = requestUrlPath;
}
else
{
rawTarget = pathBegin.GetAsciiString(ref queryEnd);
}
}
var normalizedTarget = PathNormalizer.RemoveDotSegments(requestUrlPath);
consumed = scan;
Method = method;
QueryString = queryString;
RawTarget = rawTarget;
HttpVersion = httpVersion;
bool caseMatches;
if (RequestUrlStartsWithPathBase(normalizedTarget, out caseMatches))
{
PathBase = caseMatches ? _pathBase : normalizedTarget.Substring(0, _pathBase.Length);
Path = normalizedTarget.Substring(_pathBase.Length);
}
else if (rawTarget[0] == '/') // check rawTarget since normalizedTarget can be "" or "/" after dot segment removal
{
Path = normalizedTarget;
}
else
{
Path = string.Empty;
PathBase = string.Empty;
QueryString = string.Empty;
}
return RequestLineStatus.Done;
}
finally
{
input.ConsumingComplete(consumed, end);
}
}
private static bool IsValidTokenChar(char c)
{
// Determines if a character is valid as a 'token' as defined in the
// HTTP spec: https://tools.ietf.org/html/rfc7230#section-3.2.6
return
(c >= '0' && c <= '9') ||
(c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') ||
c == '!' ||
c == '#' ||
c == '$' ||
c == '%' ||
c == '&' ||
c == '\'' ||
c == '*' ||
c == '+' ||
c == '-' ||
c == '.' ||
c == '^' ||
c == '_' ||
c == '`' ||
c == '|' ||
c == '~';
}
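// Checks whether the request URL starts with the configured path base, reporting via
// caseMatches whether the match was case-sensitive.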
private bool RequestUrlStartsWithPathBase(string requestUrl, out bool caseMatches)
{
caseMatches = true;
if (string.IsNullOrEmpty(_pathBase))
{
return false;
}
if (requestUrl.Length < _pathBase.Length || (requestUrl.Length > _pathBase.Length && requestUrl[_pathBase.Length] != '/'))
{
return false;
}
for (var i = 0; i < _pathBase.Length; i++)
{
if (requestUrl[i] != _pathBase[i])
{
if (char.ToLowerInvariant(requestUrl[i]) == char.ToLowerInvariant(_pathBase[i]))
{
caseMatches = false;
}
else
{
return false;
}
}
}
return true;
}
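// Parses request headers line by line, enforcing the header count and total size
// limits, rejecting obsolete line folding, and appending each name/value pair to
// requestHeaders. Returns true once the final CRLF terminating the header section
// has been consumed.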
public bool TakeMessageHeaders(SocketInput input, FrameRequestHeaders requestHeaders)
{
var scan = input.ConsumingStart();
var consumed = scan;
var end = scan;
try
{
while (!end.IsEnd)
{
var ch = end.Peek();
if (ch == -1)
{
return false;
}
else if (ch == ByteCR)
{
// Check for final CRLF.
end.Take();
ch = end.Take();
if (ch == -1)
{
return false;
}
else if (ch == ByteLF)
{
ConnectionControl.CancelTimeout();
consumed = end;
return true;
}
// The CR was not followed by LF, so the header section does not end with a valid CRLF.
RejectRequest(RequestRejectionReason.HeadersCorruptedInvalidHeaderSequence);
}
else if (ch == ByteSpace || ch == ByteTab)
{
RejectRequest(RequestRejectionReason.HeaderLineMustNotStartWithWhitespace);
}
// If we've parsed the max allowed number of headers and we're starting a new
// one, we've gone over the limit.
if (_requestHeadersParsed == ServerOptions.Limits.MaxRequestHeaderCount)
{
RejectRequest(RequestRejectionReason.TooManyHeaders);
}
int bytesScanned;
if (end.Seek(ByteLF, out bytesScanned, _remainingRequestHeadersBytesAllowed) == -1)
{
if (bytesScanned >= _remainingRequestHeadersBytesAllowed)
{
RejectRequest(RequestRejectionReason.HeadersExceedMaxTotalSize);
}
else
{
return false;
}
}
var beginName = scan;
if (scan.Seek(ByteColon, ref end) == -1)
{
RejectRequest(RequestRejectionReason.NoColonCharacterFoundInHeaderLine);
}
var endName = scan;
scan.Take();
var validateName = beginName;
if (validateName.Seek(ByteSpace, ByteTab, ref endName) != -1)
{
RejectRequest(RequestRejectionReason.WhitespaceIsNotAllowedInHeaderName);
}
var beginValue = scan;
ch = scan.Take();
while (ch == ByteSpace || ch == ByteTab)
{
beginValue = scan;
ch = scan.Take();
}
scan = beginValue;
if (scan.Seek(ByteCR, ref end) == -1)
{
RejectRequest(RequestRejectionReason.MissingCRInHeaderLine);
}
scan.Take(); // we know this is '\r'
ch = scan.Take(); // expecting '\n'
end = scan;
if (ch != ByteLF)
{
RejectRequest(RequestRejectionReason.HeaderValueMustNotContainCR);
}
var next = scan.Peek();
if (next == -1)
{
return false;
}
else if (next == ByteSpace || next == ByteTab)
{
// From https://tools.ietf.org/html/rfc7230#section-3.2.4:
//
// Historically, HTTP header field values could be extended over
// multiple lines by preceding each extra line with at least one space
// or horizontal tab (obs-fold). This specification deprecates such
// line folding except within the message/http media type
// (Section 8.3.1). A sender MUST NOT generate a message that includes
// line folding (i.e., that has any field-value that contains a match to
// the obs-fold rule) unless the message is intended for packaging
// within the message/http media type.
//
// A server that receives an obs-fold in a request message that is not
// within a message/http container MUST either reject the message by
// sending a 400 (Bad Request), preferably with a representation
// explaining that obsolete line folding is unacceptable, or replace
// each received obs-fold with one or more SP octets prior to
// interpreting the field value or forwarding the message downstream.
RejectRequest(RequestRejectionReason.HeaderValueLineFoldingNotSupported);
}
// Trim trailing whitespace from header value by repeatedly advancing to next
// whitespace or CR.
//
// - If CR is found, this is the end of the header value.
// - If whitespace is found, this is the _tentative_ end of the header value.
// If non-whitespace is found after it and it's not CR, seek again to the next
// whitespace or CR for a new (possibly tentative) end of value.
var ws = beginValue;
var endValue = scan;
do
{
ws.Seek(ByteSpace, ByteTab, ByteCR);
endValue = ws;
ch = ws.Take();
while (ch == ByteSpace || ch == ByteTab)
{
ch = ws.Take();
}
} while (ch != ByteCR);
var name = beginName.GetArraySegment(endName);
var value = beginValue.GetAsciiString(ref endValue);
consumed = scan;
requestHeaders.Append(name.Array, name.Offset, name.Count, value);
_remainingRequestHeadersBytesAllowed -= bytesScanned;
_requestHeadersParsed++;
}
return false;
}
finally
{
input.ConsumingComplete(consumed, end);
}
}
public bool StatusCanHaveBody(int statusCode)
{
// List of status codes taken from Microsoft.Net.Http.Server.Response
return statusCode != StatusCodes.Status204NoContent &&
statusCode != StatusCodes.Status205ResetContent &&
statusCode != StatusCodes.Status304NotModified;
}
private void ThrowResponseAlreadyStartedException(string value)
{
throw new InvalidOperationException($"{value} cannot be set, response has already started.");
}
private void RejectNonBodyTransferEncodingResponse(bool appCompleted)
{
var ex = new InvalidOperationException($"Transfer-Encoding set on a {StatusCode} non-body response.");
if (!appCompleted)
{
// Back out of header creation so the exception surfaces in user code.
_requestProcessingStatus = RequestProcessingStatus.RequestStarted;
throw ex;
}
else
{
ReportApplicationError(ex);
// 500 Internal Server Error
SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError);
}
}
private void SetErrorResponseHeaders(int statusCode)
{
Debug.Assert(!HasResponseStarted, $"{nameof(SetErrorResponseHeaders)} called after response had already started.");
StatusCode = statusCode;
ReasonPhrase = null;
if (FrameResponseHeaders == null)
{
InitializeHeaders();
}
var responseHeaders = FrameResponseHeaders;
responseHeaders.Reset();
var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues();
responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes);
responseHeaders.ContentLength = 0;
if (ServerOptions.AddServerHeader)
{
responseHeaders.SetRawServer(Constants.ServerName, _bytesServer);
}
}
public void HandleNonBodyResponseWrite()
{
// Writes to a HEAD response are ignored and logged at the end of the request
if (Method != "HEAD")
{
// Throw Exception for 204, 205, 304 responses.
throw new InvalidOperationException($"Write to non-body {StatusCode} response.");
}
}
private void ThrowResponseAbortedException()
{
throw new ObjectDisposedException(
"The response has been aborted due to an unhandled application exception.",
_applicationException);
}
public void RejectRequest(RequestRejectionReason reason)
{
RejectRequest(BadHttpRequestException.GetException(reason));
}
public void RejectRequest(RequestRejectionReason reason, string value)
{
RejectRequest(BadHttpRequestException.GetException(reason, value));
}
private void RejectRequest(BadHttpRequestException ex)
{
Log.ConnectionBadRequest(ConnectionId, ex);
throw ex;
}
public void SetBadRequestState(RequestRejectionReason reason)
{
SetBadRequestState(BadHttpRequestException.GetException(reason));
}
public void SetBadRequestState(BadHttpRequestException ex)
{
if (!HasResponseStarted)
{
SetErrorResponseHeaders(ex.StatusCode);
}
_keepAlive = false;
_requestProcessingStopping = true;
_requestRejectedException = ex;
}
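// Records an exception thrown by the application, aggregating it with any previously
// reported exception, and logs it against the current connection.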
protected void ReportApplicationError(Exception ex)
{
if (_applicationException == null)
{
_applicationException = ex;
}
else if (_applicationException is AggregateException)
{
_applicationException = new AggregateException(_applicationException, ex).Flatten();
}
else
{
_applicationException = new AggregateException(_applicationException, ex);
}
Log.ApplicationError(ConnectionId, ex);
}
public enum RequestLineStatus
{
Empty,
Incomplete,
Done
}
private enum RequestProcessingStatus
{
RequestPending,
RequestStarted,
ResponseStarted
}
}
}
| 1 | 11,252 | Would it work if we changed the return type of 'RejectRequest*' methods to Exception and instead did `throw RejectRequest(...` ? | aspnet-KestrelHttpServer | .cs |