patch (string) | y (int64) | oldf (string) | idx (int64) | id (int64) | msg (string) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---
@@ -383,6 +383,7 @@ class ConsoleAddon:
part in ("response-headers", "response-body", "set-cookies") and
flow.response is None
)
+ flow.backup()
if require_dummy_response:
flow.response = http.HTTPResponse.make()
if part == "cookies": | 1 | import csv
import typing
from mitmproxy import ctx
from mitmproxy import command
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import contentviews
from mitmproxy.utils import strutils
import mitmproxy.types
from mitmproxy.tools.console import overlay
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import keymap
console_palettes = [
"lowlight",
"lowdark",
"light",
"dark",
"solarized_light",
"solarized_dark"
]
view_orders = [
"time",
"method",
"url",
"size",
]
console_layouts = [
"single",
"vertical",
"horizontal",
]
class UnsupportedLog:
"""
A small addon to dump info on flow types we don't support yet.
"""
def websocket_message(self, f):
message = f.messages[-1]
ctx.log.info(f.message_info(message))
ctx.log.debug(message.content if isinstance(message.content, str) else strutils.bytes_to_escaped_str(message.content))
def websocket_end(self, f):
ctx.log.info("WebSocket connection closed by {}: {} {}, {}".format(
f.close_sender,
f.close_code,
f.close_message,
f.close_reason))
def tcp_message(self, f):
message = f.messages[-1]
direction = "->" if message.from_client else "<-"
ctx.log.info("{client_host}:{client_port} {direction} tcp {direction} {server_host}:{server_port}".format(
client_host=f.client_conn.address[0],
client_port=f.client_conn.address[1],
server_host=f.server_conn.address[0],
server_port=f.server_conn.address[1],
direction=direction,
))
ctx.log.debug(strutils.bytes_to_escaped_str(message.content))
class ConsoleAddon:
"""
An addon that exposes console-specific commands, and hooks into required
events.
"""
def __init__(self, master):
self.master = master
self.started = False
def load(self, loader):
loader.add_option(
"console_layout", str, "single",
"Console layout.",
choices=sorted(console_layouts),
)
loader.add_option(
"console_layout_headers", bool, True,
"Show layout comonent headers",
)
loader.add_option(
"console_focus_follow", bool, False,
"Focus follows new flows."
)
loader.add_option(
"console_palette", str, "solarized_dark",
"Color palette.",
choices=sorted(console_palettes),
)
loader.add_option(
"console_palette_transparent", bool, False,
"Set transparent background for palette."
)
loader.add_option(
"console_mouse", bool, True,
"Console mouse interaction."
)
@command.command("console.layout.options")
def layout_options(self) -> typing.Sequence[str]:
"""
Returns the available options for the console_layout option.
"""
return ["single", "vertical", "horizontal"]
@command.command("console.intercept.toggle")
def intercept_toggle(self) -> None:
"""
Toggles interception on/off, leaving intercept filters intact.
"""
ctx.options.update(
intercept_active = not ctx.options.intercept_active
)
@command.command("console.layout.cycle")
def layout_cycle(self) -> None:
"""
Cycle through the console layout options.
"""
opts = self.layout_options()
off = self.layout_options().index(ctx.options.console_layout)
ctx.options.update(
console_layout = opts[(off + 1) % len(opts)]
)
@command.command("console.panes.next")
def panes_next(self) -> None:
"""
Go to the next layout pane.
"""
self.master.window.switch()
@command.command("console.options.reset.focus")
def options_reset_current(self) -> None:
"""
Reset the current option in the options editor.
"""
fv = self.master.window.current("options")
if not fv:
raise exceptions.CommandError("Not viewing options.")
self.master.commands.call("options.reset.one %s" % fv.current_name())
@command.command("console.nav.start")
def nav_start(self) -> None:
"""
Go to the start of a list or scrollable.
"""
self.master.inject_key("m_start")
@command.command("console.nav.end")
def nav_end(self) -> None:
"""
Go to the end of a list or scrollable.
"""
self.master.inject_key("m_end")
@command.command("console.nav.next")
def nav_next(self) -> None:
"""
Go to the next navigable item.
"""
self.master.inject_key("m_next")
@command.command("console.nav.select")
def nav_select(self) -> None:
"""
Select a navigable item for viewing or editing.
"""
self.master.inject_key("m_select")
@command.command("console.nav.up")
def nav_up(self) -> None:
"""
Go up.
"""
self.master.inject_key("up")
@command.command("console.nav.down")
def nav_down(self) -> None:
"""
Go down.
"""
self.master.inject_key("down")
@command.command("console.nav.pageup")
def nav_pageup(self) -> None:
"""
Go up a page.
"""
self.master.inject_key("page up")
@command.command("console.nav.pagedown")
def nav_pagedown(self) -> None:
"""
Go down a page.
"""
self.master.inject_key("page down")
@command.command("console.nav.left")
def nav_left(self) -> None:
"""
Go left.
"""
self.master.inject_key("left")
@command.command("console.nav.right")
def nav_right(self) -> None:
"""
Go right.
"""
self.master.inject_key("right")
@command.command("console.choose")
def console_choose(
self,
prompt: str,
choices: typing.Sequence[str],
cmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.Arg
) -> None:
"""
Prompt the user to choose from a specified list of strings, then
invoke another command with all occurrences of {choice} replaced by
the choice the user made.
"""
def callback(opt):
# We're now outside of the call context...
repl = cmd + " " + " ".join(args)
repl = repl.replace("{choice}", opt)
try:
self.master.commands.call(repl)
except exceptions.CommandError as e:
signals.status_message.send(message=str(e))
self.master.overlay(
overlay.Chooser(self.master, prompt, choices, "", callback)
)
@command.command("console.choose.cmd")
def console_choose_cmd(
self,
prompt: str,
choicecmd: mitmproxy.types.Cmd,
subcmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.Arg
) -> None:
"""
Prompt the user to choose from a list of strings returned by a
command, then invoke another command with all occurrences of {choice}
replaced by the choice the user made.
"""
choices = ctx.master.commands.call_args(choicecmd, [])
def callback(opt):
# We're now outside of the call context...
repl = " ".join(args)
repl = repl.replace("{choice}", opt)
try:
self.master.commands.call(subcmd + " " + repl)
except exceptions.CommandError as e:
signals.status_message.send(message=str(e))
self.master.overlay(
overlay.Chooser(self.master, prompt, choices, "", callback)
)
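# Illustrative usage (hypothetical binding, not defined in this file): a key
# bound to 'console.choose.cmd Format export.formats console.command
# export.file {choice} @focus' prompts with the strings returned by
# "export.formats" and substitutes the selection into the final command.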
@command.command("console.command")
def console_command(self, *partial: str) -> None:
"""
Prompt the user to edit a command with a (possibly empty) starting value.
"""
signals.status_prompt_command.send(partial=" ".join(partial)) # type: ignore
@command.command("console.view.keybindings")
def view_keybindings(self) -> None:
"""View the commands list."""
self.master.switch_view("keybindings")
@command.command("console.view.commands")
def view_commands(self) -> None:
"""View the commands list."""
self.master.switch_view("commands")
@command.command("console.view.options")
def view_options(self) -> None:
"""View the options editor."""
self.master.switch_view("options")
@command.command("console.view.eventlog")
def view_eventlog(self) -> None:
"""View the options editor."""
self.master.switch_view("eventlog")
@command.command("console.view.help")
def view_help(self) -> None:
"""View help."""
self.master.switch_view("help")
@command.command("console.view.flow")
def view_flow(self, flow: flow.Flow) -> None:
"""View a flow."""
if hasattr(flow, "request"):
# FIXME: Also set focus?
self.master.switch_view("flowview")
@command.command("console.exit")
def exit(self) -> None:
"""Exit mitmproxy."""
self.master.shutdown()
@command.command("console.view.pop")
def view_pop(self) -> None:
"""
Pop a view off the console stack. At the top level, this prompts the
user to exit mitmproxy.
"""
signals.pop_view_state.send(self)
@command.command("console.bodyview")
@command.argument("part", type=mitmproxy.types.Choice("console.bodyview.options"))
def bodyview(self, f: flow.Flow, part: str) -> None:
"""
Spawn an external viewer for a flow request or response body based
on the detected MIME type. We use the mailcap system to find the
correct viewer, and fall back to the programs in $PAGER or $EDITOR
if necessary.
"""
fpart = getattr(f, part, None)
if not fpart:
raise exceptions.CommandError("Part must be either request or response, not %s." % part)
t = fpart.headers.get("content-type")
content = fpart.get_content(strict=False)
if not content:
raise exceptions.CommandError("No content to view.")
self.master.spawn_external_viewer(content, t)
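# For instance, a body with content-type "text/html" is matched against the
# user's mailcap entries (e.g. a hypothetical "text/html; lynx -dump %s;
# copiousoutput"); if nothing matches, $PAGER or $EDITOR is used instead.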
@command.command("console.bodyview.options")
def bodyview_options(self) -> typing.Sequence[str]:
"""
Possible parts for console.bodyview.
"""
return ["request", "response"]
@command.command("console.edit.focus.options")
def edit_focus_options(self) -> typing.Sequence[str]:
"""
Possible components for console.edit.focus.
"""
return [
"cookies",
"form",
"path",
"method",
"query",
"reason",
"request-headers",
"response-headers",
"request-body",
"response-body",
"status_code",
"set-cookies",
"url",
]
@command.command("console.edit.focus")
@command.argument("part", type=mitmproxy.types.Choice("console.edit.focus.options"))
def edit_focus(self, part: str) -> None:
"""
Edit a component of the currently focused flow.
"""
flow = self.master.view.focus.flow
# This shouldn't be necessary once this command is "console.edit @focus",
# but for now it is.
if not flow:
raise exceptions.CommandError("No flow selected.")
require_dummy_response = (
part in ("response-headers", "response-body", "set-cookies") and
flow.response is None
)
if require_dummy_response:
flow.response = http.HTTPResponse.make()
if part == "cookies":
self.master.switch_view("edit_focus_cookies")
elif part == "form":
self.master.switch_view("edit_focus_form")
elif part == "path":
self.master.switch_view("edit_focus_path")
elif part == "query":
self.master.switch_view("edit_focus_query")
elif part == "request-headers":
self.master.switch_view("edit_focus_request_headers")
elif part == "response-headers":
self.master.switch_view("edit_focus_response_headers")
elif part in ("request-body", "response-body"):
if part == "request-body":
message = flow.request
else:
message = flow.response
c = self.master.spawn_editor(message.get_content(strict=False) or b"")
# Fix an issue caused by some editors when editing a
# request/response body. Many editors make it hard to save a
# file without a terminating newline on the last line. When
# editing message bodies, this can cause problems. For now, I
# just strip the newlines off the end of the body when we return
# from an editor.
message.content = c.rstrip(b"\n")
elif part == "set-cookies":
self.master.switch_view("edit_focus_setcookies")
elif part in ["url", "method", "status_code", "reason"]:
self.master.commands.call(
"console.command flow.set @focus %s " % part
)
def _grideditor(self):
gewidget = self.master.window.current("grideditor")
if not gewidget:
raise exceptions.CommandError("Not in a grideditor.")
return gewidget.key_responder()
@command.command("console.grideditor.add")
def grideditor_add(self) -> None:
"""
Add a row after the cursor.
"""
self._grideditor().cmd_add()
@command.command("console.grideditor.insert")
def grideditor_insert(self) -> None:
"""
Insert a row before the cursor.
"""
self._grideditor().cmd_insert()
@command.command("console.grideditor.delete")
def grideditor_delete(self) -> None:
"""
Delete a row.
"""
self._grideditor().cmd_delete()
@command.command("console.grideditor.load")
def grideditor_load(self, path: mitmproxy.types.Path) -> None:
"""
Read a file into the current cell.
"""
self._grideditor().cmd_read_file(path)
@command.command("console.grideditor.load_escaped")
def grideditor_load_escaped(self, path: mitmproxy.types.Path) -> None:
"""
Read a file containing a Python-style escaped string into the
current cell.
"""
self._grideditor().cmd_read_file_escaped(path)
@command.command("console.grideditor.save")
def grideditor_save(self, path: mitmproxy.types.Path) -> None:
"""
Save data to file as a CSV.
"""
rows = self._grideditor().value
try:
with open(path, "w", newline='', encoding="utf8") as fp:
writer = csv.writer(fp)
for row in rows:
writer.writerow(
[strutils.always_str(x) or "" for x in row] # type: ignore
)
ctx.log.alert("Saved %s rows as CSV." % (len(rows)))
except IOError as e:
ctx.log.error(str(e))
@command.command("console.grideditor.editor")
def grideditor_editor(self) -> None:
"""
Spawn an external editor on the current cell.
"""
self._grideditor().cmd_spawn_editor()
@command.command("console.flowview.mode.set")
@command.argument("mode", type=mitmproxy.types.Choice("console.flowview.mode.options"))
def flowview_mode_set(self, mode: str) -> None:
"""
Set the display mode for the current flow view.
"""
fv = self.master.window.current_window("flowview")
if not fv:
raise exceptions.CommandError("Not viewing a flow.")
idx = fv.body.tab_offset
if mode not in [i.name.lower() for i in contentviews.views]:
raise exceptions.CommandError("Invalid flowview mode.")
try:
self.master.commands.call_args(
"view.setval",
["@focus", "flowview_mode_%s" % idx, mode]
)
except exceptions.CommandError as e:
signals.status_message.send(message=str(e))
@command.command("console.flowview.mode.options")
def flowview_mode_options(self) -> typing.Sequence[str]:
"""
Returns the valid options for the flowview mode.
"""
return [i.name.lower() for i in contentviews.views]
@command.command("console.flowview.mode")
def flowview_mode(self) -> str:
"""
Get the display mode for the current flow view.
"""
fv = self.master.window.current_window("flowview")
if not fv:
raise exceptions.CommandError("Not viewing a flow.")
idx = fv.body.tab_offset
return self.master.commands.call_args(
"view.getval",
[
"@focus",
"flowview_mode_%s" % idx,
self.master.options.default_contentview,
]
)
@command.command("console.key.contexts")
def key_contexts(self) -> typing.Sequence[str]:
"""
The available contexts for key binding.
"""
return list(sorted(keymap.Contexts))
@command.command("console.key.bind")
def key_bind(
self,
contexts: typing.Sequence[str],
key: str,
cmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.Arg
) -> None:
"""
Bind a shortcut key.
"""
try:
self.master.keymap.add(
key,
cmd + " " + " ".join(args),
contexts,
""
)
except ValueError as v:
raise exceptions.CommandError(v)
@command.command("console.key.unbind")
def key_unbind(self, contexts: typing.Sequence[str], key: str) -> None:
"""
Un-bind a shortcut key.
"""
try:
self.master.keymap.remove(key, contexts)
except ValueError as v:
raise exceptions.CommandError(v)
def _keyfocus(self):
kwidget = self.master.window.current("keybindings")
if not kwidget:
raise exceptions.CommandError("Not viewing key bindings.")
f = kwidget.get_focused_binding()
if not f:
raise exceptions.CommandError("No key binding focused")
return f
@command.command("console.key.unbind.focus")
def key_unbind_focus(self) -> None:
"""
Un-bind the shortcut key currently focused in the key binding viewer.
"""
b = self._keyfocus()
try:
self.master.keymap.remove(b.key, b.contexts)
except ValueError as v:
raise exceptions.CommandError(v)
@command.command("console.key.execute.focus")
def key_execute_focus(self) -> None:
"""
Execute the currently focused key binding.
"""
b = self._keyfocus()
self.console_command(b.command)
@command.command("console.key.edit.focus")
def key_edit_focus(self) -> None:
"""
Edit the currently focused key binding.
"""
b = self._keyfocus()
self.console_command(
"console.key.bind",
",".join(b.contexts),
b.key,
b.command,
)
def running(self):
self.started = True
def update(self, flows):
if not flows:
signals.update_settings.send(self)
for f in flows:
signals.flow_change.send(self, flow=f)
| 1 | 13,881 | Nit: Don't move it between `require_dummy_response` definition and usage; it can live above or below :) | mitmproxy-mitmproxy | py |
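The review nit above is purely about placement: `flow.backup()` must not land between the `require_dummy_response` assignment and its use. A minimal sketch of one placement the reviewer would accept, with the surrounding lines taken from the patch context:

```python
flow.backup()  # snapshot the flow before any of the edits below mutate it
require_dummy_response = (
    part in ("response-headers", "response-body", "set-cookies") and
    flow.response is None
)
if require_dummy_response:
    flow.response = http.HTTPResponse.make()
```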
@@ -377,6 +377,17 @@ type Local struct {
// connections originating from the local machine. Setting this to "true" allows creating large
// local-machine networks that won't trip the incoming connection limit observed by relays.
DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
+
+ // BlockServiceCustomFallbackEndpoints is a comma-delimited list of endpoints to which the block service
+ // redirects HTTP requests when it does not have the requested round. If it is not specified, the node
+ // checks EnableBlockServiceFallbackToArchiver.
+ BlockServiceCustomFallbackEndpoints string `version[16]:""`
+
+ // EnableBlockServiceFallbackToArchiver controls whether the block service redirects HTTP requests to
+ // an archiver or returns StatusNotFound (404) when it does not have the requested round and
+ // BlockServiceCustomFallbackEndpoints is empty.
+ // The archiver is selected at random; if none is available, StatusNotFound (404) is returned.
+ EnableBlockServiceFallbackToArchiver bool `version[16]:"true"`
}
// Filenames of config files within the configdir (e.g. ~/.algorand) | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package config
import (
"encoding/json"
"errors"
"io"
"os"
"os/user"
"path/filepath"
"strings"
"time"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/codecs"
)
// Devnet identifies the 'development network' used for development and not generally accessible publicly
const Devnet protocol.NetworkID = "devnet"
// Betanet identifies the 'beta network' used for early releases of features to the public prior to releasing these to mainnet/testnet
const Betanet protocol.NetworkID = "betanet"
// Devtestnet identifies the 'development network for tests' used for running tests against development and not generally accessible publicly
const Devtestnet protocol.NetworkID = "devtestnet"
// Testnet identifies the publicly-available test network
const Testnet protocol.NetworkID = "testnet"
// Mainnet identifies the publicly-available real-money network
const Mainnet protocol.NetworkID = "mainnet"
// GenesisJSONFile is the name of the genesis.json file
const GenesisJSONFile = "genesis.json"
// Local holds the per-node-instance configuration settings for the protocol.
// !!! WARNING !!!
//
// These versioned struct tags need to be maintained CAREFULLY and treated
// like UNIVERSAL CONSTANTS - they should not be modified once committed.
//
// New fields may be added to the Local struct, along with a version tag
// denoting a new version. When doing so, also update the
// test/testdata/configs/config-v{n}.json and call "make generate" to regenerate the constants.
//
// !!! WARNING !!!
type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitely; otherwise, only the most recent blocks
// are kept around. (The precise number of recent blocks depends on the consensus parameters.)
Archival bool `version[0]:"false"`
// gossipNode.go
// how many peers to propagate to?
GossipFanout int `version[0]:"4"`
NetAddress string `version[0]:""`
// 1 * time.Minute = 60000000000 ns
ReconnectTime time.Duration `version[0]:"60" version[1]:"60000000000"`
// what we should tell people to connect to
PublicAddress string `version[0]:""`
MaxConnectionsPerIP int `version[3]:"30"`
// 0 == disable
PeerPingPeriodSeconds int `version[0]:"0"`
// for https serving
TLSCertFile string `version[0]:""`
TLSKeyFile string `version[0]:""`
// Logging
BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"`
// if this is 0, do not produce agreement.cadaver
CadaverSizeTarget uint64 `version[0]:"1073741824"`
// IncomingConnectionsLimit specifies the max number of long-lived incoming
// connections. 0 means no connections allowed. -1 is unbounded.
IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000"`
// BroadcastConnectionsLimit specifies the number of connections that
// will receive broadcast (gossip) messages from this node. If the
// node has more connections than this number, it will send broadcasts
// to the top connections by priority (outgoing connections first, then
// by money held by peers based on their participation key). 0 means
// no outgoing messages (not even transaction broadcasting to outgoing
// peers). -1 means unbounded (default).
BroadcastConnectionsLimit int `version[4]:"-1"`
// AnnounceParticipationKey specifies that this node should announce its
// participation key (with the largest stake) to its gossip peers. This
// allows peers to prioritize our connection, if necessary, in case of a
// DoS attack. Disabling this means that the peers will not have any
// additional information to allow them to prioritize our connection.
AnnounceParticipationKey bool `version[4]:"true"`
// PriorityPeers specifies peer IP addresses that should always get
// outgoing broadcast messages from this node.
PriorityPeers map[string]bool `version[4]:""`
// To make sure the algod process does not run out of FDs, algod ensures
// that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e.,
// IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant
// to leave room for short-lived FDs like DNS queries, SQLite files, etc.
ReservedFDs uint64 `version[2]:"256"`
// local server
// API endpoint address
EndpointAddress string `version[0]:"127.0.0.1:0"`
// timeouts passed to the rest http.Server implementation
RestReadTimeoutSeconds int `version[4]:"15"`
RestWriteTimeoutSeconds int `version[4]:"120"`
// SRV-based phonebook
DNSBootstrapID string `version[0]:"<network>.algorand.network"`
// Log file size limit in bytes
LogSizeLimit uint64 `version[0]:"1073741824"`
// text/template for creating log archive filename.
// Available template vars:
// Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}}
// Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}}
//
// If the filename ends with .gz or .bz2 it will be compressed.
//
// default: "node.archive.log" (no rotation, clobbers previous archive)
LogArchiveName string `version[4]:"node.archive.log"`
// LogArchiveMaxAge will be parsed by time.ParseDuration().
// Valid units are 's' seconds, 'm' minutes, 'h' hours
LogArchiveMaxAge string `version[4]:""`
// number of consecutive attempts to catchup after which we replace the peers we're connected to
CatchupFailurePeerRefreshRate int `version[0]:"10"`
// where should the node exporter listen for metrics
NodeExporterListenAddress string `version[0]:":9100"`
// enable metric reporting flag
EnableMetricReporting bool `version[0]:"false"`
// enable top accounts reporting flag
EnableTopAccountsReporting bool `version[0]:"false"`
// enable agreement reporting flag. Currently only prints additional period events.
EnableAgreementReporting bool `version[3]:"false"`
// enable agreement timing metrics flag
EnableAgreementTimeMetrics bool `version[3]:"false"`
// The path to the node exporter.
NodeExporterPath string `version[0]:"./node_exporter"`
// The fallback DNS resolver address that would be used if the system resolver would fail to retrieve SRV records
FallbackDNSResolverAddress string `version[0]:""`
// exponential increase factor of transaction pool's fee threshold, should always be 2 in production
TxPoolExponentialIncreaseFactor uint64 `version[0]:"2"`
SuggestedFeeBlockHistory int `version[0]:"3"`
// TxPoolSize is the number of transactions that fit in the transaction pool
TxPoolSize int `version[0]:"50000" version[5]:"15000"`
// number of seconds allowed for syncing transactions
TxSyncTimeoutSeconds int64 `version[0]:"30"`
// number of seconds between transaction synchronizations
TxSyncIntervalSeconds int64 `version[0]:"60"`
// the number of incoming message hashes buckets.
IncomingMessageFilterBucketCount int `version[0]:"5"`
// the size of each incoming message hash bucket.
IncomingMessageFilterBucketSize int `version[0]:"512"`
// the number of outgoing message hashes buckets.
OutgoingMessageFilterBucketCount int `version[0]:"3"`
// the size of each outgoing message hash bucket.
OutgoingMessageFilterBucketSize int `version[0]:"128"`
// enable the filtering of outgoing messages
EnableOutgoingNetworkMessageFiltering bool `version[0]:"true"`
// enable the filtering of incoming messages
EnableIncomingMessageFilter bool `version[0]:"false"`
// control enabling / disabling deadlock detection.
// negative (-1) to disable, positive (1) to enable, 0 for default.
DeadlockDetection int `version[1]:"0"`
// Prefer to run algod Hosted (under algoh)
// Observed by `goal` for now.
RunHosted bool `version[3]:"false"`
// The maximal number of blocks that catchup will fetch in parallel.
// If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used to limit the catchup.
// Setting this variable to 0 would disable the catchup
CatchupParallelBlocks uint64 `version[3]:"50" version[5]:"16"`
// Generate AssembleBlockMetrics telemetry event
EnableAssembleStats bool `version[0]:""`
// Generate ProcessBlockMetrics telemetry event
EnableProcessBlockStats bool `version[0]:""`
// SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee
SuggestedFeeSlidingWindowSize uint32 `version[3]:"50"`
// the max size the sync server would return
TxSyncServeResponseSize int `version[3]:"1000000"`
// IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
// Note -- Indexer cannot operate on non Archival nodes
IsIndexerActive bool `version[3]:"false"`
// UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
// determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
// proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
// field can be used.
UseXForwardedForAddressField string `version[0]:""`
// ForceRelayMessages indicates whether the network library should relay messages even in the case that no NetAddress was specified.
ForceRelayMessages bool `version[0]:"false"`
// ConnectionsRateLimitingWindowSeconds is being used in conjunction with ConnectionsRateLimitingCount;
// see ConnectionsRateLimitingCount description for further information. Providing a zero value
// in this variable disables the connection rate limiting.
ConnectionsRateLimitingWindowSeconds uint `version[4]:"1"`
// ConnectionsRateLimitingCount is being used along with ConnectionsRateLimitingWindowSeconds to determine if
// a connection request should be accepted or not. The gossip network examines all the incoming requests in the past
// ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceeds the ConnectionsRateLimitingCount
// value, the connection is refused.
ConnectionsRateLimitingCount uint `version[4]:"60"`
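// For example, with the defaults above (window of 1 second, count of 60), a
// 61st incoming connection from the same origin within one second is refused.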
// EnableRequestLogger enables the logging of the incoming requests to the telemetry server.
EnableRequestLogger bool `version[4]:"false"`
// PeerConnectionsUpdateInterval defines the interval at which the peer connections information is being sent to the
// telemetry ( when enabled ). Defined in seconds.
PeerConnectionsUpdateInterval int `version[5]:"3600"`
// EnableProfiler enables the go pprof endpoints, should be false if
// the algod api will be exposed to untrusted individuals
EnableProfiler bool `version[0]:"false"`
// TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
TelemetryToLog bool `version[5]:"true"`
// DNSSecurityFlags instructs algod to validate DNS responses.
// Possible flag values
// 0x00 - disabled
// 0x01 (dnssecSRV) - validate SRV response
// 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution
// 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution
// ...
DNSSecurityFlags uint32 `version[6]:"1"`
// EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message.
EnablePingHandler bool `version[6]:"true"`
// DisableOutgoingConnectionThrottling disables the connection throttling of the network library, which
// allows the network library to continuously disconnect relays based on their relative (and absolute) performance.
DisableOutgoingConnectionThrottling bool `version[5]:"false"`
// NetworkProtocolVersion overrides network protocol version ( if present )
NetworkProtocolVersion string `version[6]:""`
// CatchpointInterval sets the interval at which catchpoints are generated. Setting this to 0 disables catchpoint generation.
// See CatchpointTracking for more details.
CatchpointInterval uint64 `version[7]:"10000"`
// CatchpointFileHistoryLength defines how many catchpoint files we want to store back.
// 0 means don't store any, -1 means unlimited, and a positive number specifies the number of most recent catchpoint files to keep.
CatchpointFileHistoryLength int `version[7]:"365"`
// EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
// This functionality is required for the catchpoint catchup.
EnableLedgerService bool `version[7]:"false"`
// EnableBlockService enables the block serving service. The functionality of this depends on NetAddress, which must also be provided.
// This functionality is required for the catchup.
EnableBlockService bool `version[7]:"false"`
// EnableGossipBlockService enables the block serving service over the gossip network. The functionality of this depends on NetAddress, which must also be provided.
// This functionality is required for the relays to perform catchup from nodes.
EnableGossipBlockService bool `version[8]:"true"`
// CatchupHTTPBlockFetchTimeoutSec controls how long the http query for fetching a block from a relay would take before giving up and trying another relay.
CatchupHTTPBlockFetchTimeoutSec int `version[9]:"4"`
// CatchupGossipBlockFetchTimeoutSec controls how long the gossip query for fetching a block from a relay would take before giving up and trying another relay.
CatchupGossipBlockFetchTimeoutSec int `version[9]:"4"`
// CatchupLedgerDownloadRetryAttempts controls the number of attempts the ledger fetcher makes before giving up catching up to the provided catchpoint.
CatchupLedgerDownloadRetryAttempts int `version[9]:"50"`
// CatchupBlockDownloadRetryAttempts controls the number of attempts the block fetcher makes before giving up catching up to the provided catchpoint.
CatchupBlockDownloadRetryAttempts int `version[9]:"1000"`
// EnableDeveloperAPI enables teal/compile, teal/dryrun API endpoints.
// This functionality is disabled by default.
EnableDeveloperAPI bool `version[9]:"false"`
// OptimizeAccountsDatabaseOnStartup controls whether the accounts database would be optimized
// on algod startup.
OptimizeAccountsDatabaseOnStartup bool `version[10]:"false"`
// CatchpointTracking determines if catchpoints are going to be tracked. The value is interpreted as follows:
// A value of -1 means "don't track catchpoints".
// A value of 1 means "track catchpoints as long as CatchpointInterval is also set to a positive non-zero value". If CatchpointInterval <= 0, no catchpoint tracking would be performed.
// A value of 0 means automatic, which is the default value. In this mode, a non archival node would not track the catchpoints, and an archival node would track the catchpoints as long as CatchpointInterval > 0.
// Other values of CatchpointTracking would give a warning in the log file, and would behave as if the default value was provided.
CatchpointTracking int64 `version[11]:"0"`
// LedgerSynchronousMode defines the synchronous mode used by the ledger database. The supported options are:
// 0 - SQLite continues without syncing as soon as it has handed data off to the operating system.
// 1 - SQLite database engine will still sync at the most critical moments, but less often than in FULL mode.
// 2 - SQLite database engine will use the xSync method of the VFS to ensure that all content is safely written to the disk surface prior to continuing. On Mac OS, the data is additionally synchronized via fullfsync.
// 3 - In addition to what is done in 2, it provides additional durability if the commit is followed closely by a power loss.
// for further information see the description of SynchronousMode in dbutil.go
LedgerSynchronousMode int `version[12]:"2"`
// AccountsRebuildSynchronousMode defines the synchronous mode used by the ledger database while the account database is being rebuilt. This is not a typical operational use case,
// and is expected to happen only on either startup ( after enabling the catchpoint interval, or on certain database upgrades ) or during fast catchup. The values specified here
// and their meanings are identical to the ones in LedgerSynchronousMode.
AccountsRebuildSynchronousMode int `version[12]:"1"`
// MaxCatchpointDownloadDuration defines the maximum duration a client will be keeping the outgoing connection of a catchpoint download request open for processing before
// shutting it down. Networks that have large catchpoint files, slow connection or slow storage could be a good reason to increase this value. Note that this is a client-side only
// configuration value, and it's independent of the actual catchpoint file size.
MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000"`
// MinCatchpointFileDownloadBytesPerSecond defines the minimal download speed that would be considered to be "acceptable" by the catchpoint file fetcher, measured in bytes per seconds. If the
// provided stream speed drops below this threshold, the connection would be recycled. Note that this field is evaluated per catchpoint "chunk" and not on its own. If this field is zero,
// the default of 20480 would be used.
MinCatchpointFileDownloadBytesPerSecond uint64 `version[13]:"20480"`
// TraceServer is a host:port to report graph propagation trace info to.
NetworkMessageTraceServer string `version[13]:""`
// VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
VerifiedTranscationsCacheSize int `version[14]:"30000"`
// EnableCatchupFromArchiveServers controls which peers the catchup service would use in order to catchup.
// When enabled, the catchup service would use the archive servers before falling back to the relays.
// On networks that don't have archive servers, this becomes a no-op, as the catchup service would have no
// archive server to pick from, and therefore automatically selects one of the relay nodes.
EnableCatchupFromArchiveServers bool `version[15]:"false"`
// DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for
// connections originating from the local machine. Setting this to "true" allows creating large
// local-machine networks that won't trip the incoming connection limit observed by relays.
DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
}
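// As an illustration of the versioning scheme described above (hypothetical
// field, not part of this file), a new option added in a future version 17
// would pair a fresh tag with a bumped Version tag:
//
//	NewOption bool `version[17]:"false"`
//
// along with updating test/testdata/configs/config-v17.json and running
// "make generate" to regenerate the constants.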
// Filenames of config files within the configdir (e.g. ~/.algorand)
// ConfigFilename is the name of the config.json file where we store per-algod-instance settings
const ConfigFilename = "config.json"
// PhonebookFilename is the name of the phonebook configuration files - no longer used
const PhonebookFilename = "phonebook.json" // No longer used in product - still in tests
// LedgerFilenamePrefix is the prefix of the name of the ledger database files
const LedgerFilenamePrefix = "ledger"
// CrashFilename is the name of the agreement database file.
// It is used to recover from node crashes.
const CrashFilename = "crash.sqlite"
// CompactCertFilename is the name of the compact certificate database file.
// It is used to track in-progress compact certificates.
const CompactCertFilename = "compactcert.sqlite"
// ConfigurableConsensusProtocolsFilename defines a set of consensus protocols that
// are to be loaded from the data directory (if present) to override the
// built-in supported consensus protocols.
const ConfigurableConsensusProtocolsFilename = "consensus.json"
// LoadConfigFromDisk returns a Local config structure based on merging the defaults
// with settings loaded from the config file from the custom dir. If the custom file
// cannot be loaded, the default config is returned (with the error from loading the
// custom file).
func LoadConfigFromDisk(custom string) (c Local, err error) {
return loadConfigFromFile(filepath.Join(custom, ConfigFilename))
}
func loadConfigFromFile(configFile string) (c Local, err error) {
c = defaultLocal
c.Version = 0 // Reset to 0 so we get the version from the loaded file.
c, err = mergeConfigFromFile(configFile, c)
if err != nil {
return
}
// Migrate in case defaults were changed
// If a config file does not have version, it is assumed to be zero.
// All fields listed in migrate() might be changed if an actual value matches to default value from a previous version.
c, err = migrate(c)
return
}
// GetDefaultLocal returns a copy of the current defaultLocal config
func GetDefaultLocal() Local {
return defaultLocal
}
func mergeConfigFromDir(root string, source Local) (Local, error) {
return mergeConfigFromFile(filepath.Join(root, ConfigFilename), source)
}
func mergeConfigFromFile(configpath string, source Local) (Local, error) {
f, err := os.Open(configpath)
if err != nil {
return source, err
}
defer f.Close()
err = loadConfig(f, &source)
// For now, all relays (listening for incoming connections) are also Archival
// We can change this logic in the future, but it's currently the sanest default.
if source.NetAddress != "" {
source.Archival = true
source.EnableLedgerService = true
source.EnableBlockService = true
}
return source, err
}
func loadConfig(reader io.Reader, config *Local) error {
dec := json.NewDecoder(reader)
return dec.Decode(config)
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
dnsBootstrapString := cfg.DNSBootstrap(networkID)
bootstrapArray = strings.Split(dnsBootstrapString, ";")
// omit zero length entries from the result set.
for i := len(bootstrapArray) - 1; i >= 0; i-- {
if len(bootstrapArray[i]) == 0 {
bootstrapArray = append(bootstrapArray[:i], bootstrapArray[i+1:]...)
}
}
return
}
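// For example, a DNSBootstrap value of "a.algorand.network;;b.algorand.network"
// yields ["a.algorand.network", "b.algorand.network"]; the zero-length entry
// between the two semicolons is omitted.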
// DNSBootstrap returns the network-specific DNSBootstrap identifier
func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
// if the user hasn't modified the default DNSBootstrapID in the configuration
// file and we're targeting a devnet (via genesis file), we use the
// explicit devnet network bootstrap.
if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID {
if network == Devnet {
return "devnet.algodev.network"
} else if network == Betanet {
return "betanet.algodev.network"
}
}
return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
}
// SaveToDisk writes the Local settings into a root/ConfigFilename file
func (cfg Local) SaveToDisk(root string) error {
configpath := filepath.Join(root, ConfigFilename)
filename := os.ExpandEnv(configpath)
return cfg.SaveToFile(filename)
}
// SaveToFile saves the config to a specific filename, allowing overriding the default name
func (cfg Local) SaveToFile(filename string) error {
var alwaysInclude []string
alwaysInclude = append(alwaysInclude, "Version")
return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
}
type phonebookBlackWhiteList struct {
Include []string
}
// LoadPhonebook returns a phonebook loaded from the provided directory, if it exists.
// NOTE: We no longer use phonebook for anything but tests, but users should be able to use it
func LoadPhonebook(datadir string) ([]string, error) {
var entries []string
path := filepath.Join(datadir, PhonebookFilename)
f, rootErr := os.Open(path)
if rootErr != nil {
if !os.IsNotExist(rootErr) {
return nil, rootErr
}
} else {
defer f.Close()
phonebook := phonebookBlackWhiteList{}
dec := json.NewDecoder(f)
err := dec.Decode(&phonebook)
if err != nil {
return nil, errors.New("error decoding phonebook! got error: " + err.Error())
}
entries = phonebook.Include
}
// get an initial list of peers
return entries, rootErr
}
// SavePhonebookToDisk writes the phonebook into a root/PhonebookFilename file
func SavePhonebookToDisk(entries []string, root string) error {
configpath := filepath.Join(root, PhonebookFilename)
f, err := os.OpenFile(os.ExpandEnv(configpath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err == nil {
defer f.Close()
err = savePhonebook(entries, f)
}
return err
}
func savePhonebook(entries []string, w io.Writer) error {
pb := phonebookBlackWhiteList{
Include: entries,
}
enc := codecs.NewFormattedJSONEncoder(w)
return enc.Encode(pb)
}
var globalConfigFileRoot string
// GetConfigFilePath retrieves the full path to a configuration file
// These are global configurations - not specific to data-directory / network.
func GetConfigFilePath(file string) (string, error) {
rootPath, err := GetGlobalConfigFileRoot()
if err != nil {
return "", err
}
return filepath.Join(rootPath, file), nil
}
// GetGlobalConfigFileRoot returns the current root folder for global configuration files.
// This will likely only change for tests.
func GetGlobalConfigFileRoot() (string, error) {
var err error
if globalConfigFileRoot == "" {
globalConfigFileRoot, err = GetDefaultConfigFilePath()
if err == nil {
dirErr := os.Mkdir(globalConfigFileRoot, os.ModePerm)
if !os.IsExist(dirErr) {
err = dirErr
}
}
}
return globalConfigFileRoot, err
}
// SetGlobalConfigFileRoot allows overriding the root folder for global configuration files.
// It returns the current one so it can be restored, if desired.
// This will likely only change for tests.
func SetGlobalConfigFileRoot(rootPath string) string {
currentRoot := globalConfigFileRoot
globalConfigFileRoot = rootPath
return currentRoot
}
// GetDefaultConfigFilePath retrieves the default directory for global (not per-instance) config files
// By default we store in ~/.algorand/.
// This will likely only change for tests.
func GetDefaultConfigFilePath() (string, error) {
currentUser, err := user.Current()
if err != nil {
return "", err
}
if currentUser.HomeDir == "" {
return "", errors.New("GetDefaultConfigFilePath fail - current user has no home directory")
}
return filepath.Join(currentUser.HomeDir, ".algorand"), nil
}
const (
dnssecSRV = 1 << iota
dnssecRelayAddr
dnssecTelemetryAddr
)
// DNSSecuritySRVEnforced returns true if SRV response verification is enforced
func (cfg Local) DNSSecuritySRVEnforced() bool {
return cfg.DNSSecurityFlags&dnssecSRV != 0
}
// DNSSecurityRelayAddrEnforced returns true if relay name to IP addr resolution is enforced
func (cfg Local) DNSSecurityRelayAddrEnforced() bool {
return cfg.DNSSecurityFlags&dnssecRelayAddr != 0
}
// DNSSecurityTelemeryAddrEnforced returns true if telemetry and metrics name to IP addr resolution is enforced
func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0
}
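// For example, DNSSecurityFlags = 3 sets dnssecSRV|dnssecRelayAddr, so both
// DNSSecuritySRVEnforced and DNSSecurityRelayAddrEnforced return true while
// telemetry address validation remains off.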
// ProposalAssemblyTime is the max amount of time to spend on generating a proposal block. This should eventually have its own configurable value.
const ProposalAssemblyTime time.Duration = 250 * time.Millisecond
| 1 | 42,221 | With the current code, it does not work if `EnableCatchupFromArchiveServers` is disabled. To fix it: in getDNSAddrs, change the predicate to `if wn.config.EnableCatchupFromArchiveServers || wn.config.EnableCatchupFromArchiveServers {`, and in catchup/service.go and catchup/catchpointService.go, use `PeersPhonebookArchivers` only when `wn.config.EnableCatchupFromArchiveServers` is enabled. | algorand-go-algorand | go |
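A minimal sketch of the fix the review asks for. The review's predicate repeats `EnableCatchupFromArchiveServers` twice, so the second operand below substitutes `EnableBlockServiceFallbackToArchiver` (the option added by this patch) as the likely intent; the `WebsocketNetwork` shape and helper name are assumptions, not code shown here.

```go
package main

import "fmt"

// Illustrative stubs; the real types live in go-algorand's config and network packages.
type Local struct {
	EnableCatchupFromArchiveServers      bool
	EnableBlockServiceFallbackToArchiver bool
}

type WebsocketNetwork struct{ config Local }

// needsArchiverAddrs captures the predicate the review suggests for getDNSAddrs:
// archiver SRV records should be resolved when either feature that consumes
// them is enabled, not only EnableCatchupFromArchiveServers.
func (wn *WebsocketNetwork) needsArchiverAddrs() bool {
	return wn.config.EnableCatchupFromArchiveServers ||
		wn.config.EnableBlockServiceFallbackToArchiver
}

func main() {
	wn := &WebsocketNetwork{config: Local{EnableBlockServiceFallbackToArchiver: true}}
	fmt.Println(wn.needsArchiverAddrs()) // true: fallback-to-archiver alone triggers the lookup
}
```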
@@ -910,8 +910,9 @@ func (cr *ConflictResolver) resolveMergedPaths(ctx context.Context,
// cache for the merged branch.
mergedNodeCache := newNodeCacheStandard(cr.fbo.folderBranch)
// Initialize the root node. There will always be at least one
- // unmerged path.
- mergedNodeCache.GetOrCreate(mergedChains.mostRecentMD.data.Dir.BlockPointer,
+ // unmerged path. Hold it here so it doesn't get gc'd.
+ rootNode, _ := mergedNodeCache.GetOrCreate(
+ mergedChains.mostRecentMD.data.Dir.BlockPointer,
unmergedPaths[0].path[0].Name, nil)
newPtrs := make(map[BlockPointer]bool) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol"
"golang.org/x/net/context"
)
// CtxCRTagKey is the type used for unique context tags related to
// conflict resolution
type CtxCRTagKey int
const (
// CtxCRIDKey is the type of the tag for unique operation IDs
// related to conflict resolution
CtxCRIDKey CtxCRTagKey = iota
)
// CtxCROpID is the display name for the unique conflict
// resolution operation ID tag.
const CtxCROpID = "CRID"
type conflictInput struct {
unmerged MetadataRevision
merged MetadataRevision
}
// ConflictResolver is responsible for resolving conflicts in the
// background.
type ConflictResolver struct {
config Config
fbo *folderBranchOps
log logger.Logger
inputChanLock sync.RWMutex
inputChan chan conflictInput
// resolveGroup tracks the outstanding resolves.
resolveGroup RepeatedWaitGroup
inputLock sync.Mutex
currInput conflictInput
}
// NewConflictResolver constructs a new ConflictResolver (and launches
// any necessary background goroutines).
func NewConflictResolver(
config Config, fbo *folderBranchOps) *ConflictResolver {
// make a logger with an appropriate module name
branchSuffix := ""
if fbo.branch() != MasterBranch {
branchSuffix = " " + string(fbo.branch())
}
tlfStringFull := fbo.id().String()
log := config.MakeLogger(fmt.Sprintf("CR %s%s", tlfStringFull[:8],
branchSuffix))
cr := &ConflictResolver{
config: config,
fbo: fbo,
log: log,
currInput: conflictInput{
unmerged: MetadataRevisionUninitialized,
merged: MetadataRevisionUninitialized,
},
}
cr.startProcessing()
return cr
}
func (cr *ConflictResolver) startProcessing() {
cr.inputChanLock.Lock()
defer cr.inputChanLock.Unlock()
if cr.inputChan != nil {
return
}
cr.inputChan = make(chan conflictInput)
go cr.processInput(cr.inputChan)
}
func (cr *ConflictResolver) stopProcessing() {
cr.inputChanLock.Lock()
defer cr.inputChanLock.Unlock()
if cr.inputChan == nil {
return
}
close(cr.inputChan)
cr.inputChan = nil
}
// processInput processes conflict resolution jobs from the given
// channel until it is closed. This function uses a parameter for the
// channel instead of accessing cr.inputChan directly so that it
// doesn't have to hold inputChanLock.
func (cr *ConflictResolver) processInput(inputChan <-chan conflictInput) {
var cancel context.CancelFunc
var prevCRDone chan struct{}
defer func() {
if cancel != nil {
cancel()
}
}()
for ci := range inputChan {
ctx := ctxWithRandomID(context.Background(), CtxCRIDKey,
CtxCROpID, cr.log)
valid := func() bool {
cr.inputLock.Lock()
defer cr.inputLock.Unlock()
// The input is only interesting if one of the revisions
// is greater than what we've looked at to date.
if ci.unmerged <= cr.currInput.unmerged &&
ci.merged <= cr.currInput.merged {
return false
}
cr.log.CDebugf(ctx, "New conflict input %v following old "+
"input %v", ci, cr.currInput)
cr.currInput = ci
// cancel the existing conflict resolution (if any)
if cancel != nil {
cancel()
}
return true
}()
if !valid {
cr.log.CDebugf(ctx, "Ignoring uninteresting input: %v", ci)
cr.resolveGroup.Done()
continue
}
var waitChan chan struct{}
if cancel != nil {
waitChan = prevCRDone
}
ctx, cancel = context.WithCancel(ctx)
prevCRDone = make(chan struct{}) // closed when doResolve finishes
go func(ci conflictInput, done chan<- struct{}) {
defer cr.resolveGroup.Done()
defer close(done)
if waitChan != nil {
// Wait for the previous CR without blocking any
// Resolve callers, as that could result in deadlock
// (KBFS-1001).
select {
case <-waitChan:
case <-ctx.Done():
cr.log.CDebugf(ctx, "Resolution canceled before starting")
return
}
}
cr.doResolve(ctx, ci)
}(ci, prevCRDone)
}
}
// Resolve takes the latest known unmerged and merged revision
// numbers, and kicks off the resolution process.
func (cr *ConflictResolver) Resolve(unmerged MetadataRevision,
merged MetadataRevision) {
cr.inputChanLock.RLock()
defer cr.inputChanLock.RUnlock()
if cr.inputChan == nil {
return
}
cr.resolveGroup.Add(1)
cr.inputChan <- conflictInput{unmerged, merged}
}
// Wait blocks until the current set of submitted resolutions are
// complete (though not necessarily successful), or until the given
// context is canceled.
func (cr *ConflictResolver) Wait(ctx context.Context) error {
return cr.resolveGroup.Wait(ctx)
}
// Shutdown cancels any ongoing resolutions and stops any background
// goroutines.
func (cr *ConflictResolver) Shutdown() {
cr.stopProcessing()
}
// Pause cancels any ongoing resolutions and prevents any new ones from
// starting.
func (cr *ConflictResolver) Pause() {
cr.stopProcessing()
}
// Restart re-enables conflict resolution.
func (cr *ConflictResolver) Restart() {
cr.startProcessing()
}
func (cr *ConflictResolver) checkDone(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return nil
}
}
func (cr *ConflictResolver) getMDs(ctx context.Context, lState *lockState) (
unmerged []*RootMetadata, merged []*RootMetadata, err error) {
// first get all outstanding unmerged MDs for this device
branchPoint, unmerged, err := cr.fbo.getUnmergedMDUpdates(ctx, lState)
if err != nil {
return nil, nil, err
}
// now get all the merged MDs, starting from after the branch point
merged, err = getMergedMDUpdates(ctx, cr.fbo.config, cr.fbo.id(),
branchPoint+1)
if err != nil {
return nil, nil, err
}
// re-embed all the block changes
err = cr.fbo.reembedBlockChanges(ctx, lState, unmerged)
if err != nil {
return nil, nil, err
}
err = cr.fbo.reembedBlockChanges(ctx, lState, merged)
if err != nil {
return nil, nil, err
}
return unmerged, merged, nil
}
func (cr *ConflictResolver) updateCurrInput(ctx context.Context,
unmerged []*RootMetadata, merged []*RootMetadata) (err error) {
cr.inputLock.Lock()
defer cr.inputLock.Unlock()
// check done while holding the lock, so we know for sure if
// we've already been canceled and replaced by a new input.
err = cr.checkDone(ctx)
if err != nil {
return err
}
prevInput := cr.currInput
defer func() {
// reset the currInput if we get an error below
if err != nil {
cr.currInput = prevInput
}
}()
if len(unmerged) > 0 {
rev := unmerged[len(unmerged)-1].Revision
if rev < cr.currInput.unmerged {
return fmt.Errorf("Unmerged revision %d is lower than the "+
"expected unmerged revision %d", rev, cr.currInput.unmerged)
}
cr.currInput.unmerged = rev
}
if len(merged) > 0 {
rev := merged[len(merged)-1].Revision
if rev < cr.currInput.merged {
return fmt.Errorf("Merged revision %d is lower than the "+
"expected merged revision %d", rev, cr.currInput.merged)
}
cr.currInput.merged = rev
}
return nil
}
func (cr *ConflictResolver) makeChains(ctx context.Context,
unmerged []*RootMetadata, merged []*RootMetadata) (
unmergedChains *crChains, mergedChains *crChains, err error) {
unmergedChains, err = newCRChains(ctx, cr.config, unmerged)
if err != nil {
return nil, nil, err
}
mergedChains, err = newCRChains(ctx, cr.config, merged)
if err != nil {
return nil, nil, err
}
cr.fbo.status.setCRChains(unmergedChains, mergedChains)
return unmergedChains, mergedChains, nil
}
// A helper class that implements sort.Interface to sort paths by
// descending path length.
type crSortedPaths []path
// Len implements sort.Interface for crSortedPaths
func (sp crSortedPaths) Len() int {
return len(sp)
}
// Less implements sort.Interface for crSortedPaths
func (sp crSortedPaths) Less(i, j int) bool {
return len(sp[i].path) > len(sp[j].path)
}
// Swap implements sort.Interface for crSortedPaths
func (sp crSortedPaths) Swap(i, j int) {
sp[j], sp[i] = sp[i], sp[j]
}
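// For example, paths of lengths [1, 3, 2] sort to [3, 2, 1], so conflict
// checking visits the deepest (most specific) paths first.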
// getPathsFromChains returns a sorted slice of most recent paths to
// all the nodes in the given CR chains that were directly modified
// during a branch, and which existed at both the start and the end of
// the branch. This represents the paths that need to be checked for
// conflicts. The paths are sorted by descending path length. It
// uses the corresponding node cache when looking up paths, which must
// at least contain the most recent root node of the branch. Note
// that if a path cannot be found, the corresponding chain is
// completely removed from the set of CR chains.
func (cr *ConflictResolver) getPathsFromChains(ctx context.Context,
chains *crChains, nodeCache NodeCache) ([]path, error) {
newPtrs := make(map[BlockPointer]bool)
var ptrs []BlockPointer
for ptr, chain := range chains.byMostRecent {
newPtrs[ptr] = true
// We only care about the paths for ptrs that are directly
// affected by operations and were live through the entire
// unmerged branch.
if len(chain.ops) > 0 && !chains.isCreated(chain.original) &&
!chains.isDeleted(chain.original) {
ptrs = append(ptrs, ptr)
}
}
nodeMap, err := cr.fbo.blocks.SearchForNodes(
ctx, nodeCache, ptrs, newPtrs, chains.mostRecentMD)
if err != nil {
return nil, err
}
paths := make([]path, 0, len(nodeMap))
for ptr, n := range nodeMap {
if n == nil {
cr.log.CDebugf(ctx, "Ignoring pointer with no found path: %v", ptr)
chains.removeChain(ptr)
continue
}
p := cr.fbo.nodeCache.PathFromNode(n)
if p.tailPointer() != ptr {
return nil, NodeNotFoundError{ptr}
}
paths = append(paths, p)
// update the unmerged final paths
chain, ok := chains.byMostRecent[ptr]
if !ok {
cr.log.CErrorf(ctx, "Couldn't find chain for found path: %v", ptr)
continue
}
for _, op := range chain.ops {
op.setFinalPath(p)
}
}
// Order by descending path length.
sort.Sort(crSortedPaths(paths))
return paths, nil
}
func fileWithConflictingWrite(unmergedChains *crChains, mergedChains *crChains,
unmergedOriginal BlockPointer, mergedOriginal BlockPointer) bool {
mergedChain := mergedChains.byOriginal[mergedOriginal]
unmergedChain := unmergedChains.byOriginal[unmergedOriginal]
if mergedChain != nil && unmergedChain != nil {
mergedSync := false
unmergedSync := false
for _, op := range mergedChain.ops {
if _, ok := op.(*syncOp); ok {
mergedSync = true
break
}
}
for _, op := range unmergedChain.ops {
if _, ok := op.(*syncOp); ok {
unmergedSync = true
break
}
}
return mergedSync && unmergedSync
}
return false
}
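// For example, a file created in both branches with writes only on the
// unmerged side can still be merged; writes on both sides make this return
// true, forcing a conflict instead of a merge.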
// checkPathForMerge checks whether the given unmerged chain and path
// contains any newly-created subdirectories that were created
// simultaneously in the merged branch as well. If so, it recursively
// checks that directory as well. It returns a slice of any new
// unmerged paths that need to be checked for conflicts later in
// conflict resolution, for all subdirectories of the given path.
func (cr *ConflictResolver) checkPathForMerge(ctx context.Context,
unmergedChain *crChain, unmergedPath path, unmergedChains *crChains,
mergedChains *crChains) ([]path, error) {
mergedChain, ok := mergedChains.byOriginal[unmergedChain.original]
if !ok {
// No corresponding merged chain means we don't have to merge
// any directories.
return nil, nil
}
// Find instances of the same directory being created in both
// branches. Only merge completely new directories -- anything
// involving a rename will result in a conflict for now.
//
// TODO: have a better merge strategy for renamed directories!
mergedCreates := make(map[string]*createOp)
for _, op := range mergedChain.ops {
cop, ok := op.(*createOp)
if !ok || len(cop.Refs()) == 0 || cop.renamed {
continue
}
mergedCreates[cop.NewName] = cop
}
if len(mergedCreates) == 0 {
return nil, nil
}
var newUnmergedPaths []path
toDrop := make(map[int]bool)
for i, op := range unmergedChain.ops {
cop, ok := op.(*createOp)
if !ok || len(cop.Refs()) == 0 || cop.renamed {
continue
}
// Is there a corresponding merged create with the same type?
mergedCop, ok := mergedCreates[cop.NewName]
if !ok || mergedCop.Type != cop.Type {
continue
}
unmergedOriginal := cop.Refs()[0]
mergedOriginal := mergedCop.Refs()[0]
if cop.Type != Dir {
// Only merge files if they don't both have writes.
if fileWithConflictingWrite(unmergedChains, mergedChains,
unmergedOriginal, mergedOriginal) {
continue
}
}
toDrop[i] = true
cr.log.CDebugf(ctx, "Merging name %s (%s) in %v (unmerged original %v "+
"changed to %v)", cop.NewName, cop.Type, unmergedChain.mostRecent,
unmergedOriginal, mergedOriginal)
// Change the original to match the merged original, so we can
// check for conflicts later. Note that the most recent will
// stay the same, so we can still match the unmerged path
// correctly.
err := unmergedChains.changeOriginal(unmergedOriginal, mergedOriginal)
if _, notFound := err.(NoChainFoundError); notFound {
unmergedChains.toUnrefPointers[unmergedOriginal] = true
continue
} else if err != nil {
return nil, err
}
unmergedChain, ok := unmergedChains.byOriginal[mergedOriginal]
if !ok {
return nil, fmt.Errorf("Change original (%v -> %v) didn't work",
unmergedOriginal, mergedOriginal)
}
newPath := unmergedPath.ChildPath(cop.NewName, unmergedChain.mostRecent)
if cop.Type == Dir {
// recurse for this chain
newPaths, err := cr.checkPathForMerge(ctx, unmergedChain, newPath,
unmergedChains, mergedChains)
if err != nil {
return nil, err
}
// Add any further subdirectories that need merging under this
// subdirectory.
newUnmergedPaths = append(newUnmergedPaths, newPaths...)
} else {
// Set the path for all child ops
unrefedOrig := false
for _, op := range unmergedChain.ops {
op.setFinalPath(newPath)
_, isSyncOp := op.(*syncOp)
// If a later write overwrites the original, take it
// out of the unmerged created list so it can be
// properly unreferenced.
if !unrefedOrig && isSyncOp {
unrefedOrig = true
delete(unmergedChains.createdOriginals, mergedOriginal)
}
}
}
// Then add this create's path.
newUnmergedPaths = append(newUnmergedPaths, newPath)
}
// Remove the unneeded create ops
if len(toDrop) > 0 {
newOps := make([]op, 0, len(unmergedChain.ops)-len(toDrop))
for i, op := range unmergedChain.ops {
if toDrop[i] {
cr.log.CDebugf(ctx,
"Dropping double create unmerged operation: %s", op)
} else {
newOps = append(newOps, op)
}
}
unmergedChain.ops = newOps
}
return newUnmergedPaths, nil
}
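// Illustrative example (a sketch of a typical case, not taken from a
// real run): if both branches independently did `mkdir a/b`,
// checkPathForMerge drops the unmerged createOp for "b", repoints the
// unmerged chain's original at the merged branch's "b", and returns
// the path to "b" so its contents can be checked for conflicts too.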
// findCreatedDirsToMerge finds directories that were created in both
// the unmerged and merged branches, and resets the original unmerged
// pointer to match the original merged pointer. It returns a slice of
// new unmerged paths that need to be combined with the unmergedPaths
// slice.
func (cr *ConflictResolver) findCreatedDirsToMerge(ctx context.Context,
unmergedPaths []path, unmergedChains *crChains, mergedChains *crChains) (
[]path, error) {
var newUnmergedPaths []path
for _, unmergedPath := range unmergedPaths {
unmergedChain, ok :=
unmergedChains.byMostRecent[unmergedPath.tailPointer()]
if !ok {
return nil, fmt.Errorf("findCreatedDirsToMerge: No unmerged chain "+
"for most recent %v", unmergedPath.tailPointer())
}
newPaths, err := cr.checkPathForMerge(ctx, unmergedChain, unmergedPath,
unmergedChains, mergedChains)
if err != nil {
return nil, err
}
newUnmergedPaths = append(newUnmergedPaths, newPaths...)
}
return newUnmergedPaths, nil
}
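// createMapKey identifies a create operation by its parent pointer
// and new name; resolveMergedPaths uses it to deduplicate recreate
// ops, e.g.:
//
//	key := createMapKey{op.Dir.Unref, op.NewName}
//	if !createsSeen[key] {
//		createsSeen[key] = true
//		recreateOps = append(recreateOps, op)
//	}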
type createMapKey struct {
ptr BlockPointer
name string
}
// addChildBlocksIfIndirectFile adds refblocks for all child blocks of
// the given file. It will return an error if called with a pointer
// that doesn't represent a file.
func (cr *ConflictResolver) addChildBlocksIfIndirectFile(ctx context.Context,
lState *lockState, original BlockPointer, unmergedChains *crChains,
currPath path, op op) error {
mostRecent, err := unmergedChains.mostRecentFromOriginalOrSame(original)
if err != nil {
return err
}
// For files with indirect pointers, add all child blocks
// as refblocks for the re-created file.
fblock, err := cr.fbo.blocks.GetFileBlockForReading(ctx, lState,
unmergedChains.mostRecentMD, mostRecent, currPath.Branch, currPath)
if err != nil {
return err
}
if fblock.IsInd {
cr.log.CDebugf(ctx, "Adding child pointers for recreated "+
"file %s", currPath)
for _, ptr := range fblock.IPtrs {
op.AddRefBlock(ptr.BlockPointer)
}
}
return nil
}
// resolveMergedPathTail takes an unmerged path, and returns as much
// of the tail-end of the corresponding merged path as it can, using
// only information within the chains. It may not be able to return a
// complete path if, for example, a directory was changed in the
// unmerged branch but not in the merged branch, and so the merged
// chain would not have enough information to construct the merged
// path completely. This function returns the partial path, as well
// as the most recent pointer to the first changed node in the merged
// chains (which can be subsequently used to find the beginning of the
// merged path).
//
// The given unmerged path should be for a node that wasn't created
// during the unmerged branch.
//
// It is also possible for directories used in the unmerged path to
// have been completely removed from the merged path. In this case,
// they need to be recreated. So this function also returns a slice
// of create ops that will need to be replayed in the merged branch
// for the conflicts to be resolved.
func (cr *ConflictResolver) resolveMergedPathTail(ctx context.Context,
lState *lockState, unmergedPath path, unmergedChains *crChains,
mergedChains *crChains) (path, BlockPointer, []*createOp, error) {
unmergedOriginal, err :=
unmergedChains.originalFromMostRecent(unmergedPath.tailPointer())
if err != nil {
cr.log.CDebugf(ctx, "Couldn't find original pointer for %v",
unmergedPath.tailPointer())
return path{}, BlockPointer{}, nil, err
}
var recreateOps []*createOp // fill in backwards, and reverse at the end
currOriginal := unmergedOriginal
currPath := unmergedPath
mergedPath := path{
FolderBranch: unmergedPath.FolderBranch,
path: nil, // fill in backwards, and reverse at the end
}
// First find the earliest merged parent.
for mergedChains.isDeleted(currOriginal) {
cr.log.CDebugf(ctx, "%v was deleted in the merged branch (%s)",
currOriginal, currPath)
if !currPath.hasValidParent() {
return path{}, BlockPointer{}, nil,
fmt.Errorf("Couldn't find valid merged parent path for %v",
unmergedOriginal)
}
// If this node has been deleted, we need to search
// backwards in the path to find the latest node that
// hasn't been deleted, and recreate nodes upward from
// there.
name := currPath.tailName()
mergedPath.path = append(mergedPath.path, pathNode{
BlockPointer: currOriginal,
Name: name,
})
parentPath := *currPath.parentPath()
parentOriginal, err :=
unmergedChains.originalFromMostRecent(parentPath.tailPointer())
if err != nil {
cr.log.CDebugf(ctx, "Couldn't find original pointer for %v",
parentPath.tailPointer())
return path{}, BlockPointer{}, nil, err
}
// Drop the merged rmOp since we're recreating it, and we
// don't want to replay that notification locally.
mergedChain, ok := mergedChains.byOriginal[parentOriginal]
if !ok {
continue
}
mergedMostRecent, err :=
mergedChains.mostRecentFromOriginalOrSame(currOriginal)
if err != nil {
return path{}, BlockPointer{}, nil, err
}
outer:
for i, op := range mergedChain.ops {
ro, ok := op.(*rmOp)
if !ok {
continue
}
// Use the unref'd pointer, and not the name, to identify
// the operation, since renames might have happened on the
// merged branch.
for _, unref := range ro.Unrefs() {
if unref != mergedMostRecent {
continue
}
mergedChain.ops =
append(mergedChain.ops[:i], mergedChain.ops[i+1:]...)
break outer
}
}
de, err := cr.fbo.blocks.GetDirtyEntry(
ctx, lState, unmergedChains.mostRecentMD, currPath)
if err != nil {
return path{}, BlockPointer{}, nil, err
}
co := newCreateOp(name, parentOriginal, de.Type)
co.AddUpdate(parentOriginal, parentOriginal)
co.setFinalPath(parentPath)
co.AddRefBlock(currOriginal)
if co.Type != Dir {
err = cr.addChildBlocksIfIndirectFile(ctx, lState,
currOriginal, unmergedChains, currPath, co)
if err != nil {
return path{}, BlockPointer{}, nil, err
}
}
// If this happens to have been renamed on the unmerged
// branch, drop the rm half of the rename operation; just
// leave it as a create.
if ri, ok := unmergedChains.renamedOriginals[currOriginal]; ok {
oldParent, ok := unmergedChains.byOriginal[ri.originalOldParent]
if !ok {
continue
}
for _, op := range oldParent.ops {
ro, ok := op.(*rmOp)
if !ok {
continue
}
if ro.OldName == ri.oldName {
ro.dropThis = true
break
}
}
// Replace the create op with the new recreate op,
// which contains the proper refblock.
newParent, ok := unmergedChains.byOriginal[ri.originalNewParent]
if !ok {
continue
}
for i, op := range newParent.ops {
oldCo, ok := op.(*createOp)
if !ok {
continue
}
if oldCo.NewName == ri.newName {
newParent.ops[i] = co
break
}
}
} else {
recreateOps = append(recreateOps, co)
}
currOriginal = parentOriginal
currPath = parentPath
}
// Now we have the latest pointer along the path that is
// shared between the branches. Our next step is to find the
// current merged path to the most recent version of that
// original. We can do that as follows:
// * If the pointer has been changed in the merged branch, we
// can search for it later using fbo.blocks.SearchForNodes
// * If it hasn't been changed, check if it has been renamed to
// somewhere else. If so, use fbo.blocks.SearchForNodes on
// that parent later.
// * Otherwise, iterate up the path towards the root.
var mostRecent BlockPointer
for i := len(currPath.path) - 1; i >= 0; i-- {
currOriginal, err := unmergedChains.originalFromMostRecent(
currPath.path[i].BlockPointer)
if err != nil {
cr.log.CDebugf(ctx, "Couldn't find original pointer for %v",
currPath.path[i])
return path{}, BlockPointer{}, nil, err
}
// Has it changed in the merged branch?
mostRecent, err = mergedChains.mostRecentFromOriginal(currOriginal)
if err == nil {
break
}
mergedPath.path = append(mergedPath.path, pathNode{
BlockPointer: currOriginal,
Name: currPath.path[i].Name,
})
// Has it been renamed?
if originalParent, newName, ok :=
mergedChains.renamedParentAndName(currOriginal); ok {
cr.log.CDebugf(ctx, "%v has been renamed in the merged branch",
currOriginal)
mostRecentParent, err :=
mergedChains.mostRecentFromOriginal(originalParent)
if err != nil {
cr.log.CDebugf(ctx, "Couldn't find original pointer for %v",
originalParent)
return path{}, BlockPointer{}, nil, err
}
mostRecent = mostRecentParent
// update the name for this renamed node
mergedPath.path[len(mergedPath.path)-1].Name = newName
break
}
}
// reverse the merged path
for i, j := 0, len(mergedPath.path)-1; i < j; i, j = i+1, j-1 {
mergedPath.path[i], mergedPath.path[j] =
mergedPath.path[j], mergedPath.path[i]
}
// reverse recreateOps
for i, j := 0, len(recreateOps)-1; i < j; i, j = i+1, j-1 {
recreateOps[i], recreateOps[j] = recreateOps[j], recreateOps[i]
}
return mergedPath, mostRecent, recreateOps, nil
}
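// Illustrative example (a sketch, not traced through real pointers):
// for an unmerged path /a/b/c where "b" was deleted in the merged
// branch, the first loop above produces a partial merged path [b, c]
// built from original pointers, plus a recreate op for "b" under "a";
// assuming "a" itself was modified in the merged branch, the second
// loop then returns a's most recent merged pointer as the point from
// which the full merged path can later be found via SearchForNodes.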
// resolveMergedPaths maps each tail most recent pointer for all the
// given unmerged paths to a corresponding path in the merged branch.
// The merged branch may be missing some nodes that have been deleted;
// in that case, the merged path will contain placeholder path nodes
// using the original pointers for those directories.
//
// This function also returns a set of createOps that can be used to
// recreate the missing directories in the merged branch. If the
// parent directory needing the create has been deleted, then the
// unref ptr in the createOp contains the original pointer for the
// directory rather than the most recent merged pointer.
//
// It also potentially returns a new slice of unmerged paths that the
// caller should combine with the existing slice, corresponding to
// deleted unmerged chains that still have relevant operations to
// resolve.
func (cr *ConflictResolver) resolveMergedPaths(ctx context.Context,
lState *lockState, unmergedPaths []path, unmergedChains *crChains,
mergedChains *crChains) (map[BlockPointer]path, []*createOp, []path, error) {
// maps each most recent unmerged pointer to the corresponding
// most recent merged path.
mergedPaths := make(map[BlockPointer]path)
// maps a merged most recent pointer to the set of unmerged most
// recent pointers that need some of their path filled in.
chainsToSearchFor := make(map[BlockPointer][]BlockPointer)
var ptrs []BlockPointer
// While we're at it, find any deleted unmerged directory chains
// containing operations, where the corresponding merged chain has
// changed. The unmerged ops will need to be re-applied in that
// case.
var newUnmergedPaths []path
for original, unmergedChain := range unmergedChains.byOriginal {
if !unmergedChains.isDeleted(original) || len(unmergedChain.ops) == 0 ||
unmergedChain.isFile() {
continue
}
mergedChain, ok := mergedChains.byOriginal[original]
if !ok || len(mergedChain.ops) == 0 {
continue
}
cr.log.CDebugf(ctx, "A modified unmerged path %v was deleted but "+
"also modified in the merged branch %v",
unmergedChain.mostRecent, mergedChain.mostRecent)
// Fake the unmerged path; only its tail pointer matters here.
unmergedPath := path{
FolderBranch: cr.fbo.folderBranch,
path: []pathNode{{BlockPointer: unmergedChain.mostRecent}},
}
chainsToSearchFor[mergedChain.mostRecent] =
append(chainsToSearchFor[mergedChain.mostRecent],
unmergedChain.mostRecent)
ptrs = append(ptrs, mergedChain.mostRecent)
newUnmergedPaths = append(newUnmergedPaths, unmergedPath)
}
// Skip out early if there's nothing to do.
if len(unmergedPaths) == 0 && len(ptrs) == 0 {
return mergedPaths, nil, nil, nil
}
// For each unmerged path, find the corresponding most recent
// pointer in the merged path. Track which entries need to be
// re-created.
var recreateOps []*createOp
createsSeen := make(map[createMapKey]bool)
for _, p := range unmergedPaths {
mergedPath, mostRecent, ops, err := cr.resolveMergedPathTail(
ctx, lState, p, unmergedChains, mergedChains)
if err != nil {
return nil, nil, nil, err
}
// Save any recreateOps we haven't seen yet.
for _, op := range ops {
key := createMapKey{op.Dir.Unref, op.NewName}
if _, ok := createsSeen[key]; ok {
continue
}
createsSeen[key] = true
recreateOps = append(recreateOps, op)
}
// At the end of this process, we are left with a merged path
// that begins just after mostRecent. We will fill this in
// later with the SearchForNodes result.
mergedPaths[p.tailPointer()] = mergedPath
if mostRecent.IsInitialized() {
// Remember to fill in the corresponding mergedPath once we
// get mostRecent's full path.
chainsToSearchFor[mostRecent] =
append(chainsToSearchFor[mostRecent], p.tailPointer())
}
}
// Now we can search for all the merged paths that need to be
// updated due to unmerged operations. Start with a clean node
// cache for the merged branch.
mergedNodeCache := newNodeCacheStandard(cr.fbo.folderBranch)
// Initialize the root node. There will always be at least one
// unmerged path.
mergedNodeCache.GetOrCreate(mergedChains.mostRecentMD.data.Dir.BlockPointer,
unmergedPaths[0].path[0].Name, nil)
newPtrs := make(map[BlockPointer]bool)
for ptr := range mergedChains.byMostRecent {
newPtrs[ptr] = true
}
for ptr := range chainsToSearchFor {
ptrs = append(ptrs, ptr)
}
if len(ptrs) == 0 {
// Nothing to search for
return mergedPaths, recreateOps, newUnmergedPaths, nil
}
nodeMap, err := cr.fbo.blocks.SearchForNodes(
ctx, mergedNodeCache, ptrs, newPtrs, mergedChains.mostRecentMD)
if err != nil {
return nil, nil, nil, err
}
for ptr, n := range nodeMap {
if n == nil {
// All the pointers we're looking for should definitely be
// findable in the merged branch somewhere.
return nil, nil, nil, NodeNotFoundError{ptr}
}
p := mergedNodeCache.PathFromNode(n)
for _, unmergedMostRecent := range chainsToSearchFor[ptr] {
// Prepend the found path to the existing path
mergedPath := mergedPaths[unmergedMostRecent]
newPath := make([]pathNode, len(p.path)+len(mergedPath.path))
copy(newPath[:len(p.path)], p.path)
copy(newPath[len(p.path):], mergedPath.path)
mergedPath.path = newPath
mergedPaths[unmergedMostRecent] = mergedPath
// update the final paths for those corresponding merged
// chains
mergedMostRecent := mergedPath.tailPointer()
chain, ok := mergedChains.byMostRecent[mergedMostRecent]
if !ok {
// it's ok for the merged path not to exist because we
// might still need to create it.
continue
}
for _, op := range chain.ops {
op.setFinalPath(mergedPath)
}
}
}
return mergedPaths, recreateOps, newUnmergedPaths, nil
}
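// For instance (illustrative): a partial merged tail [b, c] from
// resolveMergedPathTail, combined with a SearchForNodes result of
// [root, a] for the corresponding mostRecent pointer, is stitched
// into the full merged path [root, a, b, c] for that unmerged tail
// pointer.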
// buildChainsAndPaths makes crChains for both the unmerged and merged
// branches since the branch point, along with the corresponding full
// paths for those changes and any new recreate ops, and returns the
// MDs used to compute all this. Note that even if err is non-nil, the
// merged MD list might be non-nil, to allow for better error handling.
func (cr *ConflictResolver) buildChainsAndPaths(
ctx context.Context, lState *lockState) (
unmergedChains, mergedChains *crChains, unmergedPaths []path,
mergedPaths map[BlockPointer]path, recreateOps []*createOp,
unmerged, merged []*RootMetadata, err error) {
// Fetch the merged and unmerged MDs
unmerged, merged, err = cr.getMDs(ctx, lState)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, err
}
if u, m := len(unmerged), len(merged); u == 0 || m == 0 {
cr.log.CDebugf(ctx, "Skipping merge process due to empty MD list: "+
"%d unmerged, %d merged", u, m)
return nil, nil, nil, nil, nil, nil, nil, nil
}
// Update the current input to reflect the MDs we'll actually be
// working with.
err = cr.updateCurrInput(ctx, unmerged, merged)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, err
}
// Canceled before we start the heavy lifting?
err = cr.checkDone(ctx)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, err
}
// Make the chains
unmergedChains, mergedChains, err = cr.makeChains(ctx, unmerged, merged)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, err
}
// TODO: if the root node didn't change in either chain, we can
// short circuit the rest of the process with a really easy
// merge...
// Get the full path for every most recent unmerged pointer with a
// chain of unmerged operations, and which was not created or
// deleted within the unmerged branch.
unmergedPaths, err = cr.getPathsFromChains(ctx, unmergedChains,
cr.fbo.nodeCache)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, err
}
// Add in any directory paths that were created in both branches.
newUnmergedPaths, err := cr.findCreatedDirsToMerge(ctx, unmergedPaths,
unmergedChains, mergedChains)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, err
}
unmergedPaths = append(unmergedPaths, newUnmergedPaths...)
if len(newUnmergedPaths) > 0 {
sort.Sort(crSortedPaths(unmergedPaths))
}
// Find the corresponding path in the merged branch for each of
// these unmerged paths, and the set of any createOps needed to
// apply these unmerged operations in the merged branch.
mergedPaths, recreateOps, newUnmergedPaths, err = cr.resolveMergedPaths(
ctx, lState, unmergedPaths, unmergedChains, mergedChains)
if err != nil {
// Return the merged MD list in this error case, to allow the
// error handling code to unstage if necessary.
return nil, nil, nil, nil, nil, nil, merged, err
}
unmergedPaths = append(unmergedPaths, newUnmergedPaths...)
if len(newUnmergedPaths) > 0 {
sort.Sort(crSortedPaths(unmergedPaths))
}
return unmergedChains, mergedChains, unmergedPaths, mergedPaths,
recreateOps, unmerged, merged, nil
}
// addRecreateOpsToUnmergedChains inserts each recreateOp into its
// appropriate unmerged chain, creating one if it doesn't exist yet.
// It also adds entries as necessary to mergedPaths, and returns a
// slice of new unmergedPaths to be added.
func (cr *ConflictResolver) addRecreateOpsToUnmergedChains(ctx context.Context,
recreateOps []*createOp, unmergedChains *crChains, mergedChains *crChains,
mergedPaths map[BlockPointer]path) ([]path, error) {
if len(recreateOps) == 0 {
return nil, nil
}
// First create a lookup table that maps every block pointer in
// every merged path to a corresponding key in the mergedPaths map.
keys := make(map[BlockPointer]BlockPointer)
for ptr, p := range mergedPaths {
for _, node := range p.path {
keys[node.BlockPointer] = ptr
}
}
// we know all of these recreate ops were authored by the current user
kbpki := cr.config.KBPKI()
_, uid, err := kbpki.GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
winfo, err := newWriterInfo(ctx, cr.config, uid, unmergedChains.mostRecentMD.writerKID())
if err != nil {
return nil, err
}
var newUnmergedPaths []path
for _, rop := range recreateOps {
rop.setWriterInfo(winfo)
// If rop.Dir.Unref is a merged most recent pointer, look up the
// original. Otherwise rop.Dir.Unref is the original. Use the
// original to look up the appropriate unmerged chain and stick
// this op at the front.
origTargetPtr, err :=
mergedChains.originalFromMostRecentOrSame(rop.Dir.Unref)
if err != nil {
return nil, err
}
chain, ok := unmergedChains.byOriginal[origTargetPtr]
if !ok {
return nil, fmt.Errorf("recreateOp for %v has no chain",
origTargetPtr)
}
if len(chain.ops) == 0 {
newUnmergedPaths = append(newUnmergedPaths, rop.getFinalPath())
}
chain.ops = append([]op{rop}, chain.ops...)
// Look up the corresponding unmerged most recent pointer, and
// check whether there's a merged path for it yet. If not,
// create one by looking it up in the lookup table (created
// above) and taking the appropriate subpath.
_, ok = mergedPaths[chain.mostRecent]
if !ok {
mergedMostRecent := chain.original
if !mergedChains.isDeleted(chain.original) {
if mChain, ok := mergedChains.byOriginal[chain.original]; ok {
mergedMostRecent = mChain.mostRecent
}
}
key, ok := keys[mergedMostRecent]
if !ok {
return nil, fmt.Errorf("Couldn't find a merged path "+
"containing the target of a recreate op: %v",
mergedMostRecent)
}
currPath := mergedPaths[key]
for currPath.tailPointer() != mergedMostRecent &&
currPath.hasValidParent() {
currPath = *currPath.parentPath()
}
mergedPaths[chain.mostRecent] = currPath
}
}
return newUnmergedPaths, nil
}
// convertCreateIntoSymlinkOrCopy finds the create operation for the
// given node in the chain, and turns it into one that creates a new
// symlink (for directories) or a file copy (for files). It also
// marks the corresponding remove operation in the old parent chain to
// be dropped.
func (cr *ConflictResolver) convertCreateIntoSymlinkOrCopy(ctx context.Context,
ptr BlockPointer, info renameInfo, chain *crChain, unmergedChains *crChains,
mergedChains *crChains, symPath string) error {
found := false
outer:
for _, op := range chain.ops {
switch cop := op.(type) {
case *createOp:
if !cop.renamed || cop.NewName != info.newName {
continue
}
if cop.Type == Dir {
cop.Type = Sym
cop.crSymPath = symPath
cop.RefBlocks = nil
} else {
cop.forceCopy = true
}
cop.renamed = false
newInfo := renameInfo{
originalOldParent: info.originalNewParent,
oldName: info.newName,
originalNewParent: info.originalOldParent,
newName: info.oldName,
}
if newInfo2, ok := mergedChains.renamedOriginals[ptr]; ok {
// If this node was already moved in the merged
// branch, we need to tweak the merged branch's rename
// info so that it looks like it's being renamed from
// the new unmerged location.
newInfo = newInfo2
newInfo.originalOldParent = info.originalNewParent
newInfo.oldName = info.newName
} else {
// invert the op in the merged chains
invertCreate := newRmOp(info.newName,
info.originalNewParent)
invertCreate.Dir.Ref = info.originalNewParent
invertRm := newCreateOp(info.oldName,
info.originalOldParent, cop.Type)
invertRm.Dir.Ref = info.originalOldParent
invertRm.renamed = true
invertRm.AddRefBlock(ptr)
mergedNewMostRecent, err := mergedChains.
mostRecentFromOriginalOrSame(info.originalNewParent)
if err != nil {
return err
}
mergedOldMostRecent, err := mergedChains.
mostRecentFromOriginalOrSame(info.originalOldParent)
if err != nil {
return err
}
prependOpsToChain(mergedOldMostRecent, mergedChains,
invertRm)
prependOpsToChain(mergedNewMostRecent, mergedChains,
invertCreate)
}
cr.log.CDebugf(ctx, "Putting new merged rename info "+
"%v -> %v (symPath: %v)", ptr, newInfo, symPath)
mergedChains.renamedOriginals[ptr] = newInfo
// Fix up the corresponding rmOp to make sure
// that it gets dropped
oldParentChain :=
unmergedChains.byOriginal[info.originalOldParent]
for _, oldOp := range oldParentChain.ops {
ro, ok := oldOp.(*rmOp)
if !ok {
continue
}
if ro.OldName == info.oldName {
// No need to copy since this createOp
// must have been created as part of
// conflict resolution.
ro.dropThis = true
break
}
}
found = true
break outer
}
}
if !found {
return fmt.Errorf("fixRenameConflicts: couldn't find "+
"rename op corresponding to %v,%s", ptr, info.newName)
}
return nil
}
// fixRenameConflicts checks every unmerged createOp associated with a
// rename to see if it will cause a cycle. If so, it makes it a
// symlink create operation instead. It also checks whether a
// particular node had been renamed in both branches; if so, it will
// copy files, and use symlinks for directories.
func (cr *ConflictResolver) fixRenameConflicts(ctx context.Context,
unmergedChains *crChains, mergedChains *crChains,
mergedPaths map[BlockPointer]path) ([]path, error) {
// For every renamed block pointer in the unmerged chains:
// * Check if any BlockPointer in its merged path contains a relative of
// itself
// * If so, replace the corresponding unmerged create operation with a
// symlink creation to the new merged path instead.
// So, if in the merged branch someone did `mv b/ a/` and in the unmerged
// branch someone did `mv a/ b/`, the conflict resolution would end up with
// `a/b/a` where the second a is a symlink to "../".
//
// To calculate what the symlink should be, consider the following:
// * The unmerged path for the new parent of ptr P is u_1/u_2/.../u_n
// * u_i is the largest i <= n such that the corresponding block
// can be mapped to a node in merged branch (pointer m_j).
// * The full path to m_j in the merged branch is m_1/m_2/m_3/.../m_j
// * For a rename cycle to occur, some m_x where x <= j must be a
// descendant of P's original pointer.
// * The full merged path to the parent of the second copy of P will
// then be: m_1/m_2/.../m_x/.../m_j/u_i+1/.../u_n.
// * Then, the symlink to put under P's name in u_n is "../"*((n-i)+(j-x))
// In the case that u_n is a directory that was newly-created in the
// unmerged branch, we also need to construct a complete corresponding
// merged path, for use in later stages (like executing actions). This
// merged path is just m_1/.../m_j/u_i+1/.../u_n, using the most recent
// unmerged pointers.
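// Illustrative arithmetic (an assumed concrete case, not traced
// through real pointers): if the unmerged path's last index is n=3
// and a merged mapping is found at index i=2 (unmergedWalkBack =
// n-i = 1), and the cycle match sits at index x=3 of a merged path
// whose last index is j=4 (mergedWalkBack = j-x = 1), then the
// symlink target computed below is
// "./" + strings.Repeat("../", 2) == "./../../".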
var newUnmergedPaths []path
var removeRenames []BlockPointer
var doubleRenames []BlockPointer // merged most recent ptrs
for ptr, info := range unmergedChains.renamedOriginals {
// Also, we need to get the merged paths for anything that was
// renamed in both branches, if they are different.
if mergedInfo, ok := mergedChains.renamedOriginals[ptr]; ok &&
(info.originalNewParent != mergedInfo.originalNewParent ||
info.newName != mergedInfo.newName) {
mergedMostRecent, err :=
mergedChains.mostRecentFromOriginalOrSame(ptr)
if err != nil {
return nil, err
}
doubleRenames = append(doubleRenames, mergedMostRecent)
continue
}
// The merged path is keyed by the most recent unmerged tail
// pointer.
parent, err :=
unmergedChains.mostRecentFromOriginal(info.originalNewParent)
if err != nil {
return nil, err
}
mergedPath, ok := mergedPaths[parent]
unmergedWalkBack := 0 // (n-i) in the equation above
var unmergedPath path
if !ok {
// If this parent was newly created in the unmerged
// branch, we need to look up its earliest parent that
// existed in both branches.
if !unmergedChains.isCreated(info.originalNewParent) {
// There should definitely be a merged path for this
// parent, since it has a create operation.
return nil, fmt.Errorf("fixRenameConflicts: couldn't find "+
"merged path for %v", parent)
}
// Reuse some code by creating a new chains object
// consisting of only this node.
newChains := newCRChainsEmpty()
chain := unmergedChains.byOriginal[info.originalNewParent]
newChains.byOriginal[chain.original] = chain
newChains.byMostRecent[chain.mostRecent] = chain
// Fake out the rest of the chains to populate newPtrs
for _, c := range unmergedChains.byOriginal {
if c.original == chain.original {
continue
}
newChain := &crChain{
original: c.original,
mostRecent: c.mostRecent,
}
newChains.byOriginal[c.original] = newChain
newChains.byMostRecent[c.mostRecent] = newChain
}
newChains.mostRecentMD = unmergedChains.mostRecentMD
unmergedPaths, err := cr.getPathsFromChains(ctx, newChains,
cr.fbo.nodeCache)
if err != nil {
return nil, err
}
if len(unmergedPaths) != 1 {
return nil, fmt.Errorf("fixRenameConflicts: couldn't find the "+
"unmerged path for %v", info.originalNewParent)
}
unmergedPath = unmergedPaths[0]
// Look backwards to find the first parent with a merged path.
n := len(unmergedPath.path) - 1
for i := n; i >= 0; i-- {
mergedPath, ok = mergedPaths[unmergedPath.path[i].BlockPointer]
if ok {
unmergedWalkBack = n - i
break
}
}
if !ok {
return nil, fmt.Errorf("fixRenameConflicts: couldn't find any "+
"merged path for any parents of %v", parent)
}
}
for x, pn := range mergedPath.path {
original, err :=
mergedChains.originalFromMostRecent(pn.BlockPointer)
if err != nil {
// This node wasn't changed in the merged branch
original = pn.BlockPointer
}
if original != ptr {
continue
}
// If any node on this path matches the renamed pointer,
// we have a cycle.
chain, ok := unmergedChains.byMostRecent[parent]
if !ok {
return nil, fmt.Errorf("fixRenameConflicts: no chain for "+
"parent %v", parent)
}
j := len(mergedPath.path) - 1
// (j-x) in the above equation
mergedWalkBack := j - x
walkBack := unmergedWalkBack + mergedWalkBack
// Mark this as a symlink, and the resolver
// will take care of making it a symlink in
// the merged branch later. No need to copy
// since this createOp must have been created
// as part of conflict resolution.
symPath := "./" + strings.Repeat("../", walkBack)
cr.log.CDebugf(ctx, "Creating symlink %s at "+
"merged path %s", symPath, mergedPath)
err = cr.convertCreateIntoSymlinkOrCopy(ctx, ptr, info, chain,
unmergedChains, mergedChains, symPath)
if err != nil {
return nil, err
}
if unmergedWalkBack > 0 {
cr.log.CDebugf(ctx, "Adding new unmerged path %s",
unmergedPath)
newUnmergedPaths = append(newUnmergedPaths,
unmergedPath)
// Fake a merged path to make sure these
// actions will be taken.
mergedLen := len(mergedPath.path)
pLen := mergedLen + unmergedWalkBack
p := path{
FolderBranch: mergedPath.FolderBranch,
path: make([]pathNode, pLen),
}
unmergedStart := len(unmergedPath.path) -
unmergedWalkBack
copy(p.path[:mergedLen], mergedPath.path)
copy(p.path[mergedLen:],
unmergedPath.path[unmergedStart:])
mergedPaths[unmergedPath.tailPointer()] = p
}
removeRenames = append(removeRenames, ptr)
}
}
for _, ptr := range removeRenames {
delete(unmergedChains.renamedOriginals, ptr)
}
if len(doubleRenames) == 0 {
return newUnmergedPaths, nil
}
// Make chains for the new merged parents of all the double renames.
newPtrs := make(map[BlockPointer]bool)
ptrs := make([]BlockPointer, len(doubleRenames))
copy(ptrs, doubleRenames)
// Mark all the merged most recent pointers as new, to populate newPtrs.
for ptr := range mergedChains.byMostRecent {
newPtrs[ptr] = true
}
mergedNodeCache := newNodeCacheStandard(cr.fbo.folderBranch)
mergedNodeCache.GetOrCreate(mergedChains.mostRecentMD.data.Dir.BlockPointer,
string(mergedChains.mostRecentMD.GetTlfHandle().GetCanonicalName()), nil)
nodeMap, err := cr.fbo.blocks.SearchForNodes(
ctx, mergedNodeCache, ptrs, newPtrs, mergedChains.mostRecentMD)
if err != nil {
return nil, err
}
for _, ptr := range doubleRenames {
// Find the merged paths
node, ok := nodeMap[ptr]
if !ok || node == nil {
return nil, fmt.Errorf("Couldn't find merged path for "+
"doubly-renamed pointer %v", ptr)
}
original, err :=
mergedChains.originalFromMostRecentOrSame(ptr)
if err != nil {
return nil, err
}
unmergedInfo, ok := unmergedChains.renamedOriginals[original]
if !ok {
return nil, fmt.Errorf("fixRenameConflicts: can't find the "+
"unmerged rename info for %v during double-rename resolution",
original)
}
mergedInfo, ok := mergedChains.renamedOriginals[original]
if !ok {
return nil, fmt.Errorf("fixRenameConflicts: can't find the "+
"merged rename info for %v during double-rename resolution",
original)
}
// Look up the unmerged chain for this rename's new parent.
chain, ok := unmergedChains.byOriginal[unmergedInfo.originalNewParent]
if !ok {
return nil, fmt.Errorf("fixRenameConflicts: no chain for "+
"parent %v", unmergedInfo.originalNewParent)
}
// For directories, the symlinks traverse down the merged path
// to the first common node, and then back up to the new
// parent/name. TODO: what happens when some element along
// the merged path also got renamed by the unmerged branch?
// The symlink would likely be wrong in that case.
mergedPathOldParent, ok := mergedPaths[chain.mostRecent]
if !ok {
return nil, fmt.Errorf("fixRenameConflicts: couldn't find "+
"merged path for old parent %v", chain.mostRecent)
}
mergedPathNewParent := mergedNodeCache.PathFromNode(node)
symPath := "./"
newParentStart := 0
outer:
for i := len(mergedPathOldParent.path) - 1; i >= 0; i-- {
mostRecent := mergedPathOldParent.path[i].BlockPointer
for j, pnode := range mergedPathNewParent.path {
original, err :=
unmergedChains.originalFromMostRecentOrSame(mostRecent)
if err != nil {
return nil, err
}
mergedMostRecent, err :=
mergedChains.mostRecentFromOriginalOrSame(original)
if err != nil {
return nil, err
}
if pnode.BlockPointer == mergedMostRecent {
newParentStart = j
break outer
}
}
symPath += "../"
}
// Move up directories starting from beyond the common parent,
// to right before the actual node.
for i := newParentStart + 1; i < len(mergedPathNewParent.path)-1; i++ {
symPath += mergedPathNewParent.path[i].Name + "/"
}
symPath += mergedInfo.newName
err = cr.convertCreateIntoSymlinkOrCopy(ctx, original, unmergedInfo,
chain, unmergedChains, mergedChains, symPath)
if err != nil {
return nil, err
}
}
return newUnmergedPaths, nil
}
// addMergedRecreates drops any unmerged operations that remove a node
// that was modified in the merged branch, and adds a create op to the
// merged chain so that the node will be re-created locally.
func (cr *ConflictResolver) addMergedRecreates(ctx context.Context,
unmergedChains *crChains, mergedChains *crChains) error {
for _, unmergedChain := range unmergedChains.byMostRecent {
// First check for nodes that have been deleted in the unmerged
// branch, but modified in the merged branch, and drop those
// unmerged operations.
for _, untypedOp := range unmergedChain.ops {
ro, ok := untypedOp.(*rmOp)
if !ok {
continue
}
// Perhaps the rm target has been renamed somewhere else,
// before eventually being deleted. In this case, we have
// to look up the original by iterating over
// renamedOriginals.
if len(ro.Unrefs()) == 0 {
for original, info := range unmergedChains.renamedOriginals {
if info.originalOldParent == unmergedChain.original &&
info.oldName == ro.OldName &&
unmergedChains.isDeleted(original) {
ro.AddUnrefBlock(original)
break
}
}
}
for _, ptr := range ro.Unrefs() {
unrefOriginal, err :=
unmergedChains.originalFromMostRecentOrSame(ptr)
if err != nil {
return err
}
if c, ok := mergedChains.byOriginal[unrefOriginal]; ok {
ro.dropThis = true
// Need to prepend a create here to the merged parent,
// in order to catch any conflicts.
parentOriginal := unmergedChain.original
name := ro.OldName
if newParent, newName, ok :=
mergedChains.renamedParentAndName(unrefOriginal); ok {
// It was renamed in the merged branch, so
// recreate with the new parent and new name.
parentOriginal = newParent
name = newName
} else if info, ok :=
unmergedChains.renamedOriginals[unrefOriginal]; ok {
// It was only renamed in the old parent, so
// use the old parent and original name.
parentOriginal = info.originalOldParent
name = info.oldName
}
chain, ok := mergedChains.byOriginal[parentOriginal]
if !ok {
return fmt.Errorf("Couldn't find chain for parent %v "+
"of merged entry %v we're trying to recreate",
parentOriginal, unrefOriginal)
}
t := Dir
if c.isFile() {
// TODO: how to fix this up for executables
// and symlinks? Only matters for checking
// conflicts if something with the same name
// is created on the unmerged branch.
t = File
}
co := newCreateOp(name, chain.original, t)
co.Dir.Ref = chain.original
co.AddRefBlock(c.mostRecent)
winfo, err := newWriterInfo(ctx, cr.config,
mergedChains.mostRecentMD.LastModifyingWriter,
mergedChains.mostRecentMD.writerKID())
if err != nil {
return err
}
co.setWriterInfo(winfo)
chain.ops = append([]op{co}, chain.ops...)
cr.log.CDebugf(ctx, "Re-created rm'd merge-modified node "+
"%v with operation %s in parent %v", unrefOriginal, co,
parentOriginal)
}
}
}
}
return nil
}
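// Illustrative example (a sketch of a typical case): if the unmerged
// branch did `rm /a/foo` while the merged branch modified foo, the
// rmOp above is marked dropThis, and a createOp for "foo" (with
// foo's most recent merged pointer as its ref block) is prepended to
// the merged chain for /a, so the node is re-created locally and
// same-name conflicts can still be caught.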
// getActionsToMerge returns the set of actions needed to merge each
// unmerged chain of operations, in a map keyed by the tail pointer of
// the corresponding merged path.
func (cr *ConflictResolver) getActionsToMerge(unmergedChains *crChains,
mergedChains *crChains, mergedPaths map[BlockPointer]path) (
map[BlockPointer]crActionList, error) {
actionMap := make(map[BlockPointer]crActionList)
for unmergedMostRecent, unmergedChain := range unmergedChains.byMostRecent {
original := unmergedChain.original
// If this is a file that has been deleted in the merged
// branch, a corresponding recreate op will take care of it,
// no need to do anything here.
// We don't need the "ok" value from this lookup because it's
// fine to pass a nil mergedChain into crChain.getActionsToMerge.
mergedChain := mergedChains.byOriginal[original]
mergedPath, ok := mergedPaths[unmergedMostRecent]
if !ok {
// This most likely means that the file was created or
// deleted in the unmerged branch and thus has no
// corresponding merged path yet.
continue
}
actions, err := unmergedChain.getActionsToMerge(
cr.config.ConflictRenamer(), mergedPath, mergedChain)
if err != nil {
return nil, err
}
if len(actions) > 0 {
actionMap[mergedPath.tailPointer()] = actions
}
}
return actionMap, nil
}
// collapseActions combines file updates with their parent directory
// updates, because conflict resolution only happens within a
// directory (i.e., files are never merged directly, they are just
// renamed/copied). It also collapses each action list to get rid of
// redundant actions.
func collapseActions(unmergedChains *crChains,
mergedPaths map[BlockPointer]path,
actionMap map[BlockPointer]crActionList) {
for unmergedMostRecent, chain := range unmergedChains.byMostRecent {
if !chain.isFile() {
continue
}
// Find the parent directory path and combine
p, ok := mergedPaths[unmergedMostRecent]
if !ok {
continue
}
fileActions, ok := actionMap[p.tailPointer()]
if !ok {
continue
}
parentPath := *p.parentPath()
mergedParent := parentPath.tailPointer()
parentActions := actionMap[mergedParent]
combinedActions := append(parentActions, fileActions...)
actionMap[mergedParent] = combinedActions
mergedPaths[unmergedMostRecent] = parentPath
delete(actionMap, p.tailPointer())
}
for ptr, actions := range actionMap {
actionMap[ptr] = actions.collapse()
}
}
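// For example (illustrative): the action list for file /a/f, keyed by
// f's merged tail pointer, is appended to the action list for /a, and
// mergedPaths[f's unmerged pointer] is repointed at /a's path, so the
// later per-directory action execution sees everything in one place.
// computeActions builds the complete action plan for the resolver: it
// folds the recreateOps into the unmerged chains, converts rename
// cycles and double renames into symlinks or copies, re-creates
// merge-modified nodes that were removed in the unmerged branch, and
// returns the collapsed per-directory action map along with any new
// unmerged paths discovered along the way.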
func (cr *ConflictResolver) computeActions(ctx context.Context,
unmergedChains *crChains, mergedChains *crChains,
mergedPaths map[BlockPointer]path, recreateOps []*createOp) (
map[BlockPointer]crActionList, []path, error) {
// Process all the recreateOps, adding them to the appropriate
// unmerged chains.
newUnmergedPaths, err := cr.addRecreateOpsToUnmergedChains(
ctx, recreateOps, unmergedChains, mergedChains, mergedPaths)
if err != nil {
return nil, nil, err
}
// Fix any rename cycles by turning the corresponding unmerged
// createOp into a symlink entry type.
moreNewUnmergedPaths, err := cr.fixRenameConflicts(ctx, unmergedChains,
mergedChains, mergedPaths)
if err != nil {
return nil, nil, err
}
newUnmergedPaths = append(newUnmergedPaths, moreNewUnmergedPaths...)
// Recreate any modified merged nodes that were rm'd in the
// unmerged branch.
if err := cr.addMergedRecreates(
ctx, unmergedChains, mergedChains); err != nil {
return nil, nil, err
}
actionMap, err :=
cr.getActionsToMerge(unmergedChains, mergedChains, mergedPaths)
if err != nil {
return nil, nil, err
}
// Finally, merge the file actions back into their parent
// directory action list, and collapse everything together.
collapseActions(unmergedChains, mergedPaths, actionMap)
return actionMap, newUnmergedPaths, nil
}
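// fetchDirBlockCopy returns a deep copy of the directory block at the
// tail of the given path, consulting (and populating) the given local
// block cache so that each directory block is copied at most once.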
func (cr *ConflictResolver) fetchDirBlockCopy(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, lbc localBcache) (
*DirBlock, error) {
ptr := dir.tailPointer()
// TODO: lock lbc if we parallelize
if block, ok := lbc[ptr]; ok {
return block, nil
}
dblock, err := cr.fbo.blocks.GetDirBlockForReading(
ctx, lState, md, ptr, dir.Branch, dir)
if err != nil {
return nil, err
}
dblock, err = dblock.DeepCopy(cr.config.Codec())
if err != nil {
return nil, err
}
lbc[ptr] = dblock
return dblock, nil
}
// fileBlockMap maps latest merged block pointer to a map of final
// merged name -> file block.
type fileBlockMap map[BlockPointer]map[string]*FileBlock
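// makeFileBlockDeepCopy deep-copies the file block at ptr, gives the
// copy a fresh identity (a new temporary block ID for indirect
// blocks, or a new ref nonce for direct blocks), records it in the
// given fileBlockMap under the merged parent pointer and final name,
// and returns the new pointer. For example, a copied file "foo" under
// merged directory pointer d ends up at blocks[d]["foo"].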
func (cr *ConflictResolver) makeFileBlockDeepCopy(ctx context.Context,
lState *lockState, chains *crChains, mergedMostRecent BlockPointer, parentPath path,
name string, ptr BlockPointer, blocks fileBlockMap) (
BlockPointer, error) {
md := chains.mostRecentMD
fblock, err := cr.fbo.blocks.GetFileBlockForReading(ctx, lState, md, ptr,
parentPath.Branch, parentPath.ChildPath(name, ptr))
if err != nil {
return BlockPointer{}, err
}
fblock, err = fblock.DeepCopy(cr.config.Codec())
if err != nil {
return BlockPointer{}, err
}
newPtr := ptr
_, uid, err := cr.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return BlockPointer{}, err
}
if fblock.IsInd {
newID, err := cr.config.Crypto().MakeTemporaryBlockID()
if err != nil {
return BlockPointer{}, err
}
newPtr = BlockPointer{
ID: newID,
KeyGen: md.LatestKeyGeneration(),
DataVer: cr.config.DataVersion(),
Creator: uid,
RefNonce: zeroBlockRefNonce,
}
} else {
newPtr.RefNonce, err = cr.config.Crypto().MakeBlockRefNonce()
if err != nil {
return BlockPointer{}, err
}
newPtr.SetWriter(uid)
}
cr.log.CDebugf(ctx, "Deep copying file %s: %v -> %v", name, ptr, newPtr)
// Mark this as having been created during this chain, so that
// later during block accounting we can infer the origin of the
// block.
chains.createdOriginals[newPtr] = true
// If this file was created within the branch, we should clean up
// all the old block pointers.
original, err := chains.originalFromMostRecentOrSame(ptr)
if err != nil {
return BlockPointer{}, err
}
newlyCreated := chains.isCreated(original)
if newlyCreated {
chains.toUnrefPointers[original] = true
}
if _, ok := blocks[mergedMostRecent]; !ok {
blocks[mergedMostRecent] = make(map[string]*FileBlock)
}
// Dup all of the leaf blocks.
// TODO: deal with multiple levels of indirection.
if fblock.IsInd {
for i, iptr := range fblock.IPtrs {
if newlyCreated {
chains.toUnrefPointers[iptr.BlockPointer] = true
}
// Generate a new nonce for each one.
iptr.RefNonce, err = cr.config.Crypto().MakeBlockRefNonce()
if err != nil {
return BlockPointer{}, err
}
iptr.SetWriter(uid)
fblock.IPtrs[i] = iptr
chains.createdOriginals[iptr.BlockPointer] = true
}
}
blocks[mergedMostRecent][name] = fblock
return newPtr, nil
}
func (cr *ConflictResolver) doActions(ctx context.Context,
lState *lockState, unmergedChains *crChains, mergedChains *crChains,
unmergedPaths []path, mergedPaths map[BlockPointer]path,
actionMap map[BlockPointer]crActionList, lbc localBcache,
newFileBlocks fileBlockMap) error {
// For each set of actions:
// * Find the corresponding chains
// * Make a reference to each slice of ops
// * Get the unmerged block.
// * Get the merged block if it's not already in the local cache, and
// make a copy.
// * Get the merged block
// * Do each action, updating the ops references to the returned ones
// At the end, the local block cache should contain all the
// updated merged blocks. A future phase will update the pointers
// in standard Merkle-tree-fashion.
doneActions := make(map[BlockPointer]bool)
for _, unmergedPath := range unmergedPaths {
unmergedMostRecent := unmergedPath.tailPointer()
unmergedChain, ok :=
unmergedChains.byMostRecent[unmergedMostRecent]
if !ok {
return fmt.Errorf("Couldn't find unmerged chain for %v",
unmergedMostRecent)
}
// If this is a file that has been deleted in the merged
// branch, a corresponding recreate op will take care of it,
// no need to do anything here.
// find the corresponding merged path
mergedPath, ok := mergedPaths[unmergedMostRecent]
if !ok {
// This most likely means that the file was created or
// deleted in the unmerged branch and thus has no
// corresponding merged path yet.
continue
}
if unmergedChain.isFile() {
// The unmerged path is actually the parent (the merged
// path was already corrected above).
unmergedPath = *unmergedPath.parentPath()
}
actions := actionMap[mergedPath.tailPointer()]
// Now get the directory blocks.
unmergedBlock, err := cr.fetchDirBlockCopy(ctx,
lState, unmergedChains.mostRecentMD, unmergedPath, lbc)
if err != nil {
return err
}
// recreateOps update the merged paths using original
// pointers; but if other stuff happened in the block before
// it was deleted (such as other removes) we want to preserve
// those.
var mergedBlock *DirBlock
if mergedChains.isDeleted(mergedPath.tailPointer()) {
mergedBlock = NewDirBlock().(*DirBlock)
lbc[mergedPath.tailPointer()] = mergedBlock
} else {
mergedBlock, err = cr.fetchDirBlockCopy(ctx,
lState, mergedChains.mostRecentMD, mergedPath,
lbc)
if err != nil {
return err
}
}
if len(actions) > 0 && !doneActions[mergedPath.tailPointer()] {
// Make sure we don't try to execute the same actions twice.
doneActions[mergedPath.tailPointer()] = true
// Collect any file block copies, keyed by their new temporary
// block IDs; later we will ready them.
unmergedFetcher := func(ctx context.Context, name string,
ptr BlockPointer) (BlockPointer, error) {
return cr.makeFileBlockDeepCopy(ctx, lState, unmergedChains,
mergedPath.tailPointer(), unmergedPath, name, ptr,
newFileBlocks)
}
mergedFetcher := func(ctx context.Context, name string,
ptr BlockPointer) (BlockPointer, error) {
return cr.makeFileBlockDeepCopy(ctx, lState, mergedChains,
mergedPath.tailPointer(), mergedPath, name,
ptr, newFileBlocks)
}
// Execute each action and save the modified ops back into
// each chain.
for _, action := range actions {
swap, newPtr, err := action.swapUnmergedBlock(unmergedChains,
mergedChains, unmergedBlock)
if err != nil {
return err
}
uBlock := unmergedBlock
if swap {
cr.log.CDebugf(ctx, "Swapping out block %v for %v",
newPtr, unmergedPath.tailPointer())
if newPtr == zeroPtr {
// Use this merged block
uBlock = mergedBlock
} else {
// Fetch the specified one. Don't need to make
// a copy since this will just be a source
// block.
dBlock, err := cr.fbo.blocks.GetDirBlockForReading(ctx, lState,
mergedChains.mostRecentMD, newPtr,
mergedPath.Branch, path{})
if err != nil {
return err
}
uBlock = dBlock
}
}
err = action.do(ctx, unmergedFetcher, mergedFetcher, uBlock,
mergedBlock)
if err != nil {
return err
}
}
}
// Now update the ops related to this exact path (not the ops
// for its parent!).
for _, action := range actions {
// unmergedMostRecent is for the correct pointer, but
// mergedPath may be for the parent in the case of files
// so we need to find the real mergedMostRecent pointer.
mergedMostRecent := unmergedChain.original
mergedChain, ok := mergedChains.byOriginal[unmergedChain.original]
if ok {
mergedMostRecent = mergedChain.mostRecent
}
err := action.updateOps(unmergedMostRecent, mergedMostRecent,
unmergedBlock, mergedBlock, unmergedChains, mergedChains)
if err != nil {
return err
}
}
}
return nil
}
type crRenameHelperKey struct {
parentOriginal BlockPointer
name string
}
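// crRenameHelperKey identifies the create half of a rename by the
// original pointer of its new parent and its new name; makeRevertedOps
// uses it to map each such create back to the renamed node's original
// pointer, e.g.:
//
//	renames[crRenameHelperKey{ri.originalNewParent, ri.newName}] = original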
// makeRevertedOps changes the BlockPointers of the corresponding
// operations for the given set of paths back to their originals,
// which allows other parts of conflict resolution to more easily
// build up the local and remote notifications needed. Also, it
// reverts rm/create pairs back into complete rename operations, for
// the purposes of notification, so this should only be called after
// all conflicts and actions have been resolved. It returns the
// complete slice of reverted operations.
func (cr *ConflictResolver) makeRevertedOps(ctx context.Context,
lState *lockState, sortedPaths []path, chains *crChains,
otherChains *crChains) ([]op, error) {
var ops []op
// Build a map of directory {original, name} -> renamed original.
// This will help us map create ops to the corresponding old
// parent.
renames := make(map[crRenameHelperKey]BlockPointer)
for original, ri := range chains.renamedOriginals {
renames[crRenameHelperKey{ri.originalNewParent, ri.newName}] = original
}
// Insert the operations starting closest to the root, so
// necessary directories are created first.
for i := len(sortedPaths) - 1; i >= 0; i-- {
ptr := sortedPaths[i].tailPointer()
chain, ok := chains.byMostRecent[ptr]
if !ok {
return nil, fmt.Errorf("makeRevertedOps: Couldn't find chain "+
"for %v", ptr)
}
for _, op := range chain.ops {
// Skip any rms that were part of a rename
if rop, ok := op.(*rmOp); ok && len(rop.Unrefs()) == 0 {
continue
}
// Turn the create half of a rename back into a full rename.
if cop, ok := op.(*createOp); ok && cop.renamed {
renameOriginal, ok := renames[crRenameHelperKey{
chain.original, cop.NewName}]
if !ok {
if cop.crSymPath != "" || cop.Type == Sym {
// For symlinks created by the CR process, we
// expect the rmOp to have been removed. For
// existing symlinks that were simply moved,
// there is no benefit in combining their
// create and rm ops back together since there
// is no corresponding node.
continue
}
return nil, fmt.Errorf("Couldn't find corresponding "+
"renamed original for %v, %s",
chain.original, cop.NewName)
}
if otherChains.isDeleted(renameOriginal) {
// If we are re-instating a deleted node, just use
// the create op.
op = chains.copyOpAndRevertUnrefsToOriginals(cop)
if cop.Type != Dir {
err := cr.addChildBlocksIfIndirectFile(ctx, lState,
renameOriginal, chains, cop.getFinalPath(), op)
if err != nil {
return nil, err
}
}
} else {
ri, ok := chains.renamedOriginals[renameOriginal]
if !ok {
return nil, fmt.Errorf("Couldn't find the rename info "+
"for original %v", renameOriginal)
}
rop := newRenameOp(ri.oldName, ri.originalOldParent,
ri.newName, ri.originalNewParent, renameOriginal,
cop.Type)
// Set the Dir.Ref fields to be the same as the Unref
// -- they will be fixed up later.
rop.AddUpdate(ri.originalOldParent, ri.originalOldParent)
if ri.originalNewParent != ri.originalOldParent {
rop.AddUpdate(ri.originalNewParent,
ri.originalNewParent)
}
for _, ptr := range cop.Unrefs() {
origPtr, err := chains.originalFromMostRecentOrSame(ptr)
if err != nil {
return nil, err
}
rop.AddUnrefBlock(origPtr)
}
op = rop
}
} else {
op = chains.copyOpAndRevertUnrefsToOriginals(op)
}
ops = append(ops, op)
}
}
return ops, nil
}
// createResolvedMD creates a MD update that will be merged into the
// main folder as the resolving commit. It contains all of the
// unmerged operations, as well as a "dummy" operation at the end
// which will catch all of the BlockPointer updates. A later phase
// will move all of those updates into their proper locations within
// the other operations.
func (cr *ConflictResolver) createResolvedMD(ctx context.Context,
lState *lockState, unmergedPaths []path, unmergedChains *crChains,
mergedChains *crChains) (*RootMetadata, error) {
currMD := mergedChains.mostRecentMD
newMD, err := currMD.MakeSuccessor(cr.config, true)
if err != nil {
return nil, err
}
// We also need to add in any creates that happened within
// newly-created directories (which aren't being merged with other
// newly-created directories), to ensure that the overall Refs are
// correct and that future CR processes can check those create ops
// for conflicts.
var newPaths []path
for original, chain := range unmergedChains.byOriginal {
if !unmergedChains.isCreated(original) ||
mergedChains.isCreated(original) {
continue
}
added := false
for i, op := range chain.ops {
if cop, ok := op.(*createOp); ok {
// Shallowly copy the create op and update its
// directory to the most recent pointer -- this won't
// work with the usual revert ops process because that
// skips chains which are newly-created within this
// branch.
newCreateOp := *cop
newCreateOp.Dir.Unref = chain.mostRecent
newCreateOp.Dir.Ref = chain.mostRecent
chain.ops[i] = &newCreateOp
if !added {
newPaths = append(newPaths, path{
FolderBranch: cr.fbo.folderBranch,
path: []pathNode{{
BlockPointer: chain.mostRecent}},
})
added = true
}
if cop.Type == Dir || len(cop.Refs()) == 0 {
continue
}
// Make sure to add any direct file blocks too,
// originating in later syncs.
ptr, err :=
unmergedChains.mostRecentFromOriginalOrSame(cop.Refs()[0])
if err != nil {
return nil, err
}
file := path{
FolderBranch: cr.fbo.folderBranch,
path: []pathNode{{BlockPointer: ptr}},
}
fblock, err := cr.fbo.blocks.GetFileBlockForReading(ctx, lState,
unmergedChains.mostRecentMD, ptr, file.Branch, file)
if err != nil {
return nil, err
}
if fblock.IsInd {
newCreateOp.RefBlocks = make([]BlockPointer,
len(fblock.IPtrs)+1)
newCreateOp.RefBlocks[0] = cop.Refs()[0]
for j, iptr := range fblock.IPtrs {
newCreateOp.RefBlocks[j+1] = iptr.BlockPointer
}
}
}
}
}
if len(newPaths) > 0 {
// Put the new paths at the beginning so they are processed
// last in sorted order.
unmergedPaths = append(newPaths, unmergedPaths...)
}
ops, err := cr.makeRevertedOps(
ctx, lState, unmergedPaths, unmergedChains, mergedChains)
if err != nil {
return nil, err
}
cr.log.CDebugf(ctx, "Remote notifications: %v", ops)
for _, op := range ops {
cr.log.CDebugf(ctx, "%s: refs %v", op, op.Refs())
newMD.AddOp(op)
}
// Add a final dummy operation to collect all of the block updates.
newMD.AddOp(newResolutionOp())
return newMD, nil
}
// crFixOpPointers takes in a slice of "reverted" ops (all referring
// to the original BlockPointers) and a map of BlockPointer updates
// (from original to the new most recent pointer), and corrects all
// the ops to use the new most recent pointers instead. It returns a
// new slice of these operations with room in the first slot for a
// dummy operation containing all the updates.
func crFixOpPointers(oldOps []op, updates map[BlockPointer]BlockPointer,
chains *crChains) (
[]op, error) {
newOps := make([]op, 0, len(oldOps)+1)
newOps = append(newOps, nil) // placeholder for dummy op
for _, op := range oldOps {
var updatesToFix []*blockUpdate
var ptrsToFix []*BlockPointer
switch realOp := op.(type) {
case *createOp:
updatesToFix = append(updatesToFix, &realOp.Dir)
// Since the created node was made exclusively during this
// branch, we can use the most recent pointer for that
// node as its ref.
refs := realOp.Refs()
realOp.RefBlocks = make([]BlockPointer, len(refs))
for i, ptr := range refs {
mostRecent, err := chains.mostRecentFromOriginalOrSame(ptr)
if err != nil {
return nil, err
}
realOp.RefBlocks[i] = mostRecent
ptrsToFix = append(ptrsToFix, &realOp.RefBlocks[i])
}
// The leading resolutionOp will take care of the updates.
realOp.Updates = nil
case *rmOp:
updatesToFix = append(updatesToFix, &realOp.Dir)
// Since the rm'd node was made exclusively during this
// branch, we can use the original pointer for that
// node as its unref.
unrefs := realOp.Unrefs()
realOp.UnrefBlocks = make([]BlockPointer, len(unrefs))
for i, ptr := range unrefs {
original, err := chains.originalFromMostRecentOrSame(ptr)
if err != nil {
return nil, err
}
realOp.UnrefBlocks[i] = original
}
// The leading resolutionOp will take care of the updates.
realOp.Updates = nil
case *renameOp:
updatesToFix = append(updatesToFix, &realOp.OldDir, &realOp.NewDir)
ptrsToFix = append(ptrsToFix, &realOp.Renamed)
// Hack: we need to fix up local conflict renames so that the
// block update points to the new block pointer.
for i := range realOp.Updates {
ptrsToFix = append(ptrsToFix, &realOp.Updates[i].Ref)
}
// Note: Unrefs from the original renameOp are now in a
// separate rm operation.
case *syncOp:
updatesToFix = append(updatesToFix, &realOp.File)
realOp.Updates = nil
case *setAttrOp:
updatesToFix = append(updatesToFix, &realOp.Dir)
ptrsToFix = append(ptrsToFix, &realOp.File)
// The leading resolutionOp will take care of the updates.
realOp.Updates = nil
}
for _, update := range updatesToFix {
newPtr, ok := updates[update.Unref]
if !ok {
continue
}
			// Since the first op does all the heavy lifting of
			// updating pointers, we can set both of these to just
			// be the new pointer.
update.Unref = newPtr
update.Ref = newPtr
}
for _, ptr := range ptrsToFix {
newPtr, ok := updates[*ptr]
if !ok {
continue
}
*ptr = newPtr
}
newOps = append(newOps, op)
}
return newOps, nil
}
// resolveOnePath figures out the new merged path, in the resolved
// folder, for a given unmerged pointer. For each node on the path,
// see if the node has been renamed. If so, see if there's a
// resolution for it yet. If there is, complete the path using that
// resolution. If not, recurse.
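//
// Sketch of the recursion (hypothetical layout): if directory d was
// renamed into a new parent p during the resolution, any merged path
// passing through d is rebuilt as the resolved path of p, followed by
// the original path suffix starting at d, with d's entry given its
// new name.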
func (cr *ConflictResolver) resolveOnePath(ctx context.Context,
unmergedMostRecent BlockPointer, unmergedChains *crChains,
mergedChains *crChains, resolvedChains *crChains,
mergedPaths map[BlockPointer]path,
resolvedPaths map[BlockPointer]path) (path, error) {
if p, ok := resolvedPaths[unmergedMostRecent]; ok {
return p, nil
}
// There should always be a merged path, because we should only be
// calling this with pointers that were updated in the unmerged
// branch.
resolvedPath, ok := mergedPaths[unmergedMostRecent]
if !ok {
var ptrsToAppend []BlockPointer
var namesToAppend []string
next := unmergedMostRecent
for len(mergedPaths[next].path) == 0 {
newPtrs := make(map[BlockPointer]bool)
ptrs := []BlockPointer{unmergedMostRecent}
for ptr := range unmergedChains.byMostRecent {
newPtrs[ptr] = true
}
nodeMap, err := cr.fbo.blocks.SearchForNodes(
ctx, cr.fbo.nodeCache, ptrs, newPtrs,
unmergedChains.mostRecentMD)
if err != nil {
return path{}, err
}
n := nodeMap[unmergedMostRecent]
if n == nil {
return path{}, fmt.Errorf("resolveOnePath: Couldn't find "+
"merged path for %v", unmergedMostRecent)
}
p := cr.fbo.nodeCache.PathFromNode(n)
ptrsToAppend = append(ptrsToAppend, next)
namesToAppend = append(namesToAppend, p.tailName())
next = p.parentPath().tailPointer()
}
resolvedPath = mergedPaths[next]
for i, ptr := range ptrsToAppend {
resolvedPath = resolvedPath.ChildPath(namesToAppend[i], ptr)
}
}
i := len(resolvedPath.path) - 1
for i >= 0 {
mergedMostRecent := resolvedPath.path[i].BlockPointer
original, err :=
mergedChains.originalFromMostRecentOrSame(mergedMostRecent)
if err != nil {
return path{}, err
}
origNewParent, newName, renamed :=
resolvedChains.renamedParentAndName(original)
if !renamed {
i--
continue
}
unmergedNewParent, err :=
unmergedChains.mostRecentFromOriginalOrSame(origNewParent)
if err != nil {
return path{}, err
}
// Is the new parent resolved yet?
parentPath, err := cr.resolveOnePath(ctx, unmergedNewParent,
unmergedChains, mergedChains, resolvedChains, mergedPaths,
resolvedPaths)
if err != nil {
return path{}, err
}
// Reset the resolved path
newPathLen := len(parentPath.path) + len(resolvedPath.path) - i
newResolvedPath := path{
FolderBranch: resolvedPath.FolderBranch,
path: make([]pathNode, newPathLen),
}
copy(newResolvedPath.path[:len(parentPath.path)], parentPath.path)
copy(newResolvedPath.path[len(parentPath.path):], resolvedPath.path[i:])
i = len(parentPath.path) - 1
newResolvedPath.path[i+1].Name = newName
resolvedPath = newResolvedPath
}
resolvedPaths[unmergedMostRecent] = resolvedPath
return resolvedPath, nil
}
// makePostResolutionPaths returns the full paths to each unmerged
// pointer, taking into account any rename operations that occurred in
// the merged branch.
func (cr *ConflictResolver) makePostResolutionPaths(ctx context.Context,
md *RootMetadata, unmergedChains *crChains, mergedChains *crChains,
mergedPaths map[BlockPointer]path) (map[BlockPointer]path, error) {
resolvedChains, err := newCRChains(ctx, cr.config,
[]*RootMetadata{md})
if err != nil {
return nil, err
}
// If there are no renames, we don't need to fix any of the paths
if len(resolvedChains.renamedOriginals) == 0 {
return mergedPaths, nil
}
resolvedPaths := make(map[BlockPointer]path)
for ptr, oldP := range mergedPaths {
p, err := cr.resolveOnePath(ctx, ptr, unmergedChains, mergedChains,
resolvedChains, mergedPaths, resolvedPaths)
if err != nil {
return nil, err
}
cr.log.CDebugf(ctx, "Resolved path for %v from %v to %v",
ptr, oldP.path, p.path)
}
return resolvedPaths, nil
}
// crPathTreeNode represents a particular node in the part of the FS
// tree affected by the conflict resolution, which needs to be sync'd.
type crPathTreeNode struct {
ptr BlockPointer
parent *crPathTreeNode
children map[string]*crPathTreeNode
mergedPath path
}
// syncTree, given a node in part of the FS tree that needs to be
// sync'd, either calls FolderBranchOps.syncBlock on it if the node
// has no children of its own, or it calls syncTree recursively for
// all children. When calling itself recursively on its children, it
// instructs each child to sync only up to this node, except for the
// last child which may sync back to the given stopAt pointer. This
// ensures that the sync process will ready blocks that are complete
// (with all child changes applied) before readying any parent blocks.
// syncTree returns the merged blockPutState for itself and all of its
// children.
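//
// Example shape (hypothetical): at a branch point with children x and
// y, all but the last-visited child are synced up to this node's own
// pointer, and the last child is synced back to the caller's stopAt,
// so both subtrees are readied before any shared ancestor block.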
func (cr *ConflictResolver) syncTree(ctx context.Context, lState *lockState,
newMD *RootMetadata, uid keybase1.UID, node *crPathTreeNode,
stopAt BlockPointer, lbc localBcache, newFileBlocks fileBlockMap) (
*blockPutState, error) {
// If this has no children, then sync it, as far back as stopAt.
if len(node.children) == 0 {
// Look for the directory block or the new file block.
entryType := Dir
var block Block
var ok bool
block, ok = lbc[node.ptr]
		// fblock is non-nil exactly when entryType != Dir.
var fblock *FileBlock
if !ok {
// This must be a file, so look it up in the parent
if node.parent == nil {
return nil, fmt.Errorf("No parent found for node %v while "+
"syncing path %v", node.ptr, node.mergedPath.path)
}
fileBlocks, ok := newFileBlocks[node.parent.ptr]
if !ok {
return nil, fmt.Errorf("No file blocks found for parent %v",
node.parent.ptr)
}
fblock, ok = fileBlocks[node.mergedPath.tailName()]
if !ok {
return nil, fmt.Errorf("No file block found name %s under "+
"parent %v", node.mergedPath.tailName(), node.parent.ptr)
}
block = fblock
entryType = File // TODO: FIXME for Ex and Sym
}
// TODO: fix mtime and ctime?
_, _, bps, err := cr.fbo.syncBlockForConflictResolution(
ctx, lState, uid, newMD, block,
*node.mergedPath.parentPath(), node.mergedPath.tailName(),
entryType, false, false, stopAt, lbc)
if err != nil {
return nil, err
}
if entryType != Dir {
if fblock.IsInd {
// For an indirect file block, make sure a new
// reference is made for every child block.
for _, iptr := range fblock.IPtrs {
bps.addNewBlock(iptr.BlockPointer, nil, ReadyBlockData{})
// TODO: add block updates to the op chain for these guys
// (need encoded size!)
newMD.AddRefBlock(iptr.BlockInfo)
}
}
}
return bps, nil
}
// If there is more than one child, use this node as the stopAt
// since it is the branch point, except for the last child.
bps := newBlockPutState(len(lbc))
count := 0
for _, child := range node.children {
localStopAt := node.ptr
count++
if count == len(node.children) {
localStopAt = stopAt
}
childBps, err := cr.syncTree(
ctx, lState, newMD, uid, child, localStopAt, lbc,
newFileBlocks)
if err != nil {
return nil, err
}
bps.mergeOtherBps(childBps)
}
return bps, nil
}
// calculateResolutionUsage figures out how many bytes are referenced
// and unreferenced in the merged branch by this resolution. It
// should be called before the block changes are unembedded in md.
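//
// Worked example (hypothetical sizes): ref'ing two 100-byte blocks
// and unref'ing one pre-existing 50-byte block yields RefBytes = 200,
// UnrefBytes = 50, and DiskUsage = previous usage + 200 - 50.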
func (cr *ConflictResolver) calculateResolutionUsage(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
unmergedChains *crChains, mergedChains *crChains) error {
md.RefBytes = 0
md.UnrefBytes = 0
md.DiskUsage = mergedChains.mostRecentMD.DiskUsage
// Track the refs and unrefs in a set, to ensure no duplicates
refs := make(map[BlockPointer]bool)
unrefs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, ptr := range op.Refs() {
			// Don't add usage if it's an unembedded block change pointer.
if _, ok := unmergedChains.blockChangePointers[ptr]; !ok {
refs[ptr] = true
}
}
for _, ptr := range op.Unrefs() {
unrefs[ptr] = true
delete(refs, ptr)
}
for _, update := range op.AllUpdates() {
if update.Unref != update.Ref {
unrefs[update.Unref] = true
delete(refs, update.Unref)
}
refs[update.Ref] = true
}
}
localBlocks := make(map[BlockPointer]Block)
for _, bs := range bps.blockStates {
if bs.block != nil {
localBlocks[bs.blockPtr] = bs.block
}
}
// Add bytes for every ref'd block.
var err error
for ptr := range refs {
block, ok := localBlocks[ptr]
if !ok {
// Look up the block to get its size. Since we don't know
// whether this is a file or directory, just look it up
// generically.
//
// TODO: If the block wasn't already in the cache, this
// call won't cache it, so it's kind of wasting work.
// Furthermore, we might be able to get the encoded size
// from other sources as well (such as its directory entry
// or its indirect file block) if we happened to have come
// across it before.
block, err = cr.fbo.blocks.GetBlockForReading(ctx, lState, md, ptr,
cr.fbo.branch())
if err != nil {
return err
}
}
cr.log.CDebugf(ctx, "Ref'ing block %v", ptr)
size := uint64(block.GetEncodedSize())
md.RefBytes += size
md.DiskUsage += size
}
// Subtract bytes for every unref'd block that wasn't created in
// the unmerged branch
for ptr := range unrefs {
original, ok := unmergedChains.originals[ptr]
if !ok {
original = ptr
}
if original != ptr || unmergedChains.isCreated(original) {
// Only unref pointers that weren't created as part of the
// unmerged branch. Either they existed already or they
// were created as part of the merged branch.
continue
}
// Also make sure this wasn't already removed on the merged branch.
original, ok = mergedChains.originals[ptr]
if !ok {
original = ptr
}
if mergedChains.isDeleted(original) {
continue
}
block, err := cr.fbo.blocks.GetBlockForReading(ctx, lState,
mergedChains.mostRecentMD, ptr, cr.fbo.branch())
if err != nil {
return err
}
cr.log.CDebugf(ctx, "Unref'ing block %v", ptr)
size := uint64(block.GetEncodedSize())
md.UnrefBytes += size
md.DiskUsage -= size
}
// Any blocks that were created on the unmerged branch, but didn't
// survive the resolution, should be marked as unreferenced in the
// resolution.
toUnref := make(map[BlockPointer]bool)
for ptr := range unmergedChains.originals {
if !refs[ptr] && !unrefs[ptr] {
toUnref[ptr] = true
}
}
for ptr := range unmergedChains.createdOriginals {
if !refs[ptr] && !unrefs[ptr] && unmergedChains.byOriginal[ptr] != nil {
toUnref[ptr] = true
} else if _, ok := unmergedChains.blockChangePointers[ptr]; ok {
toUnref[ptr] = true
} else if _, ok := unmergedChains.toUnrefPointers[ptr]; ok {
toUnref[ptr] = true
}
}
for ptr := range toUnref {
		// Put the unrefs on the final operation, to cancel out any
// stray refs in earlier ops.
cr.log.CDebugf(ctx, "Unreferencing dropped block %v", ptr)
md.data.Changes.Ops[len(md.data.Changes.Ops)-1].AddUnrefBlock(ptr)
}
cr.log.CDebugf(ctx, "New md byte usage: %d ref, %d unref, %d total usage "+
"(previously %d)", md.RefBytes, md.UnrefBytes, md.DiskUsage,
mergedChains.mostRecentMD.DiskUsage)
return nil
}
// syncBlocks takes in the complete set of paths affected by this
// conflict resolution, and organizes them into a tree, which it then
// syncs using syncTree. It also puts all the resulting blocks to the
// servers. It returns a map describing how blocks were updated in
// the merged branch, as well as the complete set of blocks that were
// put to the server as a result of this resolution (for later
// caching).
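//
// For instance (hypothetical paths): resolved paths /a/b and /a/c are
// merged into a single tree rooted at /a with children b and c, so
// the block for /a is synced exactly once, after both children.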
func (cr *ConflictResolver) syncBlocks(ctx context.Context, lState *lockState,
md *RootMetadata, unmergedChains *crChains, mergedChains *crChains,
resolvedPaths map[BlockPointer]path, lbc localBcache,
newFileBlocks fileBlockMap) (
map[BlockPointer]BlockPointer, *blockPutState, error) {
// Construct a tree out of the merged paths, and do a sync at each leaf.
var root *crPathTreeNode
for _, p := range resolvedPaths {
cr.log.CDebugf(ctx, "Creating tree from merged path: %v", p.path)
var parent *crPathTreeNode
for i, pnode := range p.path {
var nextNode *crPathTreeNode
if parent != nil {
nextNode = parent.children[pnode.Name]
} else if root != nil {
nextNode = root
}
if nextNode == nil {
cr.log.CDebugf(ctx, "Creating node with pointer %v",
pnode.BlockPointer)
nextNode = &crPathTreeNode{
ptr: pnode.BlockPointer,
parent: parent,
children: make(map[string]*crPathTreeNode),
// save the full path, since we'll only use this
// at the leaves anyway.
mergedPath: p,
}
if parent != nil {
parent.children[pnode.Name] = nextNode
}
}
if parent == nil && root == nil {
root = nextNode
}
parent = nextNode
// If this node is a directory that has files to sync,
// make nodes for them as well. (Because of
// collapseActions, these files won't have their own
// mergedPath.)
blocks, ok := newFileBlocks[pnode.BlockPointer]
if !ok {
continue
}
dblock, ok := lbc[pnode.BlockPointer]
if !ok {
continue
}
for name := range blocks {
if _, ok := nextNode.children[name]; ok {
continue
}
// Try to lookup the block pointer, but this might be
// for a new file.
var filePtr BlockPointer
if de, ok := dblock.Children[name]; ok {
filePtr = de.BlockPointer
}
cr.log.CDebugf(ctx, "Creating child node for name %s for "+
"parent %v", name, pnode.BlockPointer)
childPath := path{
FolderBranch: p.FolderBranch,
path: make([]pathNode, i+2),
}
copy(childPath.path[0:i+1], p.path[0:i+1])
childPath.path[i+1] = pathNode{Name: name}
childNode := &crPathTreeNode{
ptr: filePtr,
parent: nextNode,
children: make(map[string]*crPathTreeNode),
mergedPath: childPath,
}
nextNode.children[name] = childNode
}
}
}
updates := make(map[BlockPointer]BlockPointer)
if root == nil {
return updates, newBlockPutState(0), nil
}
_, uid, err := cr.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, nil, err
}
// Now do a depth-first walk, and syncBlock back up to the fork on
// every branch
bps, err := cr.syncTree(ctx, lState, md, uid, root, BlockPointer{},
lbc, newFileBlocks)
if err != nil {
return nil, nil, err
}
oldOps := md.data.Changes.Ops
resOp, ok := oldOps[len(oldOps)-1].(*resolutionOp)
if !ok {
return nil, nil, fmt.Errorf("dummy op is not gc: %s",
oldOps[len(oldOps)-1])
}
// Create an update map, and fix up the gc ops.
for i, update := range resOp.Updates {
// The unref should represent the most recent merged pointer
// for the block. However, the other ops will be using the
// original pointer as the unref, so use that as the key.
updates[update.Unref] = update.Ref
if chain, ok := mergedChains.byMostRecent[update.Unref]; ok {
updates[chain.original] = update.Ref
}
// Fix the gc updates to make sure they all unref the most
// recent block pointer. In cases where the two users create
// the same directory independently, the update might
// currently unref the unmerged most recent pointer.
if chain, ok := unmergedChains.byMostRecent[update.Unref]; ok {
// In case there was no merged chain above, map the
// original to the ref again.
updates[chain.original] = update.Ref
mergedMostRecent, err :=
mergedChains.mostRecentFromOriginalOrSame(chain.original)
if err != nil {
return nil, nil, err
}
cr.log.CDebugf(ctx, "Fixing resOp update from unmerged most "+
"recent %v to merged most recent %v",
update.Unref, mergedMostRecent)
update.Unref = mergedMostRecent
resOp.Updates[i] = update
updates[update.Unref] = update.Ref
}
}
// For all chains that were created only in the unmerged branch,
// make sure we update all the pointers to their most recent
// version.
for original, chain := range unmergedChains.byOriginal {
if !unmergedChains.isCreated(original) ||
mergedChains.isCreated(original) {
continue
}
if _, ok := updates[chain.original]; !ok {
updates[chain.original] = chain.mostRecent
}
}
// For all chains that were renamed only in the unmerged branch,
// make sure we update all the pointers to their most recent
// version.
for original := range unmergedChains.renamedOriginals {
mergedChain, ok := mergedChains.byOriginal[original]
if !ok {
continue
}
updates[original] = mergedChain.mostRecent
}
// Consolidate any chains of updates
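	// E.g. (hypothetical pointers): updates of {A: B, B: C} collapse
	// to {A: C}, with the intermediate entry for B dropped.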
for k, v := range updates {
if v2, ok := updates[v]; ok {
updates[k] = v2
delete(updates, v)
}
}
newOps, err := crFixOpPointers(oldOps[:len(oldOps)-1], updates,
unmergedChains)
if err != nil {
return nil, nil, err
}
// Clean up any gc updates that don't refer to blocks that exist
// in the merged branch.
var newUpdates []blockUpdate
for _, update := range resOp.Updates {
// Ignore it if it doesn't descend from an original block
// pointer or one created in the merged branch.
if _, ok := unmergedChains.originals[update.Unref]; !ok &&
unmergedChains.byOriginal[update.Unref] == nil &&
mergedChains.byMostRecent[update.Unref] == nil {
cr.log.CDebugf(ctx, "Turning update from %v into just a ref for %v",
update.Unref, update.Ref)
resOp.AddRefBlock(update.Ref)
continue
}
newUpdates = append(newUpdates, update)
}
resOp.Updates = newUpdates
	newOps[0] = resOp // move the dummy op to the front
md.data.Changes.Ops = newOps
// TODO: only perform this loop if debugging is enabled.
for _, op := range newOps {
cr.log.CDebugf(ctx, "remote op %s: refs: %v", op, op.Refs())
cr.log.CDebugf(ctx, "remote op %s: unrefs: %v", op, op.Unrefs())
for _, update := range op.AllUpdates() {
cr.log.CDebugf(ctx, "remote op %s: update: %v -> %v", op,
update.Unref, update.Ref)
}
}
err = cr.calculateResolutionUsage(ctx, lState, md, bps, unmergedChains,
mergedChains)
if err != nil {
return nil, nil, err
}
// do the block changes need their own blocks?
bsplit := cr.config.BlockSplitter()
if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
err = cr.fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid)
if err != nil {
return nil, nil, err
}
}
// Put all the blocks. TODO: deal with recoverable block errors?
_, err = cr.fbo.doBlockPuts(ctx, md, *bps)
if err != nil {
return nil, nil, err
}
return updates, bps, nil
}
// getOpsForLocalNotification returns the set of operations that this
// node will need to send local notifications for, in order to
// transition from the staged state to the merged state.
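//
// E.g. (hypothetical pointers): if the resolution moved a node from
// merged pointer M to new pointer N, and the unmerged branch last
// knew that node as U, the dummy op records the update U -> N so that
// locally staged state converges on N.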
func (cr *ConflictResolver) getOpsForLocalNotification(ctx context.Context,
lState *lockState, md *RootMetadata, unmergedChains *crChains,
mergedChains *crChains, updates map[BlockPointer]BlockPointer) (
[]op, error) {
dummyOp := newResolutionOp()
newPtrs := make(map[BlockPointer]bool)
for original, newMostRecent := range updates {
chain, ok := unmergedChains.byOriginal[original]
if ok {
// If this unmerged node was updated in the resolution,
// track that update here.
dummyOp.AddUpdate(chain.mostRecent, newMostRecent)
} else {
dummyOp.AddUpdate(original, newMostRecent)
}
newPtrs[newMostRecent] = true
}
var ptrs []BlockPointer
chainsToUpdate := make(map[BlockPointer]BlockPointer)
chainsToAdd := make(map[BlockPointer]*crChain)
for ptr, chain := range mergedChains.byMostRecent {
if newMostRecent, ok := updates[chain.original]; ok {
ptrs = append(ptrs, newMostRecent)
chainsToUpdate[chain.mostRecent] = newMostRecent
// This update was already handled above.
continue
}
// If the node changed in both branches, but NOT in the
// resolution, make sure the local notification uses the
// unmerged most recent pointer as the unref.
original := chain.original
if c, ok := unmergedChains.byOriginal[chain.original]; ok {
original = c.mostRecent
updates[chain.original] = chain.mostRecent
// If the node pointer didn't change in the merged chain
// (e.g., due to a setattr), fast forward its most-recent
// pointer to be the unmerged most recent pointer, so that
// local notifications work correctly.
if chain.original == chain.mostRecent {
ptrs = append(ptrs, c.mostRecent)
chainsToAdd[c.mostRecent] = chain
delete(mergedChains.byMostRecent, chain.mostRecent)
chain.mostRecent = c.mostRecent
}
}
newPtrs[ptr] = true
dummyOp.AddUpdate(original, chain.mostRecent)
updates[original] = chain.mostRecent
ptrs = append(ptrs, chain.mostRecent)
}
for ptr, chain := range chainsToAdd {
mergedChains.byMostRecent[ptr] = chain
}
// If any nodes changed only in the unmerged branch, make sure we
// update the pointers in the local ops (e.g., renameOp.Renamed)
// to the latest local most recent.
for original, chain := range unmergedChains.byOriginal {
if _, ok := updates[original]; !ok {
updates[original] = chain.mostRecent
}
}
// Update the merged chains so they all have the new most recent
// pointer.
for mostRecent, newMostRecent := range chainsToUpdate {
chain, ok := mergedChains.byMostRecent[mostRecent]
if !ok {
continue
}
delete(mergedChains.byMostRecent, mostRecent)
chain.mostRecent = newMostRecent
mergedChains.byMostRecent[newMostRecent] = chain
}
// We need to get the complete set of updated merged paths, so
// that we can correctly order the chains from the root outward.
mergedNodeCache := newNodeCacheStandard(cr.fbo.folderBranch)
// Initialize the root node.
mergedNodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(md.GetTlfHandle().GetCanonicalName()), nil)
nodeMap, err := cr.fbo.blocks.SearchForNodes(
ctx, mergedNodeCache, ptrs, newPtrs, md)
if err != nil {
return nil, err
}
mergedPaths := make([]path, 0, len(nodeMap))
for _, node := range nodeMap {
if node == nil {
continue
}
mergedPaths = append(mergedPaths, mergedNodeCache.PathFromNode(node))
}
sort.Sort(crSortedPaths(mergedPaths))
ops, err := cr.makeRevertedOps(
ctx, lState, mergedPaths, mergedChains, unmergedChains)
if err != nil {
return nil, err
}
newOps, err := crFixOpPointers(ops, updates, mergedChains)
if err != nil {
return nil, err
}
newOps[0] = dummyOp
return newOps, err
}
// finalizeResolution finishes the resolution process, making the
// resolution visible to any nodes on the merged branch, and taking
// the local node out of staged mode.
func (cr *ConflictResolver) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, unmergedChains *crChains,
mergedChains *crChains, updates map[BlockPointer]BlockPointer,
bps *blockPutState) error {
// Fix up all the block pointers in the merged ops to work well
// for local notifications. Make a dummy op at the beginning to
// convert all the merged most recent pointers into unmerged most
// recent pointers.
newOps, err := cr.getOpsForLocalNotification(
ctx, lState, md, unmergedChains,
mergedChains, updates)
if err != nil {
return err
}
cr.log.CDebugf(ctx, "Local notifications: %v", newOps)
return cr.fbo.finalizeResolution(ctx, lState, md, bps, newOps)
}
// completeResolution pushes all the resolved blocks to the servers,
// computes all remote and local notifications, and finalizes the
// resolution process.
func (cr *ConflictResolver) completeResolution(ctx context.Context,
lState *lockState, unmergedChains *crChains, mergedChains *crChains,
unmergedPaths []path, mergedPaths map[BlockPointer]path, lbc localBcache,
newFileBlocks fileBlockMap, unmergedMDs []*RootMetadata) error {
md, err := cr.createResolvedMD(ctx, lState, unmergedPaths, unmergedChains,
mergedChains)
if err != nil {
return err
}
resolvedPaths, err := cr.makePostResolutionPaths(ctx, md, unmergedChains,
mergedChains, mergedPaths)
if err != nil {
return err
}
updates, bps, err := cr.syncBlocks(
ctx, lState, md, unmergedChains, mergedChains,
resolvedPaths, lbc, newFileBlocks)
if err != nil {
return err
}
err = cr.finalizeResolution(ctx, lState, md, unmergedChains,
mergedChains, updates, bps)
if err != nil {
return err
}
return nil
}
// maybeUnstageAfterFailure abandons this branch if there was a
// conflict resolution failure due to missing blocks, caused by a
// concurrent gcOp on the main branch.
func (cr *ConflictResolver) maybeUnstageAfterFailure(ctx context.Context,
lState *lockState, mergedMDs []*RootMetadata, err error) error {
// Make sure the error is related to a missing block.
_, isBlockNotFound := err.(BServerErrorBlockNonExistent)
_, isBlockDeleted := err.(BServerErrorBlockDeleted)
if !isBlockNotFound && !isBlockDeleted {
return err
}
// Make sure there was a gcOp on the main branch.
foundGCOp := false
outer:
for _, rmd := range mergedMDs {
for _, op := range rmd.data.Changes.Ops {
if _, ok := op.(*gcOp); ok {
foundGCOp = true
break outer
}
}
}
if !foundGCOp {
return err
}
cr.log.CDebugf(ctx, "Unstaging due to a failed resolution: %v", err)
reportedError := CRAbandonStagedBranchError{err, cr.fbo.bid}
unstageErr := cr.fbo.unstageAfterFailedResolution(ctx, lState)
if unstageErr != nil {
cr.log.CDebugf(ctx, "Couldn't unstage: %v", unstageErr)
return err
}
handle := cr.fbo.getHead(lState).GetTlfHandle()
cr.config.Reporter().ReportErr(ctx,
handle.GetCanonicalName(), handle.IsPublic(),
WriteMode, reportedError)
return nil
}
// CRWrapError wraps an error that happens during conflict resolution.
type CRWrapError struct {
err error
}
// Error implements the error interface for CRWrapError.
func (e CRWrapError) Error() string {
return "Conflict resolution error: " + e.err.Error()
}
func (cr *ConflictResolver) doResolve(ctx context.Context, ci conflictInput) {
cr.log.CDebugf(ctx, "Starting conflict resolution with input %v", ci)
var err error
lState := makeFBOLockState()
defer func() {
cr.log.CDebugf(ctx, "Finished conflict resolution: %v", err)
if err != nil {
handle := cr.fbo.getHead(lState).GetTlfHandle()
cr.config.Reporter().ReportErr(ctx,
handle.GetCanonicalName(), handle.IsPublic(),
WriteMode, CRWrapError{err})
}
}()
// Canceled before we even got started?
err = cr.checkDone(ctx)
if err != nil {
return
}
var mergedMDs []*RootMetadata
defer func() {
if err != nil {
err = cr.maybeUnstageAfterFailure(ctx, lState, mergedMDs, err)
}
}()
// Step 1: Build the chains for each branch, as well as the paths
// and necessary extra recreate ops. The result of this step is:
// * A set of conflict resolution "chains" for both the unmerged and
// merged branches
// * A map containing, for each changed unmerged node, the full path to
// the corresponding merged node.
// * A set of "recreate" ops that must be applied on the merged branch
// to recreate any directories that were modified in the unmerged
// branch but removed in the merged branch.
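	// For example (hypothetical): if the unmerged branch wrote to
	// /a/b/file while the merged branch rm'd /a/b, this step emits a
	// recreate op for /a/b so the merged path to file can be
	// completed.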
unmergedChains, mergedChains, unmergedPaths, mergedPaths, recOps,
unmergedMDs, mergedMDs, err := cr.buildChainsAndPaths(ctx, lState)
if err != nil {
return
}
if len(mergedPaths) == 0 {
// nothing to do
cr.log.CDebugf(ctx, "No updates to resolve, so finishing")
lbc := make(localBcache)
newFileBlocks := make(fileBlockMap)
err = cr.completeResolution(ctx, lState, unmergedChains, mergedChains,
unmergedPaths, mergedPaths, lbc, newFileBlocks, unmergedMDs)
return
}
err = cr.checkDone(ctx)
if err != nil {
return
}
if status, _, err := cr.fbo.status.getStatus(ctx); err == nil {
if statusString, err := json.Marshal(status); err == nil {
ci := func() conflictInput {
cr.inputLock.Lock()
defer cr.inputLock.Unlock()
return cr.currInput
}()
cr.log.CInfof(ctx, "Current status during conflict resolution "+
"(input %v): %s", ci, statusString)
}
}
cr.log.CDebugf(ctx, "Recreate ops: %s", recOps)
// Step 2: Figure out which actions need to be taken in the merged
// branch to best reflect the unmerged changes. The result of
// this step is a map containing, for each node in the merged path
// that will be updated during conflict resolution, a set of
// "actions" to be applied to the merged branch. Each of these
// actions contains the logic needed to manipulate the data into
// the final merged state, including the resolution of any
// conflicts that occurred between the two branches.
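	// For example (hypothetical): if both branches independently
	// created a file with the same name, the action map could include
	// renaming one copy to a conflict-suffixed name.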
actionMap, newUnmergedPaths, err := cr.computeActions(ctx, unmergedChains,
mergedChains, mergedPaths, recOps)
if err != nil {
return
}
// Insert the new unmerged paths as needed
if len(newUnmergedPaths) > 0 {
unmergedPaths = append(unmergedPaths, newUnmergedPaths...)
sort.Sort(crSortedPaths(unmergedPaths))
}
err = cr.checkDone(ctx)
if err != nil {
return
}
cr.log.CDebugf(ctx, "Action map: %v", actionMap)
// Step 3: Apply the actions by looking up the corresponding
// unmerged dir entry and copying it to a copy of the
// corresponding merged block. Keep these dirty block copies in a
// local dirty cache, keyed by corresponding merged most recent
// pointer.
//
// At the same time, construct two sets of ops: one that will be
// put into the final MD object that gets merged, and one that
// needs to be played through as notifications locally to get any
// local caches synchronized with the final merged state.
//
// * This will be taken care of by each crAction.updateOps()
// method, which modifies the unmerged and merged ops for a
// particular chain. After all the crActions are applied, the
// "unmerged" ops need to be pushed as part of the MD update,
// while the "merged" ops need to be applied locally.
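	// For example (hypothetical): an action may copy an unmerged dir
	// entry into a dirty copy of the corresponding merged block; that
	// dirty block then lands in lbc, keyed by the merged most recent
	// pointer.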
// lbc contains the modified directory blocks we need to sync
lbc := make(localBcache)
// newFileBlocks contains the copies of the file blocks we need to
// sync. If a block is indirect, we need to put it and add new
// references for all indirect pointers inside it. If it is not
// an indirect block, just add a new reference to the block.
newFileBlocks := make(fileBlockMap)
err = cr.doActions(ctx, lState, unmergedChains, mergedChains,
unmergedPaths, mergedPaths, actionMap, lbc, newFileBlocks)
if err != nil {
return
}
err = cr.checkDone(ctx)
if err != nil {
return
}
cr.log.CDebugf(ctx, "Executed all actions, %d updated directory blocks",
len(lbc))
// Step 4: finish up by syncing all the blocks, computing and
// putting the final resolved MD, and issuing all the local
// notifications.
err = cr.completeResolution(ctx, lState, unmergedChains, mergedChains,
unmergedPaths, mergedPaths, lbc, newFileBlocks, unmergedMDs)
if err != nil {
return
}
// TODO: If conflict resolution fails after some blocks were put,
// remember these and include them in the later resolution so they
// don't count against the quota forever. (Though of course if we
// completely fail, we'll need to rely on a future complete scan
// to clean up the quota anyway . . .)
}
| 1 | 11,280 | Why not make `SearchForNodes` do a `GetOrCreate` for the root node? (Not advocating for it, but just wondering if there's a reason other than avoiding having to pass in the path.) | keybase-kbfs | go |
@@ -224,6 +224,7 @@ public class DefaultP2PNetwork implements P2PNetwork {
}
dnsPeers.set(peers);
});
+ dnsDaemon.start();
}
final int listeningPort = rlpxAgent.start().join(); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.network;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.ethereum.core.Util;
import org.hyperledger.besu.ethereum.p2p.config.NetworkingConfiguration;
import org.hyperledger.besu.ethereum.p2p.discovery.DiscoveryPeer;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryAgent;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryEvent.PeerBondedEvent;
import org.hyperledger.besu.ethereum.p2p.discovery.PeerDiscoveryStatus;
import org.hyperledger.besu.ethereum.p2p.discovery.VertxPeerDiscoveryAgent;
import org.hyperledger.besu.ethereum.p2p.peers.DefaultPeerPrivileges;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
import org.hyperledger.besu.ethereum.p2p.peers.LocalNode;
import org.hyperledger.besu.ethereum.p2p.peers.MaintainedPeers;
import org.hyperledger.besu.ethereum.p2p.peers.MutableLocalNode;
import org.hyperledger.besu.ethereum.p2p.peers.Peer;
import org.hyperledger.besu.ethereum.p2p.peers.PeerPrivileges;
import org.hyperledger.besu.ethereum.p2p.permissions.PeerPermissions;
import org.hyperledger.besu.ethereum.p2p.permissions.PeerPermissionsDenylist;
import org.hyperledger.besu.ethereum.p2p.rlpx.ConnectCallback;
import org.hyperledger.besu.ethereum.p2p.rlpx.DisconnectCallback;
import org.hyperledger.besu.ethereum.p2p.rlpx.MessageCallback;
import org.hyperledger.besu.ethereum.p2p.rlpx.RlpxAgent;
import org.hyperledger.besu.ethereum.p2p.rlpx.connections.PeerConnection;
import org.hyperledger.besu.ethereum.p2p.rlpx.connections.netty.TLSConfiguration;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.Capability;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.messages.DisconnectMessage.DisconnectReason;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.nat.NatService;
import org.hyperledger.besu.nat.core.domain.NatServiceType;
import org.hyperledger.besu.nat.core.domain.NetworkProtocol;
import org.hyperledger.besu.nat.upnp.UpnpNatManager;
import org.hyperledger.besu.plugin.data.EnodeURL;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.annotations.VisibleForTesting;
import io.vertx.core.Vertx;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.devp2p.EthereumNodeRecord;
import org.apache.tuweni.discovery.DNSDaemon;
/**
* The peer network service (defunct PeerNetworkingService) is the entrypoint to the peer-to-peer
* components of the Ethereum client. It implements the devp2p framework from the Ethereum
* specifications.
*
* <p>This component manages the peer discovery layer, the RLPx wire protocol and the subprotocols
* supported by this client.
*
* <h2>Peer Discovery</h2>
*
* Ethereum nodes discover one another via a simple UDP protocol that follows some of the techniques
 * described in the Kademlia DHT paper. In particular, nodes are classified into a k-bucket table
* composed of 256 buckets, where each bucket contains at most 16 peers whose <i>XOR(SHA3(x))</i>
* distance from us is equal to the index of the bucket. The value <i>x</i> in the distance function
* corresponds to our node ID (public key).
*
* <p>Upper layers in the stack subscribe to events from the peer discovery layer and initiate/drop
* connections accordingly.
*
* <h2>RLPx Wire Protocol</h2>
*
* The RLPx wire protocol is responsible for selecting peers to engage with, authenticating and
* encrypting communications with peers, multiplexing subprotocols, framing messages, controlling
* legality of messages, keeping connections alive, and keeping track of peer reputation.
*
* <h2>Subprotocols</h2>
*
* Subprotocols are pluggable elements on top of the RLPx framework, which can handle a specific set
 * of messages. Each subprotocol has a 3-char ASCII identifier and a version number, and statically
* defines a count of messages it can handle.
*
* <p>The RLPx wire protocol dispatches messages to subprotocols based on the capabilities agreed by
* each of the two peers during the protocol handshake.
*
* @see <a href="https://pdos.csail.mit.edu/~petar/papers/maymounkov-kademlia-lncs.pdf">Kademlia DHT
* paper</a>
* @see <a href="https://github.com/ethereum/wiki/wiki/Kademlia-Peer-Selection">Kademlia Peer
* Selection</a>
* @see <a href="https://github.com/ethereum/devp2p/blob/master/rlpx.md">devp2p RLPx</a>
*/
public class DefaultP2PNetwork implements P2PNetwork {
private static final Logger LOG = LogManager.getLogger();
private final ScheduledExecutorService peerConnectionScheduler =
Executors.newSingleThreadScheduledExecutor();
private final PeerDiscoveryAgent peerDiscoveryAgent;
private final RlpxAgent rlpxAgent;
private final NetworkingConfiguration config;
private final Bytes nodeId;
private final MutableLocalNode localNode;
private final PeerPermissions peerPermissions;
private final MaintainedPeers maintainedPeers;
private final NatService natService;
private OptionalLong peerBondedObserverId = OptionalLong.empty();
private final AtomicBoolean started = new AtomicBoolean(false);
private final AtomicBoolean stopped = new AtomicBoolean(false);
private final CountDownLatch shutdownLatch = new CountDownLatch(2);
private final Duration shutdownTimeout = Duration.ofMinutes(1);
private final AtomicReference<List<DiscoveryPeer>> dnsPeers = new AtomicReference<>();
private DNSDaemon dnsDaemon;
/**
* Creates a peer networking service for production purposes.
*
* <p>The caller is expected to provide the IP address to be advertised (normally this node's
* public IP address), as well as TCP and UDP port numbers for the RLPx agent and the discovery
* agent, respectively.
*
* @param localNode A representation of the local node
* @param peerDiscoveryAgent The agent responsible for discovering peers on the network.
* @param nodeKey The node key through which cryptographic operations can be performed
* @param config The network configuration to use.
* @param peerPermissions An object that determines whether peers are allowed to connect
* @param natService The NAT environment manager.
* @param maintainedPeers A collection of peers for which we are expected to maintain connections
   * @param reputationManager An object that inspects disconnections for misbehaving peers that can
* then be blacklisted.
*/
DefaultP2PNetwork(
final MutableLocalNode localNode,
final PeerDiscoveryAgent peerDiscoveryAgent,
final RlpxAgent rlpxAgent,
final NodeKey nodeKey,
final NetworkingConfiguration config,
final PeerPermissions peerPermissions,
final NatService natService,
final MaintainedPeers maintainedPeers,
final PeerReputationManager reputationManager) {
this.localNode = localNode;
this.peerDiscoveryAgent = peerDiscoveryAgent;
this.rlpxAgent = rlpxAgent;
this.config = config;
this.natService = natService;
this.maintainedPeers = maintainedPeers;
this.nodeId = nodeKey.getPublicKey().getEncodedBytes();
this.peerPermissions = peerPermissions;
final int maxPeers = config.getRlpx().getMaxPeers();
peerDiscoveryAgent.addPeerRequirement(() -> rlpxAgent.getConnectionCount() >= maxPeers);
subscribeDisconnect(reputationManager);
}
public static Builder builder() {
return new Builder();
}
@Override
public void start() {
if (!started.compareAndSet(false, true)) {
LOG.warn("Attempted to start an already started " + getClass().getSimpleName());
return;
}
final String address = config.getDiscovery().getAdvertisedHost();
final int configuredDiscoveryPort = config.getDiscovery().getBindPort();
final int configuredRlpxPort = config.getRlpx().getBindPort();
if (config.getDiscovery().getDNSDiscoveryURL() != null) {
LOG.info("Starting DNS discovery with URL {}", config.getDiscovery().getDNSDiscoveryURL());
dnsDaemon =
new DNSDaemon(
config.getDiscovery().getDNSDiscoveryURL(),
(seq, records) -> {
List<DiscoveryPeer> peers = new ArrayList<>();
for (EthereumNodeRecord enr : records) {
EnodeURL enodeURL =
EnodeURLImpl.builder()
.ipAddress(enr.ip())
.nodeId(enr.publicKey().bytes())
.discoveryPort(Optional.ofNullable(enr.udp()))
.listeningPort(Optional.ofNullable(enr.tcp()))
.build();
DiscoveryPeer peer = DiscoveryPeer.fromEnode(enodeURL);
peers.add(peer);
rlpxAgent.connect(peer);
}
dnsPeers.set(peers);
});
}
final int listeningPort = rlpxAgent.start().join();
final int discoveryPort =
peerDiscoveryAgent
.start(
(configuredDiscoveryPort == 0 && configuredRlpxPort == 0)
? listeningPort
: configuredDiscoveryPort)
.join();
natService.ifNatEnvironment(
NatMethod.UPNP,
natManager -> {
UpnpNatManager upnpNatManager = (UpnpNatManager) natManager;
upnpNatManager.requestPortForward(
discoveryPort, NetworkProtocol.UDP, NatServiceType.DISCOVERY);
upnpNatManager.requestPortForward(
listeningPort, NetworkProtocol.TCP, NatServiceType.RLPX);
});
setLocalNode(address, listeningPort, discoveryPort);
peerBondedObserverId =
OptionalLong.of(peerDiscoveryAgent.observePeerBondedEvents(this::handlePeerBondedEvent));
// Periodically check maintained connections
final int checkMaintainedConnectionsSec = config.getCheckMaintainedConnectionsFrequencySec();
peerConnectionScheduler.scheduleWithFixedDelay(
this::checkMaintainedConnectionPeers, 2, checkMaintainedConnectionsSec, TimeUnit.SECONDS);
// Periodically initiate outgoing connections to discovered peers
final int checkConnectionsSec = config.getInitiateConnectionsFrequencySec();
peerConnectionScheduler.scheduleWithFixedDelay(
this::attemptPeerConnections, checkConnectionsSec, checkConnectionsSec, TimeUnit.SECONDS);
}
@Override
public void stop() {
if (!this.started.get() || !stopped.compareAndSet(false, true)) {
// We haven't started, or we've started and stopped already
return;
}
if (dnsDaemon != null) {
dnsDaemon.close();
}
peerConnectionScheduler.shutdownNow();
peerDiscoveryAgent.stop().whenComplete((res, err) -> shutdownLatch.countDown());
rlpxAgent.stop().whenComplete((res, err) -> shutdownLatch.countDown());
peerBondedObserverId.ifPresent(peerDiscoveryAgent::removePeerBondedObserver);
peerBondedObserverId = OptionalLong.empty();
peerPermissions.close();
}
@Override
public void awaitStop() {
try {
if (!peerConnectionScheduler.awaitTermination(
shutdownTimeout.getSeconds(), TimeUnit.SECONDS)) {
LOG.error(
"{} did not shutdown cleanly: peerConnectionScheduler executor did not fully terminate.",
this.getClass().getSimpleName());
}
if (!shutdownLatch.await(shutdownTimeout.getSeconds(), TimeUnit.SECONDS)) {
LOG.error(
"{} did not shutdown cleanly: some internal services failed to fully terminate.",
this.getClass().getSimpleName());
}
} catch (final InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
@Override
public boolean addMaintainConnectionPeer(final Peer peer) {
if (localNode.isReady()
&& localNode.getPeer() != null
&& localNode.getPeer().getEnodeURL() != null
&& peer.getEnodeURL().getNodeId().equals(localNode.getPeer().getEnodeURL().getNodeId())) {
return false;
}
final boolean wasAdded = maintainedPeers.add(peer);
peerDiscoveryAgent.bond(peer);
rlpxAgent.connect(peer);
return wasAdded;
}
@Override
public boolean removeMaintainedConnectionPeer(final Peer peer) {
final boolean wasRemoved = maintainedPeers.remove(peer);
peerDiscoveryAgent.dropPeer(peer);
LOG.debug("Disconnect requested for peer {}.", peer);
rlpxAgent.disconnect(peer.getId(), DisconnectReason.REQUESTED);
return wasRemoved;
}
@VisibleForTesting
void checkMaintainedConnectionPeers() {
if (!localNode.isReady()) {
return;
}
maintainedPeers
.streamPeers()
.filter(p -> !rlpxAgent.getPeerConnection(p).isPresent())
.forEach(rlpxAgent::connect);
}
@VisibleForTesting
void attemptPeerConnections() {
LOG.trace("Initiating connections to discovered peers.");
rlpxAgent.connect(
streamDiscoveredPeers()
.filter(peer -> peer.getStatus() == PeerDiscoveryStatus.BONDED)
.sorted(Comparator.comparing(DiscoveryPeer::getLastAttemptedConnection)));
}
@Override
public Collection<PeerConnection> getPeers() {
return rlpxAgent.streamConnections().collect(Collectors.toList());
}
@Override
public Stream<DiscoveryPeer> streamDiscoveredPeers() {
List<DiscoveryPeer> peers = dnsPeers.get();
if (peers != null) {
return Stream.concat(peerDiscoveryAgent.streamDiscoveredPeers(), peers.stream());
}
return peerDiscoveryAgent.streamDiscoveredPeers();
}
@Override
public CompletableFuture<PeerConnection> connect(final Peer peer) {
return rlpxAgent.connect(peer);
}
@Override
public void subscribe(final Capability capability, final MessageCallback callback) {
rlpxAgent.subscribeMessage(capability, callback);
}
@Override
public void subscribeConnect(final ConnectCallback callback) {
rlpxAgent.subscribeConnect(callback);
}
@Override
public void subscribeDisconnect(final DisconnectCallback callback) {
rlpxAgent.subscribeDisconnect(callback);
}
private void handlePeerBondedEvent(final PeerBondedEvent peerBondedEvent) {
rlpxAgent.connect(peerBondedEvent.getPeer());
}
@Override
public void close() {
stop();
}
@Override
public boolean isListening() {
return localNode.isReady();
}
@Override
public boolean isP2pEnabled() {
return true;
}
@Override
public boolean isDiscoveryEnabled() {
return peerDiscoveryAgent.isActive();
}
@Override
public Optional<EnodeURL> getLocalEnode() {
if (!localNode.isReady()) {
return Optional.empty();
}
return Optional.of(localNode.getPeer().getEnodeURL());
}
private void setLocalNode(
final String address, final int listeningPort, final int discoveryPort) {
if (localNode.isReady()) {
// Already set
return;
}
// override advertised host if we detect an external IP address via NAT manager
final String advertisedAddress = natService.queryExternalIPAddress(address);
final EnodeURL localEnode =
EnodeURLImpl.builder()
.nodeId(nodeId)
.ipAddress(advertisedAddress)
.listeningPort(listeningPort)
.discoveryPort(discoveryPort)
.build();
LOG.info("Enode URL {}", localEnode.toString());
LOG.info("Node address {}", Util.publicKeyToAddress(localEnode.getNodeId()));
localNode.setEnode(localEnode);
}
@Override
public void updateNodeRecord() {
peerDiscoveryAgent.updateNodeRecord();
}
public static class Builder {
private Vertx vertx;
private PeerDiscoveryAgent peerDiscoveryAgent;
private RlpxAgent rlpxAgent;
private NetworkingConfiguration config = NetworkingConfiguration.create();
private List<Capability> supportedCapabilities;
private NodeKey nodeKey;
private MaintainedPeers maintainedPeers = new MaintainedPeers();
private PeerPermissions peerPermissions = PeerPermissions.noop();
private NatService natService = new NatService(Optional.empty());
private boolean randomPeerPriority;
private MetricsSystem metricsSystem;
private StorageProvider storageProvider;
private Supplier<List<Bytes>> forkIdSupplier;
private Optional<TLSConfiguration> p2pTLSConfiguration = Optional.empty();
public P2PNetwork build() {
validate();
return doBuild();
}
private P2PNetwork doBuild() {
// Set up permissions
// Fold peer reputation into permissions
final PeerPermissionsDenylist misbehavingPeers = PeerPermissionsDenylist.create(500);
final PeerReputationManager reputationManager = new PeerReputationManager(misbehavingPeers);
peerPermissions = PeerPermissions.combine(peerPermissions, misbehavingPeers);
final MutableLocalNode localNode =
MutableLocalNode.create(config.getRlpx().getClientId(), 5, supportedCapabilities);
final PeerPrivileges peerPrivileges = new DefaultPeerPrivileges(maintainedPeers);
peerDiscoveryAgent = peerDiscoveryAgent == null ? createDiscoveryAgent() : peerDiscoveryAgent;
rlpxAgent = rlpxAgent == null ? createRlpxAgent(localNode, peerPrivileges) : rlpxAgent;
return new DefaultP2PNetwork(
localNode,
peerDiscoveryAgent,
rlpxAgent,
nodeKey,
config,
peerPermissions,
natService,
maintainedPeers,
reputationManager);
}
private void validate() {
checkState(nodeKey != null, "NodeKey must be set.");
checkState(config != null, "NetworkingConfiguration must be set.");
checkState(
supportedCapabilities != null && supportedCapabilities.size() > 0,
"Supported capabilities must be set and non-empty.");
checkState(metricsSystem != null, "MetricsSystem must be set.");
checkState(storageProvider != null, "StorageProvider must be set.");
checkState(peerDiscoveryAgent != null || vertx != null, "Vertx must be set.");
checkState(forkIdSupplier != null, "ForkIdSupplier must be set.");
}
private PeerDiscoveryAgent createDiscoveryAgent() {
return new VertxPeerDiscoveryAgent(
vertx,
nodeKey,
config.getDiscovery(),
peerPermissions,
natService,
metricsSystem,
storageProvider,
forkIdSupplier);
}
private RlpxAgent createRlpxAgent(
final LocalNode localNode, final PeerPrivileges peerPrivileges) {
return RlpxAgent.builder()
.nodeKey(nodeKey)
.config(config.getRlpx())
.peerPermissions(peerPermissions)
.peerPrivileges(peerPrivileges)
.localNode(localNode)
.metricsSystem(metricsSystem)
.randomPeerPriority(randomPeerPriority)
.p2pTLSConfiguration(p2pTLSConfiguration)
.build();
}
public Builder peerDiscoveryAgent(final PeerDiscoveryAgent peerDiscoveryAgent) {
checkNotNull(peerDiscoveryAgent);
this.peerDiscoveryAgent = peerDiscoveryAgent;
return this;
}
public Builder rlpxAgent(final RlpxAgent rlpxAgent) {
checkNotNull(rlpxAgent);
this.rlpxAgent = rlpxAgent;
return this;
}
public Builder randomPeerPriority(final boolean randomPeerPriority) {
this.randomPeerPriority = randomPeerPriority;
return this;
}
public Builder vertx(final Vertx vertx) {
checkNotNull(vertx);
this.vertx = vertx;
return this;
}
public Builder nodeKey(final NodeKey nodeKey) {
checkNotNull(nodeKey);
this.nodeKey = nodeKey;
return this;
}
public Builder config(final NetworkingConfiguration config) {
checkNotNull(config);
this.config = config;
return this;
}
public Builder supportedCapabilities(final List<Capability> supportedCapabilities) {
checkNotNull(supportedCapabilities);
this.supportedCapabilities = supportedCapabilities;
return this;
}
public Builder supportedCapabilities(final Capability... supportedCapabilities) {
this.supportedCapabilities = Arrays.asList(supportedCapabilities);
return this;
}
public Builder peerPermissions(final PeerPermissions peerPermissions) {
checkNotNull(peerPermissions);
this.peerPermissions = peerPermissions;
return this;
}
public Builder metricsSystem(final MetricsSystem metricsSystem) {
checkNotNull(metricsSystem);
this.metricsSystem = metricsSystem;
return this;
}
public Builder natService(final NatService natService) {
checkNotNull(natService);
this.natService = natService;
return this;
}
public Builder maintainedPeers(final MaintainedPeers maintainedPeers) {
checkNotNull(maintainedPeers);
this.maintainedPeers = maintainedPeers;
return this;
}
public Builder storageProvider(final StorageProvider storageProvider) {
checkNotNull(storageProvider);
this.storageProvider = storageProvider;
return this;
}
public Builder forkIdSupplier(final Supplier<List<Bytes>> forkIdSupplier) {
checkNotNull(forkIdSupplier);
this.forkIdSupplier = forkIdSupplier;
return this;
}
public Builder p2pTLSConfiguration(final Optional<TLSConfiguration> p2pTLSConfiguration) {
checkNotNull(p2pTLSConfiguration);
this.p2pTLSConfiguration = p2pTLSConfiguration;
return this;
}
}
}
| 1 | 26,617 | :+1: amazing that this has been missing since it was originally implemented | hyperledger-besu | java |
@@ -101,7 +101,7 @@ class OrcFileAppender<D> implements FileAppender<D> {
public long length() {
Preconditions.checkState(isClosed,
"Cannot return length while appending to an open file.");
- return writer.getRawDataSize();
+ return file.toInputFile().getLength();
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.orc;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.Schema;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.hadoop.HadoopOutputFile;
import org.apache.iceberg.io.FileAppender;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
/**
* Create a file appender for ORC.
*/
class OrcFileAppender<D> implements FileAppender<D> {
private final int batchSize;
private final OutputFile file;
private final Writer writer;
private final VectorizedRowBatch batch;
private final OrcRowWriter<D> valueWriter;
private boolean isClosed = false;
private final Configuration conf;
private final MetricsConfig metricsConfig;
OrcFileAppender(Schema schema, OutputFile file,
BiFunction<Schema, TypeDescription, OrcRowWriter<?>> createWriterFunc,
Configuration conf, Map<String, byte[]> metadata,
int batchSize, MetricsConfig metricsConfig) {
this.conf = conf;
this.file = file;
this.batchSize = batchSize;
this.metricsConfig = metricsConfig;
TypeDescription orcSchema = ORCSchemaUtil.convert(schema);
this.batch = orcSchema.createRowBatch(this.batchSize);
OrcFile.WriterOptions options = OrcFile.writerOptions(conf).useUTCTimestamp(true);
if (file instanceof HadoopOutputFile) {
options.fileSystem(((HadoopOutputFile) file).getFileSystem());
}
options.setSchema(orcSchema);
this.writer = newOrcWriter(file, options, metadata);
this.valueWriter = newOrcRowWriter(schema, orcSchema, createWriterFunc);
}
@Override
public void add(D datum) {
try {
valueWriter.write(datum, batch);
if (batch.size == this.batchSize) {
writer.addRowBatch(batch);
batch.reset();
}
} catch (IOException ioe) {
throw new RuntimeIOException(ioe, "Problem writing to ORC file %s", file.location());
}
}
@Override
public Metrics metrics() {
Preconditions.checkState(isClosed,
"Cannot return metrics while appending to an open file.");
return OrcMetrics.fromWriter(writer, metricsConfig);
}
@Override
public long length() {
Preconditions.checkState(isClosed,
"Cannot return length while appending to an open file.");
return writer.getRawDataSize();
}
@Override
public List<Long> splitOffsets() {
Preconditions.checkState(isClosed, "File is not yet closed");
try (Reader reader = ORC.newFileReader(file.toInputFile(), conf)) {
List<StripeInformation> stripes = reader.getStripes();
return Collections.unmodifiableList(Lists.transform(stripes, StripeInformation::getOffset));
} catch (IOException e) {
throw new RuntimeIOException(e, "Can't close ORC reader %s", file.location());
}
}
@Override
public void close() throws IOException {
if (!isClosed) {
try {
if (batch.size > 0) {
writer.addRowBatch(batch);
batch.reset();
}
} finally {
writer.close();
this.isClosed = true;
}
}
}
private static Writer newOrcWriter(OutputFile file,
OrcFile.WriterOptions options, Map<String, byte[]> metadata) {
final Path locPath = new Path(file.location());
final Writer writer;
try {
writer = OrcFile.createWriter(locPath, options);
} catch (IOException ioe) {
throw new RuntimeIOException(ioe, "Can't create file %s", locPath);
}
metadata.forEach((key, value) -> writer.addUserMetadata(key, ByteBuffer.wrap(value)));
return writer;
}
@SuppressWarnings("unchecked")
private static <D> OrcRowWriter<D> newOrcRowWriter(Schema schema,
TypeDescription orcSchema,
BiFunction<Schema, TypeDescription, OrcRowWriter<?>>
createWriterFunc) {
return (OrcRowWriter<D>) createWriterFunc.apply(schema, orcSchema);
}
}
| 1 | 27,255 | Just want to note here that although we do check `length` while writing to choose whether to close and start a new file, that doesn't yet happen for ORC, so it is fine to use a FS call in this method. | apache-iceberg | java |
@@ -67,8 +67,7 @@ public class PartitionField implements Serializable {
public boolean equals(Object other) {
if (this == other) {
return true;
- }
- if (other == null || getClass() != other.getClass()) {
+ } else if (!(other instanceof PartitionField)) {
return false;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.base.Objects;
import java.io.Serializable;
import org.apache.iceberg.transforms.Transform;
/**
* Represents a single field in a {@link PartitionSpec}.
*/
public class PartitionField implements Serializable {
private final int sourceId;
private final String name;
private final Transform<?, ?> transform;
PartitionField(int sourceId, String name, Transform<?, ?> transform) {
this.sourceId = sourceId;
this.name = name;
this.transform = transform;
}
/**
* @return the field id of the source field in the {@link PartitionSpec spec's} table schema
*/
public int sourceId() {
return sourceId;
}
/**
* @return the name of this partition field
*/
public String name() {
return name;
}
/**
* @return the transform used to produce partition values from source values
*/
public Transform<?, ?> transform() {
return transform;
}
@Override
public String toString() {
return name + ": " + transform + "(" + sourceId + ")";
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
PartitionField that = (PartitionField) other;
return sourceId == that.sourceId &&
name.equals(that.name) &&
transform.equals(that.transform);
}
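  // Illustrative note (editor's sketch): the instanceof check in the patch
  // above relaxes the original getClass() comparison, so a hypothetical
  // subclass instance with the same sourceId, name and transform would now
  // compare equal to a plain PartitionField, whereas before it never could.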
@Override
public int hashCode() {
return Objects.hashCode(sourceId, name, transform);
}
}
| 1 | 16,507 | nit: I feel multiple `if (...) { return ... }` is more readable than `if ... else ...`. | apache-iceberg | java |
@@ -32,7 +32,7 @@ def setup_test_logging():
root.debug("Already set up logging")
-setup_test_logging()
+# setup_test_logging()
logging.info("Bootstrapped test")
| 1 | """ unit test """
import difflib
import inspect
import json
import logging
import os
import sys
import tempfile
from io import StringIO
from logging import Handler
from random import random
from unittest.case import TestCase
from bzt.cli import CLI
from bzt.engine import SelfDiagnosable
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.six import u
from bzt.utils import run_once, EXE_SUFFIX, get_full_path
TestCase.shortDescription = lambda self: None  # suppress nose's habit of showing the docstring instead of the method name
@run_once
def setup_test_logging():
""" set up test logging for convenience in IDE """
root = logging.getLogger('')
if not root.handlers:
CLI.log = None
CLI.verbose = True
CLI.setup_logging(CLI)
else:
root.debug("Already set up logging")
setup_test_logging()
logging.info("Bootstrapped test")
def __dir__():
filename = inspect.getouterframes(inspect.currentframe())[1][1]
return os.path.dirname(filename)
# execute tests regardless of working directory
root_dir = __dir__() + '/../'
os.chdir(root_dir)
RESOURCES_DIR = os.path.join(__dir__(), 'resources') + os.path.sep
BUILD_DIR = __dir__() + "/../build/tmp/"
TEST_DIR = __dir__() + "/../build/test/"
BASE_CONFIG = __dir__() + "/../bzt/resources/base-config.yml"
def r(mul=5):
return 1 + int(mul * random()) / 1000.0
def rc():
return "%s00" % (int(4 * random()) + 1)
def err():
if int(50 * random()) == 0:
return "Some Error"
else:
return None
def random_sample(ts, label='', conc=1):
return ts, label, conc, r(), r(), r(), rc(), err()
def random_datapoint(n):
point = DataPoint(n)
overall = point[DataPoint.CURRENT].setdefault('', KPISet())
overall[KPISet.CONCURRENCY] = r(100)
overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000)) + 1
overall[KPISet.SUCCESSES] = int(overall[KPISet.SAMPLE_COUNT] * random())
overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] - overall[KPISet.SUCCESSES]
overall[KPISet.PERCENTILES]['25.0'] = r(10)
overall[KPISet.PERCENTILES]['50.0'] = r(20)
overall[KPISet.PERCENTILES]['75.0'] = r(30)
overall[KPISet.PERCENTILES]['90.0'] = r(40)
overall[KPISet.PERCENTILES]['99.0'] = r(50)
overall[KPISet.PERCENTILES]['100.0'] = r(100)
overall[KPISet.RESP_CODES][rc()] = 1
overall[KPISet.AVG_RESP_TIME] = r(100)
overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
overall.sum_rt = overall[KPISet.AVG_RESP_TIME] * overall[KPISet.SAMPLE_COUNT]
overall.sum_cn = overall[KPISet.AVG_CONN_TIME] * overall[KPISet.SAMPLE_COUNT]
overall.sum_lt = overall[KPISet.AVG_LATENCY] * overall[KPISet.SAMPLE_COUNT]
cumul = point[DataPoint.CUMULATIVE].setdefault('', KPISet())
cumul.merge_kpis(overall)
cumul.recalculate()
point.recalculate()
overall[KPISet.AVG_RESP_TIME] = r(100)
overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
return point
def close_reader_file(obj):
if obj and obj.file and obj.file.fds:
obj.file.fds.close()
class BZTestCase(TestCase):
def setUp(self):
self.captured_logger = None
self.log_recorder = None
self.func_args = []
self.func_results = None
def func_mock(self, *args, **kwargs):
self.func_args.append({'args': args, 'kargs': kwargs})
if isinstance(self.func_results, list):
return self.func_results.pop(0)
else:
return self.func_results
def sniff_log(self, log):
self.log_recorder = RecordingHandler()
self.captured_logger = log
self.captured_logger.addHandler(self.log_recorder)
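    # Illustrative usage (editor's sketch; the names below are hypothetical):
    #   self.sniff_log(self.obj.log)
    #   self.obj.startup()
    #   self.assertIn("started", self.log_recorder.info_buff.getvalue())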
def tearDown(self):
exc, _, _ = sys.exc_info()
if exc:
try:
if hasattr(self, 'obj') and isinstance(self.obj, SelfDiagnosable):
diags = self.obj.get_error_diagnostics()
if diags:
for line in diags:
logging.info(line)
except BaseException:
pass
if self.captured_logger:
self.captured_logger.removeHandler(self.log_recorder)
self.log_recorder.close()
def assertFilesEqual(self, expected, actual, replace_str="", replace_with=""):
# import shutil; shutil.copy(actual, expected)
with open(expected) as exp, open(actual) as act:
act_lines = [x.replace(replace_str, replace_with).rstrip() for x in act.readlines()]
exp_lines = [x.replace(replace_str, replace_with).rstrip() for x in exp.readlines()]
diff = list(difflib.unified_diff(exp_lines, act_lines))
if diff:
logging.info("Replacements are: %s => %s", replace_str, replace_with)
msg = "Failed asserting that two files are equal:\n" + actual + "\nversus\n" + expected + "\nDiff is:\n"
raise AssertionError(msg + "\n".join(diff))
def assertPathsEqual(self, p1, p2):
if not isinstance(p1, list):
p1 = [p1]
if not isinstance(p2, list):
p2 = [p2]
for num in range(len(p1)):
self.assertEqual(get_full_path(p1[num]), get_full_path(p2[num]))
def local_paths_config():
""" to fix relative paths """
dirname = os.path.dirname(__file__)
fds, fname = tempfile.mkstemp()
os.close(fds)
settings = {
"modules": {
"jmeter": {
"path": RESOURCES_DIR + "jmeter/jmeter-loader" + EXE_SUFFIX,
},
"grinder": {
"path": RESOURCES_DIR + "grinder/fake_grinder.jar",
},
"gatling": {
"path": RESOURCES_DIR + "gatling/gatling" + EXE_SUFFIX,
},
"junit": {
"path": dirname + "/../build/selenium/tools/junit/junit.jar",
"selenium-server": dirname + "/../build/selenium/selenium-server.jar"
}
}
}
jstring = json.dumps(settings)
with open(fname, 'w') as fds:
fds.write(jstring)
return fname
class RecordingHandler(Handler):
def __init__(self):
super(RecordingHandler, self).__init__()
self.info_buff = StringIO()
self.err_buff = StringIO()
self.debug_buff = StringIO()
self.warn_buff = StringIO()
def emit(self, record):
"""
:type record: logging.LogRecord
:return:
"""
if record.levelno == logging.INFO:
self.write_log(self.info_buff, record.msg, record.args)
elif record.levelno == logging.ERROR:
self.write_log(self.err_buff, record.msg, record.args)
elif record.levelno == logging.WARNING:
self.write_log(self.warn_buff, record.msg, record.args)
elif record.levelno == logging.DEBUG:
self.write_log(self.debug_buff, record.msg, record.args)
def write_log(self, buff, str_template, args):
str_template += "\n"
if args:
buff.write(u(str_template % args))
else:
buff.write(u(str_template))
| 1 | 15,123 | This is needed for us to work in IDE UTs | Blazemeter-taurus | py |
@@ -282,6 +282,11 @@ commands (copy, sync, etc), and with all other commands too.`,
Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Advanced: true,
+    }, {
+ Name: "starred_only",
+ Default: false,
+ Help: "Only show files that are starred.",
+ Advanced: true,
}, {
Name: "formats",
Default: "", | 1 | // Package drive interfaces with the Google Drive object storage system
package drive
// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
// * can have directory loops
// * files with / in name
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
drive_v2 "google.golang.org/api/drive/v2"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
// Constants
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
defaultBurst = 100
defaultExportExtensions = "docx,xlsx,pptx,svg"
scopePrefix = "https://www.googleapis.com/auth/"
defaultScope = "drive"
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
)
// Globals
var (
// Description of how to auth for this app
driveConfig = &oauth2.Config{
Scopes: []string{scopePrefix + "drive"},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
"application/x-vnd.oasis.opendocument.spreadsheet": ".ods",
"application/x-vnd.oasis.opendocument.text": ".odt",
"image/jpg": ".jpg",
"image/x-bmp": ".bmp",
"image/x-png": ".png",
"text/rtf": ".rtf",
}
_mimeTypeToExtension = map[string]string{
"application/epub+zip": ".epub",
"application/json": ".json",
"application/msword": ".doc",
"application/pdf": ".pdf",
"application/rtf": ".rtf",
"application/vnd.ms-excel": ".xls",
"application/vnd.oasis.opendocument.presentation": ".odp",
"application/vnd.oasis.opendocument.spreadsheet": ".ods",
"application/vnd.oasis.opendocument.text": ".odt",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"application/x-msmetafile": ".wmf",
"application/zip": ".zip",
"image/bmp": ".bmp",
"image/jpeg": ".jpg",
"image/pjpeg": ".pjpeg",
"image/png": ".png",
"image/svg+xml": ".svg",
"text/csv": ".csv",
"text/html": ".html",
"text/plain": ".txt",
"text/tab-separated-values": ".tsv",
}
_mimeTypeToExtensionLinks = map[string]string{
"application/x-link-desktop": ".desktop",
"application/x-link-html": ".link.html",
"application/x-link-url": ".url",
"application/x-link-webloc": ".webloc",
}
_mimeTypeCustomTransform = map[string]string{
"application/vnd.google-apps.script+json": "application/json",
}
fetchFormatsOnce sync.Once // make sure we fetch the export/import formats only once
_exportFormats map[string][]string // allowed export MIME type conversions
_importFormats map[string][]string // allowed import MIME type conversions
templatesOnce sync.Once // parse link templates only once
_linkTemplates map[string]*template.Template // available link types
)
// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
return scopes
}
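// Illustrative examples (editor's sketch, not in the original source):
//   driveScopes("")               -> ["https://www.googleapis.com/auth/drive"]
//   driveScopes("drive.readonly") -> ["https://www.googleapis.com/auth/drive.readonly"]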
// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}
}
return false
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "drive",
Description: "Google Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" {
err = oauthutil.Config("drive", name, m, driveConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nSetting your own is recommended.",
}, {
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
Value: "drive",
Help: "Full access all files, excluding Application Data Folder.",
}, {
Value: "drive.readonly",
Help: "Read-only access to file metadata and file contents.",
}, {
Value: "drive.file",
Help: "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
}, {
Value: "drive.appfolder",
Help: "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
}, {
Value: "drive.metadata.readonly",
Help: "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "team_drive",
Help: "ID of the Team Drive",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "auth_owner_only",
Default: false,
Help: "Only consider files owned by the authenticated user.",
Advanced: true,
}, {
Name: "use_trash",
Default: true,
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
Advanced: true,
}, {
Name: "skip_checksum_gphotos",
Default: false,
Help: `Skip MD5 checksum on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or
videos.
Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.
Google photos are identified by being in the "photos" space.
Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.`,
Advanced: true,
}, {
Name: "shared_with_me",
Default: false,
Help: `Only show files that are shared with me.
Instructs rclone to operate on your "Shared with me" folder (where
Google Drive lets you access the files and folders others have shared
with you).
This works both with the "list" (lsd, lsl, etc) and the "copy"
commands (copy, sync, etc), and with all other commands too.`,
Advanced: true,
}, {
Name: "trashed_only",
Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "formats",
Default: "",
Help: "Deprecated: see export_formats",
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "export_formats",
Default: defaultExportExtensions,
Help: "Comma separated list of preferred formats for downloading Google docs.",
Advanced: true,
}, {
Name: "import_formats",
Default: "",
Help: "Comma separated list of preferred formats for uploading Google docs.",
Advanced: true,
}, {
Name: "allow_import_name_change",
Default: false,
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
Advanced: true,
}, {
Name: "use_created_date",
Default: false,
			Help: `Use file created date instead of modified date.
Useful when downloading data and you want the creation date used in
place of the last modified date.
**WARNING**: This flag may have some unexpected consequences.
When uploading to your drive all files will be overwritten unless they
haven't been modified since their creation. And the inverse will occur
while downloading. This side effect can be avoided by using the
"--checksum" flag.
This feature was implemented to retain photos' capture dates as recorded
by google photos. You will first need to check the "Create a Google
Photos folder" option in your google drive settings. You can then copy
or move the photos locally and have the date the image was taken
(created) set as the modification date.`,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.
Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 100-1000. 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: "Impersonate this user when using a service account.",
Advanced: true,
}, {
Name: "alternate_export",
Default: false,
			Help: `Use alternate export URLs for google documents export.
If this option is set this instructs rclone to use an alternate set of
export URLs for drive documents. Users have reported that the
official export URLs can't export large documents, whereas these
unofficial ones can.
See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
Advanced: true,
}, {
Name: "upload_cutoff",
Default: defaultChunkSize,
Help: "Cutoff for switching to chunked upload",
Advanced: true,
}, {
Name: "chunk_size",
Default: defaultChunkSize,
			Help: `Upload chunk size. Must be a power of 2 >= 256k.
Making this larger will improve performance, but note that each chunk
is buffered in memory (one per transfer).
Reducing this will reduce memory usage but decrease performance.`,
Advanced: true,
}, {
Name: "acknowledge_abuse",
Default: false,
Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.`,
Advanced: true,
}, {
Name: "keep_revision_forever",
Default: false,
Help: "Keep new head revision of each file forever.",
Advanced: true,
}, {
Name: "size_as_quota",
Default: false,
Help: `Show sizes as storage quota usage, not actual size.
Show the size of a file as the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.
**WARNING**: This flag may have some unexpected consequences.
It is not recommended to set this flag in your config - the
recommended usage is using the flag form --drive-size-as-quota when
doing rclone ls/lsl/lsf/lsjson/etc only.
If you do use this flag for syncing (not recommended) then you will
need to use --ignore-size also.`,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "v2_download_min_size",
Default: fs.SizeSuffix(-1),
Help: "If Object's are greater, use drive v2 API to download.",
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: "pacer_burst",
Default: defaultBurst,
Help: "Number of API calls to allow without sleeping.",
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different drive configs.
This can be useful if you wish to do a server side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: "disable_http2",
Default: true,
Help: `Disable drive using http2
There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.
See: https://github.com/rclone/rclone/issues/3631
`,
Advanced: true,
}, {
Name: "stop_on_upload_limit",
Default: false,
Help: `Make upload limit errors be fatal
At the time of writing it is only possible to upload 750GB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.
Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}},
})
// register duplicate MIME types first
// this allows them to be used with mime.ExtensionsByType() but
// mime.TypeByExtension() will return the later registered MIME type
for _, m := range []map[string]string{
_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
} {
for mimeType, extension := range m {
if err := mime.AddExtensionType(extension, mimeType); err != nil {
log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
}
}
}
}
// Options defines the configuration for this backend
type Options struct {
Scope string `config:"scope"`
RootFolderID string `config:"root_folder_id"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
ExportExtensions string `config:"export_formats"`
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
KeepRevisionForever bool `config:"keep_revision_forever"`
SizeAsQuota bool `config:"size_as_quota"`
V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
PacerBurst int `config:"pacer_burst"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote drive server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
svc *drive.Service // the connection to the drive server
v2Svc *drive_v2.Service // used to create download links for the v2 api
client *http.Client // authorized client
rootFolderID string // the id of the root folder
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // To pace the API calls
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
}
type baseObject struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
}
type documentObject struct {
baseObject
url string // Download URL of this object
documentMimeType string // the original document MIME type
extLen int // The length of the added export extension
}
type linkObject struct {
baseObject
content []byte // The file content generated by a link template
extLen int // The length of the added export extension
}
// Object describes a drive object
type Object struct {
baseObject
url string // Download URL of this object
md5sum string // md5sum of the object
v2Download bool // generate v2 download link ondemand
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Google drive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if fserrors.ShouldRetry(err) {
return true, err
}
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
return true, err
}
if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
if f.opt.StopOnUploadLimit && gerr.Errors[0].Message == "User rate limit exceeded." {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
}
return true, err
}
}
}
return false, err
}
// parseDrivePath parses a drive 'path'
func parseDrivePath(path string) (root string, err error) {
root = strings.Trim(path, "/")
return
}
// User function to process a File item from list
//
// Should return true to finish processing
type listFn func(*drive.File) bool
func containsString(slice []string, s string) bool {
for _, e := range slice {
if e == s {
return true
}
}
return false
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
return info.Id, nil
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
var query []string
if !includeAll {
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
if f.opt.TrashedOnly {
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
}
query = append(query, q)
}
	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents).
	// We must not filter by parent when we try to list "ROOT" with drive-shared-with-me.
	// If we need to list files inside those shared folders, we must search without sharedWithMe.
parentsQuery := bytes.NewBufferString("(")
for _, dirID := range dirIDs {
if dirID == "" {
continue
}
if parentsQuery.Len() > 1 {
_, _ = parentsQuery.WriteString(" or ")
}
if f.opt.SharedWithMe && dirID == f.rootFolderID {
_, _ = parentsQuery.WriteString("sharedWithMe=true")
} else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
}
}
if parentsQuery.Len() > 1 {
_ = parentsQuery.WriteByte(')')
query = append(query, parentsQuery.String())
}
var stems []string
if title != "" {
searchTitle := f.opt.Enc.FromStandardName(title)
// Escaping the backslash isn't documented but seems to work
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
var titleQuery bytes.Buffer
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
if !directoriesOnly && !f.opt.SkipGdocs {
// If the search title has an extension that is in the export extensions add a search
// for the filename without the extension.
// Assume that export extensions don't contain escape sequences.
for _, ext := range f.exportExtensions {
if strings.HasSuffix(searchTitle, ext) {
stems = append(stems, title[:len(title)-len(ext)])
_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
}
}
}
_ = titleQuery.WriteByte(')')
query = append(query, titleQuery.String())
}
if directoriesOnly {
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
}
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
}
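	// Illustrative example (editor's sketch): for a non-trashed listing of
	// directories named "backup" under a single parent, the query joined
	// below looks like:
	//   trashed=false and ('<parentID>' in parents) and (name='backup')
	//     and mimeType='application/vnd.google-apps.folder'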
list := f.svc.Files.List()
if len(query) > 0 {
list.Q(strings.Join(query, " and "))
// fmt.Printf("list Query = %q\n", query)
}
if f.opt.ListChunk > 0 {
list.PageSize(f.opt.ListChunk)
}
list.SupportsAllDrives(true)
list.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
list.DriveId(f.opt.TeamDriveID)
list.Corpora("drive")
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
var fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)
OUTER:
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
return f.shouldRetry(err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
}
for _, item := range files.Files {
item.Name = f.opt.Enc.ToStandardName(item.Name)
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
found := false
for _, stem := range stems {
if stem == item.Name {
found = true
break
}
}
if !found {
continue
}
_, exportName, _, _ := f.findExportFormat(item)
if exportName == "" || exportName != title {
continue
}
}
if fn(item) {
found = true
break OUTER
}
}
if files.NextPageToken == "" {
break
}
list.PageToken(files.NextPageToken)
}
return
}
// Returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
switch {
case x == 0:
return true
case x < 0:
return false
default:
return (x & (x - 1)) == 0
}
}
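// Illustrative note (editor's sketch): for x > 0 the x&(x-1) trick clears the
// lowest set bit, so the result is zero exactly for powers of two:
//   isPowerOfTwo(8 * 1024 * 1024) == true   (the default chunk size)
//   isPowerOfTwo(3) == false                (3 & 2 == 2)
//   isPowerOfTwo(0) == true                 (zero is accepted by convention here)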
// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
if mimeTypeIn == "" {
return ""
}
mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
if err != nil {
return mimeTypeIn
}
mimeTypeOut := mimeTypeIn
if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
param["charset"] = "utf-8"
mimeTypeOut = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
}
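// Illustrative examples (editor's sketch):
//   fixMimeType("text/plain")                 -> "text/plain; charset=utf-8"
//   fixMimeType("text/html; charset=latin-1") -> unchanged (charset already set)
//   fixMimeType("application/pdf")            -> unchanged (not a text/* type)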
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in))
for k, v := range in {
for i, mt := range v {
v[i] = fixMimeType(mt)
}
out[fixMimeType(k)] = v
}
return out
}
func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}
func isLinkMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/x-link-")
}
// parseExtensions parses a list of comma separated extensions
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue
}
if len(extension) > 0 && extension[0] != '.' {
extension = "." + extension
}
mt := mime.TypeByExtension(extension)
if mt == "" {
return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
}
if !containsString(extensions, extension) {
extensions = append(extensions, extension)
mimeTypes = append(mimeTypes, mt)
}
}
}
return
}
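// Illustrative example (editor's sketch), using the MIME types registered in
// init() above:
//   parseExtensions("docx, xlsx") ->
//     extensions: [".docx", ".xlsx"]
//     mimeTypes:  ["application/vnd.openxmlformats-officedocument.wordprocessingml.document",
//                  "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"]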
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a team drive?\n")
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
client, err := createOAuthClient(opt, name, m)
if err != nil {
return errors.Wrap(err, "config team drive failed to create oauth client")
}
svc, err := drive.New(client)
if err != nil {
return errors.Wrap(err, "config team drive failed to make drive client")
}
fmt.Printf("Fetching team drive list...\n")
var driveIDs, driveNames []string
listTeamDrives := svc.Teamdrives.List().PageSize(100)
listFailed := false
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.TeamDriveList
err = newPacer(opt).Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(err)
})
if err != nil {
fmt.Printf("Listing team drives failed: %v\n", err)
listFailed = true
break
}
for _, drive := range teamDrives.TeamDrives {
driveIDs = append(driveIDs, drive.Id)
driveNames = append(driveNames, drive.Name)
}
if teamDrives.NextPageToken == "" {
break
}
listTeamDrives.PageToken(teamDrives.NextPageToken)
}
var driveID string
if !listFailed && len(driveIDs) == 0 {
fmt.Printf("No team drives found in your account")
} else {
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
}
m.Set("team_drive", driveID)
opt.TeamDriveID = driveID
return nil
}
// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *fs.Pacer {
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
}
ctxWithSpecialClient := oauthutil.Context(getClient(opt))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
var oAuthClient *http.Client
var err error
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client from service account")
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
}
}
return oAuthClient, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if !isPowerOfTwo(int64(cs)) {
return errors.Errorf("%v isn't a power of two", cs)
}
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "drive: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "drive: chunk size")
}
oAuthClient, err := createOAuthClient(opt, name, m)
if err != nil {
return nil, errors.Wrap(err, "drive: failed when making oauth client")
}
root, err := parseDrivePath(path)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: newPacer(opt),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive client")
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Drive v2 client")
}
}
// set root folder for a team drive or query the user root folder
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else {
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
} else {
return nil, err
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
}
f.dirCache = dircache.New(root, f.rootFolderID, f)
// Parse extensions
if opt.Extensions != "" {
if opt.ExportExtensions != defaultExportExtensions {
return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
}
opt.Extensions, opt.ExportExtensions = "", opt.Extensions
}
f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
if err != nil {
return nil, err
}
_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
if err != nil {
return nil, err
}
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
// unable to list folder so return old f
return f, nil
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
return f, fs.ErrorIsFile
}
// fmt.Printf("Root id %s", f.dirCache.RootID())
return f, nil
}
func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
}
size := info.Size
if f.opt.SizeAsQuota {
size = info.QuotaBytesUsed
}
return baseObject{
fs: f,
remote: remote,
id: info.Id,
modifiedDate: modifiedDate,
mimeType: info.MimeType,
bytes: size,
}
}
// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
break
}
}
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
}
// newDocumentObject creates a fs.Object for a google docs drive.File
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
mediaType, _, err := mime.ParseMediaType(exportMimeType)
if err != nil {
return nil, err
}
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
if f.opt.AlternateExport {
switch info.MimeType {
case "application/vnd.google-apps.drawing":
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
case "application/vnd.google-apps.document":
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.spreadsheet":
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.presentation":
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
}
}
baseObject := f.newBaseObject(remote+extension, info)
baseObject.bytes = -1
baseObject.mimeType = exportMimeType
return &documentObject{
baseObject: baseObject,
url: url,
documentMimeType: info.MimeType,
extLen: len(extension),
}, nil
}
// newLinkObject creates a fs.Object that represents a link to a google docs drive.File
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
}
var buf bytes.Buffer
err := t.Execute(&buf, struct {
URL, Title string
}{
info.WebViewLink, info.Name,
})
if err != nil {
return nil, errors.Wrap(err, "executing template failed")
}
baseObject := f.newBaseObject(remote+extension, info)
baseObject.bytes = int64(buf.Len())
baseObject.mimeType = exportMimeType
return &linkObject{
baseObject: baseObject,
content: buf.Bytes(),
extLen: len(extension),
}, nil
}
// newObjectWithInfo creates a fs.Object for any drive.File
//
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
return f.newRegularObject(remote, info), nil
}
extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}
// newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
switch {
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, nil
default:
// If item MimeType is in the ExportFormats then it is a google doc
if !isDocument {
fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
return nil, nil
}
if extension == "" {
fs.Debugf(remote, "No export formats found for %q", info.MimeType)
return nil, nil
}
if isLinkMimeType(exportMimeType) {
return f.newLinkObject(remote, info, extension, exportMimeType)
}
return f.newDocumentObject(remote, info, extension, exportMimeType)
}
}
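// Illustrative summary (editor's sketch) of the dispatch above:
//   has MD5 or size > 0      -> regular drive Object
//   --drive-skip-gdocs       -> skipped (nil, nil)
//   not an exportable gdoc   -> skipped (nil, nil)
//   link-type export MIME    -> linkObject
//   otherwise                -> documentObject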
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
}
remote = remote[:len(remote)-len(extension)]
obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
switch {
case err != nil:
return nil, err
case obj == nil:
return nil, fs.ErrorObjectNotFound
default:
return obj, nil
}
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
if exportName == leaf {
pathIDOut = item.Id
return true
}
if isDocument {
return false
}
}
if item.Name == leaf {
pathIDOut = item.Id
return true
}
return false
})
return pathIDOut, found, err
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
leaf = f.opt.Enc.FromStandardName(leaf)
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
createInfo := &drive.File{
Name: leaf,
Description: leaf,
MimeType: driveFolderType,
Parents: []string{pathID},
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Fields("id").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", err
}
return info.Id, nil
}
// isAuthOwned checks if any of the item owners is the authenticated owner
func isAuthOwned(item *drive.File) bool {
for _, owner := range item.Owners {
if owner.Me {
return true
}
}
return false
}
// linkTemplate returns the Template for a MIME type or nil if the
// MIME type does not represent a link
func linkTemplate(mt string) *template.Template {
templatesOnce.Do(func() {
_linkTemplates = map[string]*template.Template{
"application/x-link-desktop": template.Must(
template.New("application/x-link-desktop").Parse(desktopTemplate)),
"application/x-link-html": template.Must(
template.New("application/x-link-html").Parse(htmlTemplate)),
"application/x-link-url": template.Must(
template.New("application/x-link-url").Parse(urlTemplate)),
"application/x-link-webloc": template.Must(
template.New("application/x-link-webloc").Parse(weblocTemplate)),
}
})
return _linkTemplates[mt]
}
func (f *Fs) fetchFormats() {
fetchFormatsOnce.Do(func() {
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().
Fields("exportFormats,importFormats").
Do()
return f.shouldRetry(err)
})
if err != nil {
fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
_exportFormats = map[string][]string{}
_importFormats = map[string][]string{}
return
}
_exportFormats = fixMimeTypeMap(about.ExportFormats)
_importFormats = fixMimeTypeMap(about.ImportFormats)
})
}
// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats() map[string][]string {
f.fetchFormats()
return _exportFormats
}
// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats() map[string][]string {
f.fetchFormats()
return _importFormats
}
// findExportFormatByMimeType works out the optimum export settings
// for the given MIME type.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
extension, mimeType string, isDocument bool) {
exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
if isDocument {
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
for _, emt := range exportMimeTypes {
if emt == _mimeType {
return _extension, emt, true
}
if _mimeType == _mimeTypeCustomTransform[emt] {
return _extension, emt, true
}
}
}
}
// else return empty
return "", "", isDocument
}
// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
if extension != "" {
filename = item.Name + extension
}
return
}
// findImportFormat finds the matching upload MIME type for a file
// If the given MIME type is in importMimeTypes, the matching upload
// MIME type is returned
//
// When no match is found "" is returned.
func (f *Fs) findImportFormat(mimeType string) string {
mimeType = fixMimeType(mimeType)
ifs := f.importFormats()
for _, mt := range f.importMimeTypes {
if mt == mimeType {
importMimeTypes := ifs[mimeType]
if l := len(importMimeTypes); l > 0 {
if l > 1 {
fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
}
return importMimeTypes[0]
}
}
}
return ""
}
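// Illustrative example (editor's sketch): with --drive-import-formats docx,
// an upload whose MIME type is the docx type typically maps to
// "application/vnd.google-apps.document", since that is the first (and
// usually only) entry Drive reports in its importFormats table for docx.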
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
if err != nil {
iErr = err
return true
}
if entry != nil {
entries = append(entries, entry)
}
return false
})
if err != nil {
return nil, err
}
if iErr != nil {
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil
}
// listREntry is a task to be executed by a listRRunner
type listREntry struct {
id, path string
}
// listRSlices is a helper struct to sort two slices at once
type listRSlices struct {
dirs []string
paths []string
}
func (s listRSlices) Sort() {
sort.Sort(s)
}
func (s listRSlices) Len() int {
return len(s.dirs)
}
func (s listRSlices) Swap(i, j int) {
s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
}
func (s listRSlices) Less(i, j int) bool {
return s.dirs[i] < s.dirs[j]
}
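// Illustrative example (editor's sketch): sorting keeps the two slices
// aligned, so sort.SearchStrings(dirs, parent) in listRRunner can recover the
// matching path by index:
//   dirs:  ["idB", "idA"]  ->  ["idA", "idB"]
//   paths: ["b",   "a"]    ->  ["a",   "b"]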
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and the runner returns. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
var dirs []string
var paths []string
for dir := range in {
dirs = append(dirs[:0], dir.id)
paths = append(paths[:0], dir.path)
waitloop:
for i := 1; i < grouping; i++ {
select {
case d, ok := <-in:
if !ok {
break waitloop
}
dirs = append(dirs, d.id)
paths = append(paths, d.path)
default:
}
}
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
for _, parent := range item.Parents {
var i int
// If only one item in paths then no need to search for the ID
// assuming google drive is doing its job properly.
//
			// Note that we are at the root when len(paths) == 1 && paths[0] == ""
if len(paths) == 1 {
// don't check parents at root because
// - shared with me items have no parents at the root
// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
i = 0
} else {
// only handle parents that are in the requested dirs list if not at root
i = sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
}
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(remote, item)
if err != nil {
iErr = err
return true
}
err = cb(entry)
if err != nil {
iErr = err
return true
}
}
return false
})
for range dirs {
wg.Done()
}
if iErr != nil {
out <- iErr
return
}
if err != nil {
out <- err
return
}
}
out <- nil
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
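//
// A note on the implementation below: the callback pushes newly found
// directories into a bounded in channel and, when it is full, spills them
// into an overflow slice instead of blocking, since a worker blocked on
// send while the others wait on the WaitGroup would deadlock. The
// draining goroutine then re-feeds at most half the channel capacity per
// pass, which (as the inline comment notes) keeps re-fed entries from
// landing in overflow again immediately.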
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
const (
grouping = 50
inputBuffer = 1000
)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
in := make(chan listREntry, inputBuffer)
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
cb := func(entry fs.DirEntry) error {
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
select {
case in <- listREntry{d.ID(), d.Remote()}:
wg.Add(1)
default:
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
return list.Add(entry)
}
wg.Add(1)
in <- listREntry{directoryID, dir}
for i := 0; i < fs.Config.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, grouping)
}
go func() {
		// wait until all directories are processed
wg.Wait()
// if the input channel overflowed add the collected entries to the channel now
for len(overflow) > 0 {
mu.Lock()
l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
if l > inputBuffer/2 {
l = inputBuffer / 2
}
wg.Add(l)
for _, d := range overflow[:l] {
in <- d
}
overflow = overflow[l:]
mu.Unlock()
// wait again for the completion of all directories
wg.Wait()
}
mu.Lock()
if in != nil {
// notify all workers to exit
close(in)
in = nil
}
mu.Unlock()
}()
	// wait for all workers to finish
for i := 0; i < fs.Config.Checkers; i++ {
e := <-out
mu.Lock()
// if one worker returns an error early, close the input so all other workers exit
if e != nil && in != nil {
err = e
close(in)
in = nil
}
mu.Unlock()
}
close(out)
if err != nil {
return err
}
err = list.Flush()
if err != nil {
return err
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}
return nil
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
// When the drive.File cannot be represented as a fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id)
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
return d, nil
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
return f.newObjectWithInfo(remote, item)
}
return nil, nil
}
// Creates a drive.File info from the parameters passed in.
//
// Used to create new objects
func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
leaf = f.opt.Enc.FromStandardName(leaf)
// Define the metadata for the file we are going to create.
createInfo := &drive.File{
Name: leaf,
Description: leaf,
Parents: []string{directoryID},
ModifiedTime: modTime.Format(timeFormatOut),
}
return createInfo, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime(ctx)
srcMimeType := fs.MimeTypeFromName(remote)
srcExt := path.Ext(remote)
exportExt := ""
importMimeType := ""
if f.importMimeTypes != nil && !f.opt.SkipGdocs {
importMimeType = f.findImportFormat(srcMimeType)
if isInternalMimeType(importMimeType) {
remote = remote[:len(remote)-len(srcExt)]
exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
createInfo, err := f.createFileInfo(ctx, remote, modTime)
if err != nil {
return nil, err
}
if importMimeType != "" {
createInfo.MimeType = importMimeType
} else {
createInfo.MimeType = fs.MimeTypeFromName(remote)
}
var info *drive.File
if size >= 0 && size < int64(f.opt.UploadCutoff) {
// Make the API request to upload metadata and file data.
// Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType(srcMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
} else {
// Upload the file in chunks
info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
if err != nil {
return nil, err
}
}
return f.newObjectWithInfo(remote, info)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
dstDir := dirs[0]
for _, srcDir := range dirs[1:] {
		// list the objects
infos := []*drive.File{}
_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
infos = append(infos, info)
return false
})
if err != nil {
return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", info.Name)
// Move the file into the destination
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(info.Id, nil).
RemoveParents(srcDir.ID()).
AddParents(dstDir.ID()).
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.rmdir(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
}
return nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
var err error
if useTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(directoryID, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(directoryID).
Fields("").
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
})
}
// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.root, dir)
dc := f.dirCache
directoryID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
}
var trashedFiles = false
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true
}
fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
trashedFiles = true
return false
})
if err != nil {
return err
}
if found {
return errors.Errorf("directory not empty")
}
if root != "" {
// trash the directory if it had trashed files
// in or the user wants to trash, otherwise
// delete it.
err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
if err != nil {
return err
}
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
if ext != "" {
if !strings.HasSuffix(remote, ext) {
fs.Debugf(src, "Can't copy - not same document type")
return nil, fs.ErrorCantCopy
}
remote = remote[:len(remote)-len(ext)]
}
// Look to see if there is an existing object
existingObject, _ := f.NewObject(ctx, remote)
createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(srcObj.id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
newObject, err := f.newObjectWithInfo(remote, info)
if err != nil {
return nil, err
}
if existingObject != nil {
err = existingObject.Remove(ctx)
if err != nil {
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
}
return newObject, nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
err = f.pacer.Call(func() (bool, error) {
if f.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields("").
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
})
f.dirCache.ResetRoot()
if err != nil {
return err
}
return nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(err)
})
if err != nil {
return err
}
return nil
}
// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
}
q := about.StorageQuota
usage := &fs.Usage{
Used: fs.NewUsageValue(q.UsageInDrive), // bytes in use
Trashed: fs.NewUsageValue(q.UsageInDriveTrash), // bytes in trash
Other: fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
}
if q.Limit > 0 {
usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used
usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
if ext != "" {
if !strings.HasSuffix(remote, ext) {
fs.Debugf(src, "Can't move - not same document type")
return nil, fs.ErrorCantMove
}
remote = remote[:len(remote)-len(ext)]
}
_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
if err != nil {
return nil, err
}
// Temporary Object under construction
dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
dstParents := strings.Join(dstInfo.Parents, ",")
dstInfo.Parents = nil
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(srcObj.id, dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.newObjectWithInfo(remote, info)
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
id = o.(fs.IDer).ID()
}
permission := &drive.Permission{
AllowFileDiscovery: false,
Role: "reader",
Type: "anyone",
}
err = f.pacer.Call(func() (bool, error) {
// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
_, err = f.svc.Permissions.Create(id, permission).
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", err
}
return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src parent
var srcDirectoryID string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
}
if err != nil {
return err
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
patch := drive.File{
Name: leaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(srcID, &patch).
RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID).
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return err
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if startPageToken == "" {
startPageToken, err = f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
continue
}
}
fs.Debugf(f, "Checking for changes on remote")
startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Do()
return f.shouldRetry(err)
})
if err != nil {
return
}
return startPageToken.StartPageToken, nil
}
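// changeNotifyRunner fetches batches of changes and notifies the affected
// paths. It follows the usual page-token pagination contract, roughly
// (sketch only; fetch and process stand in for the Changes.List call and
// the dirCache/notifyFunc handling below):
//
//	for {
//		page := fetch(pageToken)
//		process(page)
//		switch {
//		case page.NewStartPageToken != "":
//			return page.NewStartPageToken // resume point for the next poll
//		case page.NextPageToken != "":
//			pageToken = page.NextPageToken
//		default:
//			return // neither token set
//		}
//	}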
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
pageToken := startPageToken
for {
var changeList *drive.ChangeList
err = f.pacer.Call(func() (bool, error) {
changesCall := f.svc.Changes.List(pageToken).
Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
if f.opt.ListChunk > 0 {
changesCall.PageSize(f.opt.ListChunk)
}
changesCall.SupportsAllDrives(true)
changesCall.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
changesCall.DriveId(f.opt.TeamDriveID)
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(err)
})
if err != nil {
return
}
type entryType struct {
path string
entryType fs.EntryType
}
var pathsToClear []entryType
for _, change := range changeList.Changes {
// find the previous path
if path, ok := f.dirCache.GetInv(change.FileId); ok {
if change.File != nil && change.File.MimeType != driveFolderType {
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
} else {
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
}
}
// find the new path
if change.File != nil {
change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
changeType := fs.EntryDirectory
if change.File.MimeType != driveFolderType {
changeType = fs.EntryObject
}
// translate the parent dir of this object
if len(change.File.Parents) > 0 {
for _, parent := range change.File.Parents {
if parentPath, ok := f.dirCache.GetInv(parent); ok {
// and append the drive file name to compute the full file name
newPath := path.Join(parentPath, change.File.Name)
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
}
}
} else { // a true root object that is changed
pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
}
}
}
visitedPaths := make(map[string]struct{})
for _, entry := range pathsToClear {
if _, ok := visitedPaths[entry.path]; ok {
continue
}
visitedPaths[entry.path] = struct{}{}
notifyFunc(entry.path, entry.entryType)
}
switch {
case changeList.NewStartPageToken != "":
return changeList.NewStartPageToken, nil
case changeList.NextPageToken != "":
pageToken = changeList.NextPageToken
default:
return
}
}
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *baseObject) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *baseObject) String() string {
return o.remote
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *baseObject) Remote() string {
return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return "", nil
}
// Size returns the size of an object in bytes
func (o *baseObject) Size() int64 {
return o.bytes
}
// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, "", "", "", false, fs.ErrorObjectNotFound
}
return nil, "", "", "", false, err
}
found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
info = item
return true
}
if isDocument {
return false
}
}
if item.Name == leaf {
info = item
return true
}
return false
})
if err != nil {
return nil, "", "", "", false, err
}
if !found {
return nil, "", "", "", false, fs.ErrorObjectNotFound
}
return
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
if err != nil {
fs.Debugf(o, "Failed to read mtime from object: %v", err)
return time.Now()
}
return modTime
}
// SetModTime sets the modification time of the drive fs object
func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
// New metadata
updateInfo := &drive.File{
ModifiedTime: modTime.Format(timeFormatOut),
}
// Set modified date
var info *drive.File
err := o.fs.pacer.Call(func() (bool, error) {
var err error
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Do()
return o.fs.shouldRetry(err)
})
if err != nil {
return err
}
// Update info from read data
o.modifiedDate = info.ModifiedTime
return nil
}
// Storable returns a boolean as to whether this object is storable
func (o *baseObject) Storable() bool {
return true
}
// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
if url == "" {
return nil, nil, errors.New("forbidden to download - check sharing permission")
}
req, err = http.NewRequest(method, url, nil)
if err != nil {
return req, nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
err = googleapi.CheckResponse(res)
if err != nil {
_ = res.Body.Close() // ignore error
}
}
return o.fs.shouldRetry(err)
})
if err != nil {
return req, nil, err
}
return req, res, nil
}
// openDocumentFile represents a documentObject open for reading.
// It updates the object size after a successful read.
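//
// This is the usual decorating-ReadCloser pattern: the exported size of a
// Google doc is only known once the body has actually been read, so the
// new size is stored back on the object only after a clean EOF. A short
// read caused by an error must not shrink o.bytes, hence the errored
// flag checked in Close.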
type openDocumentFile struct {
o *documentObject // Object we are reading for
in io.ReadCloser // reading from here
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
errored bool // whether we have encountered an error during reading
}
// Read bytes from the object - see io.Reader
func (file *openDocumentFile) Read(p []byte) (n int, err error) {
n, err = file.in.Read(p)
file.bytes += int64(n)
if err != nil && err != io.EOF {
file.errored = true
}
if err == io.EOF {
file.eof = true
}
return
}
// Close the object and update bytes read
func (file *openDocumentFile) Close() (err error) {
// If end of file, update bytes read
if file.eof && !file.errored {
fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
file.o.bytes = file.bytes
}
return file.in.Close()
}
// Check it satisfies the interfaces
var _ io.ReadCloser = (*openDocumentFile)(nil)
// isGoogleError checks to see if err is a googleapi.Error of type what
func isGoogleError(err error, what string) bool {
if gerr, ok := err.(*googleapi.Error); ok {
for _, error := range gerr.Errors {
if error.Reason == what {
return true
}
}
}
return false
}
// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
_, res, err := o.httpResponse(ctx, url, "GET", options)
if err != nil {
if isGoogleError(err, "cannotDownloadAbusiveFile") {
if o.fs.opt.AcknowledgeAbuse {
// Retry acknowledging abuse
if strings.ContainsRune(url, '?') {
url += "&"
} else {
url += "?"
}
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
}
}
if err != nil {
return nil, errors.Wrap(err, "open file failed")
}
}
return res.Body, nil
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {
v2File, err = o.fs.v2Svc.Files.Get(o.id).
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
return o.fs.shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
o.url = v2File.DownloadUrl
o.v2Download = false
}
}
return o.baseObject.open(ctx, o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted.
var offset, end int64 = 0, -1
var newOptions = options[:0]
for _, o := range options {
// Note that Range requests don't work on Google docs:
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
// So do a subset of them manually
switch x := o.(type) {
case *fs.RangeOption:
offset, end = x.Start, x.End
case *fs.SeekOption:
offset, end = x.Offset, -1
default:
newOptions = append(newOptions, o)
}
}
options = newOptions
if offset != 0 {
return nil, errors.New("partial downloads are not supported while exporting Google Documents")
}
in, err = o.baseObject.open(ctx, o.url, options...)
if in != nil {
in = &openDocumentFile{o: o, in: in}
}
if end >= 0 {
in = readers.NewLimitedReadCloser(in, end-offset+1)
}
return
}
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
var data = o.content
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(int64(len(data)))
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if l := int64(len(data)); offset > l {
offset = l
}
data = data[offset:]
if limit != -1 && limit < int64(len(data)) {
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo) (info *drive.File, err error) {
// Make the API request to upload metadata and file data.
size := src.Size()
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Do()
return o.fs.shouldRetry(err)
})
return
}
// Upload the file in chunks
return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src)
updateInfo := &drive.File{
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
if err != nil {
return err
}
switch newO := newO.(type) {
case *Object:
*o = *newO
default:
return errors.New("object type changed by update")
}
return nil
}
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src)
importMimeType := ""
updateInfo := &drive.File{
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
}
if importMimeType != o.documentMimeType {
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
}
updateInfo.MimeType = importMimeType
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
remote := src.Remote()
remote = remote[:len(remote)-o.extLen]
newO, err := o.fs.newObjectWithInfo(remote, info)
if err != nil {
return err
}
switch newO := newO.(type) {
case *documentObject:
*o = *newO
default:
return errors.New("object type changed by update")
}
return nil
}
func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errors.New("cannot update link files")
}
// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = o.fs.svc.Files.Update(o.id, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = o.fs.svc.Files.Delete(o.id).
Fields("").
SupportsAllDrives(true).
Do()
}
return o.fs.shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string {
return o.mimeType
}
// ID returns the ID of the Object if known, or "" if not
func (o *baseObject) ID() string {
return o.id
}
func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
// templates for document link files
const (
urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>URL</key>
<string>{{ .URL }}</string>
</dict>
</plist>
`
desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Type=Link
`
htmlTemplate = `<html>
<head>
<meta http-equiv="refresh" content="0; url={{ .URL }}" />
<title>{{ .Title }}</title>
</head>
<body>
Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.Object = (*documentObject)(nil)
_ fs.MimeTyper = (*documentObject)(nil)
_ fs.IDer = (*documentObject)(nil)
_ fs.Object = (*linkObject)(nil)
_ fs.MimeTyper = (*linkObject)(nil)
_ fs.IDer = (*linkObject)(nil)
)
| 1 | 10396 | File is not `goimports`-ed (from `goimports`) | rclone-rclone | go |
@@ -486,14 +486,14 @@ func DupSecOpt(src string) []string {
con["level"] == "" {
return nil
}
- return []string{"label:user:" + con["user"],
- "label:role:" + con["role"],
- "label:type:" + con["type"],
- "label:level:" + con["level"]}
+ return []string{"label=user:" + con["user"],
+ "label=role:" + con["role"],
+ "label=type:" + con["type"],
+ "label=level:" + con["level"]}
}
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes
func DisableSecOpt() []string {
- return []string{"label:disable"}
+ return []string{"label=disable"}
} | 1 | // +build linux
package selinux
import (
"bufio"
"crypto/rand"
"encoding/binary"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"github.com/opencontainers/runc/libcontainer/system"
)
const (
Enforcing = 1
Permissive = 0
Disabled = -1
selinuxDir = "/etc/selinux/"
selinuxConfig = selinuxDir + "config"
selinuxTypeTag = "SELINUXTYPE"
selinuxTag = "SELINUX"
selinuxPath = "/sys/fs/selinux"
xattrNameSelinux = "security.selinux"
stRdOnly = 0x01
)
var (
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
mcsList = make(map[string]bool)
mcsLock sync.Mutex
selinuxfs = "unknown"
selinuxEnabled = false // Stores whether selinux is currently enabled
selinuxEnabledChecked = false // Stores whether selinux enablement has been checked or established yet
)
type SELinuxContext map[string]string
// SetDisabled disables selinux support for the package
func SetDisabled() {
selinuxEnabled, selinuxEnabledChecked = false, true
}
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
// filesystem or an empty string if no mountpoint is found. Selinuxfs is
// a proc-like pseudo-filesystem that exposes the selinux policy API to
// processes. The existence of an selinuxfs mount is used to determine
// whether selinux is currently enabled or not.
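//
// For reference, a typical selinuxfs line in /proc/self/mountinfo looks
// like:
//
//	26 19 0:20 / /sys/fs/selinux rw,relatime - selinuxfs selinuxfs rw
//
// Fields are space separated and the mount point is field index 4, so
// fields[4] below becomes "/sys/fs/selinux". (The numeric fields are
// illustrative.)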
func getSelinuxMountPoint() string {
if selinuxfs != "unknown" {
return selinuxfs
}
selinuxfs = ""
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return selinuxfs
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
// Safe as mountinfo encodes mountpoints with spaces as \040.
sepIdx := strings.Index(txt, " - ")
if sepIdx == -1 {
continue
}
if !strings.Contains(txt[sepIdx:], "selinuxfs") {
continue
}
fields := strings.Split(txt, " ")
if len(fields) < 5 {
continue
}
selinuxfs = fields[4]
break
}
if selinuxfs != "" {
var buf syscall.Statfs_t
syscall.Statfs(selinuxfs, &buf)
if (buf.Flags & stRdOnly) == 1 {
selinuxfs = ""
}
}
return selinuxfs
}
// SelinuxEnabled returns whether selinux is currently enabled.
func SelinuxEnabled() bool {
if selinuxEnabledChecked {
return selinuxEnabled
}
selinuxEnabledChecked = true
if fs := getSelinuxMountPoint(); fs != "" {
if con, _ := Getcon(); con != "kernel" {
selinuxEnabled = true
}
}
return selinuxEnabled
}
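// readConfig scans /etc/selinux/config for a KEY=value assignment and
// returns the (unquoted) value for the requested key. For example, given
// the line
//
//	SELINUXTYPE=targeted
//
// assignRegex captures groups[1] == "SELINUXTYPE" and groups[2] ==
// "targeted", so readConfig("SELINUXTYPE") returns "targeted".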
func readConfig(target string) (value string) {
var (
val, key string
bufin *bufio.Reader
)
in, err := os.Open(selinuxConfig)
if err != nil {
return ""
}
defer in.Close()
bufin = bufio.NewReader(in)
for done := false; !done; {
var line string
if line, err = bufin.ReadString('\n'); err != nil {
if err != io.EOF {
return ""
}
done = true
}
line = strings.TrimSpace(line)
if len(line) == 0 {
// Skip blank lines
continue
}
if line[0] == ';' || line[0] == '#' {
// Skip comments
continue
}
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
if key == target {
return strings.Trim(val, "\"")
}
}
}
return ""
}
func getSELinuxPolicyRoot() string {
return selinuxDir + readConfig(selinuxTypeTag)
}
func readCon(name string) (string, error) {
var val string
in, err := os.Open(name)
if err != nil {
return "", err
}
defer in.Close()
_, err = fmt.Fscanf(in, "%s", &val)
return val, err
}
// Setfilecon sets the SELinux label for this path or returns an error.
func Setfilecon(path string, scon string) error {
return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
}
// Getfilecon returns the SELinux label for this path or returns an error.
func Getfilecon(path string) (string, error) {
con, err := system.Lgetxattr(path, xattrNameSelinux)
if err != nil {
return "", err
}
// Trim the NUL byte at the end of the byte buffer, if present.
if len(con) > 0 && con[len(con)-1] == '\x00' {
con = con[:len(con)-1]
}
return string(con), nil
}
func Setfscreatecon(scon string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
}
func Getfscreatecon() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
}
// Getcon returns the SELinux label of the current process thread, or an error.
func Getcon() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
}
// Getpidcon returns the SELinux label of the given pid, or an error.
func Getpidcon(pid int) (string, error) {
return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
}
func Getexeccon() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}
func writeCon(name string, val string) error {
out, err := os.OpenFile(name, os.O_WRONLY, 0)
if err != nil {
return err
}
defer out.Close()
if val != "" {
_, err = out.Write([]byte(val))
} else {
_, err = out.Write(nil)
}
return err
}
func Setexeccon(scon string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
}
func (c SELinuxContext) Get() string {
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
}
func NewContext(scon string) SELinuxContext {
c := make(SELinuxContext)
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
c["user"] = con[0]
c["role"] = con[1]
c["type"] = con[2]
c["level"] = con[3]
}
return c
}
func ReserveLabel(scon string) {
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
mcsAdd(con[3])
}
}
func selinuxEnforcePath() string {
return fmt.Sprintf("%s/enforce", selinuxPath)
}
func SelinuxGetEnforce() int {
var enforce int
enforceS, err := readCon(selinuxEnforcePath())
if err != nil {
return -1
}
enforce, err = strconv.Atoi(string(enforceS))
if err != nil {
return -1
}
return enforce
}
func SelinuxSetEnforce(mode int) error {
return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
}
func SelinuxGetEnforceMode() int {
switch readConfig(selinuxTag) {
case "enforcing":
return Enforcing
case "permissive":
return Permissive
}
return Disabled
}
func mcsAdd(mcs string) error {
mcsLock.Lock()
defer mcsLock.Unlock()
if mcsList[mcs] {
return fmt.Errorf("MCS Label already exists")
}
mcsList[mcs] = true
return nil
}
func mcsDelete(mcs string) {
mcsLock.Lock()
mcsList[mcs] = false
mcsLock.Unlock()
}
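// IntToMcs maps an integer id onto an MCS label "s0:cA,cB" by walking a
// triangle of category pairs. Worked examples with catRange = 1024: for
// id 1 the loop never runs (ORD = 1 <= TIER = 1024), then TIER becomes
// 1024-1024 = 0 and ORD stays 1, giving "s0:c0,c1"; for id 1025 the loop
// reduces ORD to 1 with TIER = 1023, then TIER becomes 1 and ORD becomes
// 2, giving "s0:c1,c2". The 523776 bound equals 1023*1024/2, the number
// of unordered category pairs over 1024 categories.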
func IntToMcs(id int, catRange uint32) string {
var (
SETSIZE = int(catRange)
TIER = SETSIZE
ORD = id
)
if id < 1 || id > 523776 {
return ""
}
for ORD > TIER {
ORD = ORD - TIER
TIER--
}
TIER = SETSIZE - TIER
ORD = ORD + TIER
return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
}
func uniqMcs(catRange uint32) string {
var (
n uint32
c1, c2 uint32
mcs string
)
for {
binary.Read(rand.Reader, binary.LittleEndian, &n)
c1 = n % catRange
binary.Read(rand.Reader, binary.LittleEndian, &n)
c2 = n % catRange
if c1 == c2 {
continue
} else {
if c1 > c2 {
t := c1
c1 = c2
c2 = t
}
}
mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
if err := mcsAdd(mcs); err != nil {
continue
}
break
}
return mcs
}
func FreeLxcContexts(scon string) {
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
mcsDelete(con[3])
}
}
func GetLxcContexts() (processLabel string, fileLabel string) {
var (
val, key string
bufin *bufio.Reader
)
if !SelinuxEnabled() {
return "", ""
}
lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
in, err := os.Open(lxcPath)
if err != nil {
return "", ""
}
defer in.Close()
bufin = bufio.NewReader(in)
for done := false; !done; {
var line string
if line, err = bufin.ReadString('\n'); err != nil {
if err == io.EOF {
done = true
} else {
goto exit
}
}
line = strings.TrimSpace(line)
if len(line) == 0 {
// Skip blank lines
continue
}
if line[0] == ';' || line[0] == '#' {
// Skip comments
continue
}
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
if key == "process" {
processLabel = strings.Trim(val, "\"")
}
if key == "file" {
fileLabel = strings.Trim(val, "\"")
}
}
}
if processLabel == "" || fileLabel == "" {
return "", ""
}
exit:
// mcs := IntToMcs(os.Getpid(), 1024)
mcs := uniqMcs(1024)
scon := NewContext(processLabel)
scon["level"] = mcs
processLabel = scon.Get()
scon = NewContext(fileLabel)
scon["level"] = mcs
fileLabel = scon.Get()
return processLabel, fileLabel
}
func SecurityCheckContext(val string) error {
return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
}
func CopyLevel(src, dest string) (string, error) {
if src == "" {
return "", nil
}
if err := SecurityCheckContext(src); err != nil {
return "", err
}
if err := SecurityCheckContext(dest); err != nil {
return "", err
}
scon := NewContext(src)
tcon := NewContext(dest)
mcsDelete(tcon["level"])
mcsAdd(scon["level"])
tcon["level"] = scon["level"]
return tcon.Get(), nil
}
// Prevent users from relabeling system files
func badPrefix(fpath string) error {
var badprefixes = []string{"/usr"}
for _, prefix := range badprefixes {
if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
}
}
return nil
}
// Chcon changes the fpath file object to the SELinux label scon.
// If the fpath is a directory and recurse is true Chcon will walk the
// directory tree setting the label
func Chcon(fpath string, scon string, recurse bool) error {
if scon == "" {
return nil
}
if err := badPrefix(fpath); err != nil {
return err
}
callback := func(p string, info os.FileInfo, err error) error {
return Setfilecon(p, scon)
}
if recurse {
return filepath.Walk(fpath, callback)
}
return Setfilecon(fpath, scon)
}
// DupSecOpt takes an SELinux process label and returns security options that
// can be used to set the SELinux Type and Level for future container processes
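//
// For example (with an illustrative label),
// DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2") returns:
//
//	[]string{
//		"label:user:system_u",
//		"label:role:system_r",
//		"label:type:svirt_lxc_net_t",
//		"label:level:s0:c1,c2",
//	}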
func DupSecOpt(src string) []string {
if src == "" {
return nil
}
con := NewContext(src)
if con["user"] == "" ||
con["role"] == "" ||
con["type"] == "" ||
con["level"] == "" {
return nil
}
return []string{"label:user:" + con["user"],
"label:role:" + con["role"],
"label:type:" + con["type"],
"label:level:" + con["level"]}
}
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes
func DisableSecOpt() []string {
return []string{"label:disable"}
}
| 1 | 11054 | this `label=` is Docker-specific, while here in libcontainer there shouldn't be any mention of Docker. `DisableSecOpt` and `DupSecOpt` should just deal with `disable,role,type,level`. Both CRI-O and Docker should just pass `disable,role,type,level` stuff and not `label=...`. | opencontainers-runc | go |
@@ -31,6 +31,7 @@ func init() {
flags.BoolVarP(cmdFlags, ¬CreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist.")
flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day.")
flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC.")
+ flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
}
var commandDefinition = &cobra.Command{ | 1 | package touch
import (
"bytes"
"context"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/object"
"github.com/spf13/cobra"
)
var (
notCreateNewFile bool
timeAsArgument string
localTime bool
)
const (
defaultLayout string = "060102"
layoutDateWithTime = "2006-01-02T15:04:05"
layoutDateWithTimeNano = "2006-01-02T15:04:05.999999999"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, ¬CreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist.")
flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day.")
flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC.")
}
var commandDefinition = &cobra.Command{
Use: "touch remote:path",
Short: `Create new file or change file modification time.`,
Long: `
Set the modification time on object(s) as specified by remote:path to
have the current time.
If remote:path does not exist then a zero sized object will be created
unless the --no-create flag is provided.
If --timestamp is used then it will set the modification time to that
time instead of the current time. Times may be specified as one of:
- 'YYMMDD' - e.g. 17.10.30
- 'YYYY-MM-DDTHH:MM:SS' - e.g. 2006-01-02T15:04:05
- 'YYYY-MM-DDTHH:MM:SS.SSS' - e.g. 2006-01-02T15:04:05.123456789
Note that --timestamp is in UTC. If you want local time then add the
--localtime flag.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc, srcFileName := cmd.NewFsDstFile(args)
cmd.Run(true, false, command, func() error {
return Touch(context.Background(), fsrc, srcFileName)
})
},
}
// Touch creates a new file or changes the file modification time.
func Touch(ctx context.Context, fsrc fs.Fs, srcFileName string) (err error) {
timeAtr := time.Now()
if timeAsArgument != "" {
layout := defaultLayout
if len(timeAsArgument) == len(layoutDateWithTime) {
layout = layoutDateWithTime
} else if len(timeAsArgument) > len(layoutDateWithTime) {
layout = layoutDateWithTimeNano
}
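		// For example, "2006-01-02T15:04:05" has the same length as
		// layoutDateWithTime and parses with second precision, while the
		// 6-character "170130" keeps defaultLayout ("060102") and is
		// read as 2017-01-30.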
var timeAtrFromFlags time.Time
if localTime {
timeAtrFromFlags, err = time.ParseInLocation(layout, timeAsArgument, time.Local)
} else {
timeAtrFromFlags, err = time.Parse(layout, timeAsArgument)
}
if err != nil {
return errors.Wrap(err, "failed to parse date/time argument")
}
timeAtr = timeAtrFromFlags
}
file, err := fsrc.NewObject(ctx, srcFileName)
if err != nil {
if !notCreateNewFile {
var buffer []byte
src := object.NewStaticObjectInfo(srcFileName, timeAtr, int64(len(buffer)), true, nil, fsrc)
_, err = fsrc.Put(ctx, bytes.NewBuffer(buffer), src)
if err != nil {
return err
}
}
return nil
}
err = file.SetModTime(ctx, timeAtr)
if err != nil {
return errors.Wrap(err, "touch: couldn't set mod time")
}
return nil
}
| 1 | 13624 | You need to declare the variable `recurse` above, in the same place as `localTime`. | rclone-rclone | go |
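A minimal sketch of the declaration this review asks for (assuming the
package-level var block shown in the file above):

	var (
		notCreateNewFile bool
		timeAsArgument   string
		localTime        bool
		recurse          bool
	)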
@@ -1565,7 +1565,8 @@ class SpreadingOperation(LinkableOperation):
new_data[tuple(vd.name for vd in vdims)] = img
else:
new_data = array
- return element.clone(new_data, **kwargs)
+ return element.clone(new_data, xdensity=element.xdensity,
+ ydensity=element.ydensity, **kwargs)
| 1 | from __future__ import absolute_import, division
import warnings
from collections import Callable
from functools import partial
import param
import numpy as np
import pandas as pd
import xarray as xr
import datashader as ds
import datashader.reductions as rd
import datashader.transfer_functions as tf
import dask.dataframe as dd
from param.parameterized import bothmethod
try:
from datashader.bundling import (directly_connect_edges as connect_edges,
hammer_bundle)
except:
hammer_bundle, connect_edges = object, object
from ..core import (Operation, Element, Dimension, NdOverlay,
CompositeOverlay, Dataset, Overlay, OrderedDict)
from ..core.data import PandasInterface, XArrayInterface, DaskInterface, cuDFInterface
from ..core.util import (
Iterable, LooseVersion, basestring, cftime_types, cftime_to_timestamp,
datetime_types, dt_to_int, isfinite, get_param_values, max_range)
from ..element import (Image, Path, Curve, RGB, Graph, TriMesh,
QuadMesh, Contours, Spikes, Area, Spread,
Segments, Scatter, Points, Polygons)
from ..element.util import connect_tri_edges_pd
from ..streams import RangeXY, PlotSize
ds_version = LooseVersion(ds.__version__)
class LinkableOperation(Operation):
"""
Abstract baseclass for operations supporting linked inputs.
"""
link_inputs = param.Boolean(default=True, doc="""
By default, the link_inputs parameter is set to True so that
when applying an operation, backends that support linked
streams update RangeXY streams on the inputs of the operation.
Disable when you do not want the resulting plot to be
interactive, e.g. when trying to display an interactive plot a
second time.""")
    _allow_extra_keywords = True
class ResamplingOperation(LinkableOperation):
"""
Abstract baseclass for resampling operations
"""
dynamic = param.Boolean(default=True, doc="""
Enables dynamic processing by default.""")
expand = param.Boolean(default=True, doc="""
Whether the x_range and y_range should be allowed to expand
beyond the extent of the data. Setting this value to True is
useful for the case where you want to ensure a certain size of
output grid, e.g. if you are doing masking or other arithmetic
on the grids. A value of False ensures that the grid is only
just as large as it needs to be to contain the data, which will
be faster and use less memory if the resulting aggregate is
being overlaid on a much larger background.""")
height = param.Integer(default=400, doc="""
The height of the output image in pixels.""")
width = param.Integer(default=400, doc="""
The width of the output image in pixels.""")
x_range = param.Tuple(default=None, length=2, doc="""
The x_range as a tuple of min and max x-value. Auto-ranges
if set to None.""")
y_range = param.Tuple(default=None, length=2, doc="""
The y-axis range as a tuple of min and max y value. Auto-ranges
if set to None.""")
x_sampling = param.Number(default=None, doc="""
Specifies the smallest allowed sampling interval along the x axis.""")
y_sampling = param.Number(default=None, doc="""
Specifies the smallest allowed sampling interval along the y axis.""")
target = param.ClassSelector(class_=Dataset, doc="""
A target Dataset which defines the desired x_range, y_range,
width and height.
""")
streams = param.List(default=[PlotSize, RangeXY], doc="""
List of streams that are applied if dynamic=True, allowing
for dynamic interaction with the plot.""")
element_type = param.ClassSelector(class_=(Dataset,), instantiate=False,
is_instance=False, default=Image,
doc="""
The type of the returned Elements, must be a 2D Dataset type.""")
precompute = param.Boolean(default=False, doc="""
Whether to apply precomputing operations. Precomputing can
speed up resampling operations by avoiding unnecessary
recomputation if the supplied element does not change between
calls. The cost of enabling this option is that the memory
used to represent this internal state is not freed between
calls.""")
@bothmethod
def instance(self_or_cls,**params):
filtered = {k:v for k,v in params.items() if k in self_or_cls.param}
inst = super(ResamplingOperation, self_or_cls).instance(**filtered)
inst._precomputed = {}
return inst
def _get_sampling(self, element, x, y, ndim=2, default=None):
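        """
        Computes the x/y ranges, bin centers, output size and axis types
        for the supplied element, honoring any target dataset, explicit
        ranges and minimum sampling intervals. Returns a tuple of
        ((x_range, y_range), (xs, ys), (width, height), (xtype, ytype)).
        """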
target = self.p.target
if not isinstance(x, list) and x is not None:
x = [x]
if not isinstance(y, list) and y is not None:
y = [y]
if target:
x0, y0, x1, y1 = target.bounds.lbrt()
x_range, y_range = (x0, x1), (y0, y1)
height, width = target.dimension_values(2, flat=False).shape
else:
if x is None:
x_range = self.p.x_range or (-0.5, 0.5)
elif self.p.expand or not self.p.x_range:
if self.p.x_range and all(isfinite(v) for v in self.p.x_range):
x_range = self.p.x_range
else:
x_range = max_range([element.range(xd) for xd in x])
else:
x0, x1 = self.p.x_range
ex0, ex1 = max_range([element.range(xd) for xd in x])
x_range = (np.nanmin([np.nanmax([x0, ex0]), ex1]),
np.nanmax([np.nanmin([x1, ex1]), ex0]))
if (y is None and ndim == 2):
y_range = self.p.y_range or default or (-0.5, 0.5)
elif self.p.expand or not self.p.y_range:
if self.p.y_range and all(isfinite(v) for v in self.p.y_range):
y_range = self.p.y_range
elif default is None:
y_range = max_range([element.range(yd) for yd in y])
else:
y_range = default
else:
y0, y1 = self.p.y_range
if default is None:
ey0, ey1 = max_range([element.range(yd) for yd in y])
else:
ey0, ey1 = default
y_range = (np.nanmin([np.nanmax([y0, ey0]), ey1]),
np.nanmax([np.nanmin([y1, ey1]), ey0]))
width, height = self.p.width, self.p.height
(xstart, xend), (ystart, yend) = x_range, y_range
xtype = 'numeric'
if isinstance(xstart, datetime_types) or isinstance(xend, datetime_types):
xstart, xend = dt_to_int(xstart, 'ns'), dt_to_int(xend, 'ns')
xtype = 'datetime'
elif not np.isfinite(xstart) and not np.isfinite(xend):
xstart, xend = 0, 0
if x and element.get_dimension_type(x[0]) in datetime_types:
xtype = 'datetime'
ytype = 'numeric'
if isinstance(ystart, datetime_types) or isinstance(yend, datetime_types):
ystart, yend = dt_to_int(ystart, 'ns'), dt_to_int(yend, 'ns')
ytype = 'datetime'
elif not np.isfinite(ystart) and not np.isfinite(yend):
ystart, yend = 0, 0
if y and element.get_dimension_type(y[0]) in datetime_types:
ytype = 'datetime'
# Compute highest allowed sampling density
xspan = xend - xstart
yspan = yend - ystart
if self.p.x_sampling:
width = int(min([(xspan/self.p.x_sampling), width]))
if self.p.y_sampling:
height = int(min([(yspan/self.p.y_sampling), height]))
if xstart == xend or width == 0:
xunit, width = 0, 0
else:
xunit = float(xspan)/width
if ystart == yend or height == 0:
yunit, height = 0, 0
else:
yunit = float(yspan)/height
xs, ys = (np.linspace(xstart+xunit/2., xend-xunit/2., width),
np.linspace(ystart+yunit/2., yend-yunit/2., height))
return ((xstart, xend), (ystart, yend)), (xs, ys), (width, height), (xtype, ytype)
def _dt_transform(self, x_range, y_range, xs, ys, xtype, ytype):
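        # Converts integer nanosecond ranges and coordinates (as produced
        # by dt_to_int) back to datetime64[us] values for datetime axes.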
(xstart, xend), (ystart, yend) = x_range, y_range
if xtype == 'datetime':
xstart, xend = (np.array([xstart, xend])/1e3).astype('datetime64[us]')
xs = (xs/1e3).astype('datetime64[us]')
if ytype == 'datetime':
ystart, yend = (np.array([ystart, yend])/1e3).astype('datetime64[us]')
ys = (ys/1e3).astype('datetime64[us]')
return ((xstart, xend), (ystart, yend)), (xs, ys)
class AggregationOperation(ResamplingOperation):
"""
AggregationOperation extends the ResamplingOperation defining an
aggregator parameter used to define a datashader Reduction.
"""
aggregator = param.ClassSelector(class_=(ds.reductions.Reduction, basestring),
default=ds.count(), doc="""
Datashader reduction function used for aggregating the data.
The aggregator may also define a column to aggregate; if
no column is defined the first value dimension of the element
will be used. May also be defined as a string.""")
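    # Mapping from string aggregator names to datashader reduction
    # classes, used by _get_aggregator to resolve string-valued
    # aggregators.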
_agg_methods = {
'any': rd.any,
'count': rd.count,
'first': rd.first,
'last': rd.last,
'mode': rd.mode,
'mean': rd.mean,
'sum': rd.sum,
'var': rd.var,
'std': rd.std,
'min': rd.min,
'max': rd.max
}
def _get_aggregator(self, element, add_field=True):
agg = self.p.aggregator
if isinstance(agg, basestring):
if agg not in self._agg_methods:
agg_methods = sorted(self._agg_methods)
raise ValueError("Aggregation method '%r' is not known; "
"aggregator must be one of: %r" %
(agg, agg_methods))
agg = self._agg_methods[agg]()
elements = element.traverse(lambda x: x, [Element])
if add_field and getattr(agg, 'column', False) is None and not isinstance(agg, (rd.count, rd.any)):
if not elements:
raise ValueError('Could not find any elements to apply '
'%s operation to.' % type(self).__name__)
inner_element = elements[0]
if isinstance(inner_element, TriMesh) and inner_element.nodes.vdims:
field = inner_element.nodes.vdims[0].name
elif inner_element.vdims:
field = inner_element.vdims[0].name
elif isinstance(element, NdOverlay):
field = element.kdims[0].name
else:
raise ValueError("Could not determine dimension to apply "
"'%s' operation to. Declare the dimension "
"to aggregate as part of the datashader "
"aggregator." % type(self).__name__)
agg = type(agg)(field)
return agg
def _empty_agg(self, element, x, y, width, height, xs, ys, agg_fn, **params):
x = x.name if x else 'x'
        y = y.name if y else 'y'
xarray = xr.DataArray(np.full((height, width), np.NaN),
dims=[y, x], coords={x: xs, y: ys})
if width == 0:
params['xdensity'] = 1
if height == 0:
params['ydensity'] = 1
el = self.p.element_type(xarray, **params)
if isinstance(agg_fn, ds.count_cat):
vals = element.dimension_values(agg_fn.column, expanded=False)
dim = element.get_dimension(agg_fn.column)
return NdOverlay({v: el for v in vals}, dim)
return el
def _get_agg_params(self, element, x, y, agg_fn, bounds):
params = dict(get_param_values(element), kdims=[x, y],
datatype=['xarray'], bounds=bounds)
category = None
if hasattr(agg_fn, 'reduction'):
category = agg_fn.cat_column
agg_fn = agg_fn.reduction
column = agg_fn.column if agg_fn else None
if column:
dims = [d for d in element.dimensions('ranges') if d == column]
if not dims:
raise ValueError("Aggregation column '%s' not found on '%s' element. "
"Ensure the aggregator references an existing "
"dimension." % (column,element))
name = '%s Count' % column if isinstance(agg_fn, ds.count_cat) else column
vdims = [dims[0].clone(name)]
elif category:
vdims = Dimension('%s Count' % category)
else:
vdims = Dimension('Count')
params['vdims'] = vdims
return params
class aggregate(AggregationOperation):
"""
aggregate implements 2D binning for any valid HoloViews Element
type using datashader. I.e., this operation turns a HoloViews
Element or overlay of Elements into an Image or an overlay of
    Images by rasterizing it. This allows large datasets to be
    aggregated quickly, computing a fixed-size representation
    independent of the original dataset size.
By default it will simply count the number of values in each bin
but other aggregators can be supplied implementing mean, max, min
and other reduction operations.
The bins of the aggregate are defined by the width and height and
the x_range and y_range. If x_sampling or y_sampling are supplied
the operation will ensure that a bin is no smaller than the minimum
sampling distance by reducing the width and height when zoomed in
beyond the minimum sampling distance.
By default, the PlotSize stream is applied when this operation
is used dynamically, which means that the height and width
will automatically be set to match the inner dimensions of
the linked plot.
"""
@classmethod
def get_agg_data(cls, obj, category=None):
"""
Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated.
"""
paths = []
if isinstance(obj, Graph):
obj = obj.edgepaths
kdims = list(obj.kdims)
vdims = list(obj.vdims)
dims = obj.dimensions()[:2]
if isinstance(obj, Path):
glyph = 'line'
for p in obj.split(datatype='dataframe'):
paths.append(p)
elif isinstance(obj, CompositeOverlay):
element = None
for key, el in obj.data.items():
x, y, element, glyph = cls.get_agg_data(el)
dims = (x, y)
df = PandasInterface.as_dframe(element)
if isinstance(obj, NdOverlay):
df = df.assign(**dict(zip(obj.dimensions('key', True), key)))
paths.append(df)
if element is None:
dims = None
else:
kdims += element.kdims
vdims = element.vdims
elif isinstance(obj, Element):
glyph = 'line' if isinstance(obj, Curve) else 'points'
paths.append(PandasInterface.as_dframe(obj))
if dims is None or len(dims) != 2:
return None, None, None, None
else:
x, y = dims
if len(paths) > 1:
if glyph == 'line':
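                # Separate the individual paths with a row of NaNs so the
                # datashader line glyph treats them as disjoint segments.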
path = paths[0][:1]
if isinstance(path, dd.DataFrame):
path = path.compute()
empty = path.copy()
empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
paths = [elem for p in paths for elem in (p, empty)][:-1]
if all(isinstance(path, dd.DataFrame) for path in paths):
df = dd.concat(paths)
else:
paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
df = pd.concat(paths)
else:
df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])
if category and df[category].dtype.name != 'category':
df[category] = df[category].astype('category')
is_custom = isinstance(df, dd.DataFrame) or cuDFInterface.applies(df)
if any((not is_custom and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
df[d.name].dtype.kind == 'M' for d in (x, y)):
df = df.copy()
for d in (x, y):
vals = df[d.name]
if not is_custom and len(vals) and isinstance(vals.values[0], cftime_types):
vals = cftime_to_timestamp(vals, 'ns')
elif df[d.name].dtype.kind == 'M':
vals = vals.astype('datetime64[ns]')
else:
continue
df[d.name] = vals.astype('int64')
return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
if hasattr(agg_fn, 'cat_column'):
category = agg_fn.cat_column
else:
category = agg_fn.column if isinstance(agg_fn, ds.count_cat) else None
if overlay_aggregate.applies(element, agg_fn):
params = dict(
{p: v for p, v in self.param.get_param_values() if p != 'name'},
dynamic=False, **{p: v for p, v in self.p.items()
if p not in ('name', 'dynamic')})
return overlay_aggregate(element, **params)
if element._plot_id in self._precomputed:
x, y, data, glyph = self._precomputed[element._plot_id]
else:
x, y, data, glyph = self.get_agg_data(element, category)
if self.p.precompute:
self._precomputed[element._plot_id] = x, y, data, glyph
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = self._get_sampling(element, x, y)
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
params = self._get_agg_params(element, x, y, agg_fn, (x0, y0, x1, y1))
if x is None or y is None or width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
        elif getattr(data, 'interface', None) is not DaskInterface and not len(data):
empty_val = 0 if isinstance(agg_fn, ds.count) else np.NaN
xarray = xr.DataArray(np.full((height, width), empty_val),
dims=[y.name, x.name], coords={x.name: xs, y.name: ys})
return self.p.element_type(xarray, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
dfdata = PandasInterface.as_dframe(data)
agg = getattr(cvs, glyph)(dfdata, x.name, y.name, agg_fn)
if 'x_axis' in agg.coords and 'y_axis' in agg.coords:
agg = agg.rename({'x_axis': x, 'y_axis': y})
if xtype == 'datetime':
agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]')
if ytype == 'datetime':
agg[y.name] = (agg[y.name]/1e3).astype('datetime64[us]')
if agg.ndim == 2:
# Replacing x and y coordinates to avoid numerical precision issues
eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data)
return self.p.element_type(eldata, **params)
else:
layers = {}
for c in agg.coords[agg_fn.column].data:
cagg = agg.sel(**{agg_fn.column: c})
eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data)
layers[c] = self.p.element_type(eldata, **params)
return NdOverlay(layers, kdims=[data.get_dimension(agg_fn.column)])
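# Illustrative usage sketch for the aggregate operation above; assumes
# holoviews is importable as hv and that df is a pandas DataFrame with
# 'x' and 'y' columns:
#
#   import holoviews as hv
#   points = hv.Points(df, kdims=['x', 'y'])
#   img = aggregate(points, dynamic=False, width=400, height=400,
#                   aggregator=ds.count())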
class overlay_aggregate(aggregate):
"""
    Optimized aggregation for NdOverlay objects which aggregates each
    Element in the NdOverlay individually, avoiding having to concatenate
    the items in the NdOverlay. Works by summing the sum and count
    aggregates and applying appropriate masking for NaN values. Mean
    aggregation is also supported by dividing the summed sum aggregate by
    the summed count aggregate. count_cat aggregates are grouped by the
    categorical dimension and a separate aggregate for each category is
    generated.
"""
@classmethod
def applies(cls, element, agg_fn):
return (isinstance(element, NdOverlay) and
((isinstance(agg_fn, (ds.count, ds.sum, ds.mean)) and
(agg_fn.column is None or agg_fn.column not in element.kdims)) or
(isinstance(agg_fn, ds.count_cat) and agg_fn.column in element.kdims)))
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
if not self.applies(element, agg_fn):
raise ValueError('overlay_aggregate only handles aggregation '
'of NdOverlay types with count, sum or mean '
'reduction.')
# Compute overall bounds
dims = element.last.dimensions()[0:2]
ndims = len(dims)
if ndims == 1:
x, y = dims[0], None
else:
x, y = dims
info = self._get_sampling(element, x, y, ndims)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), _ = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
agg_params = dict({k: v for k, v in dict(self.param.get_param_values(),
**self.p).items()
if k in aggregate.param},
x_range=(x0, x1), y_range=(y0, y1))
bbox = (x0, y0, x1, y1)
# Optimize categorical counts by aggregating them individually
if isinstance(agg_fn, ds.count_cat):
agg_params.update(dict(dynamic=False, aggregator=ds.count()))
agg_fn1 = aggregate.instance(**agg_params)
if element.ndims == 1:
grouped = element
else:
grouped = element.groupby([agg_fn.column], container_type=NdOverlay,
group_type=NdOverlay)
groups = []
for k, v in grouped.items():
agg = agg_fn1(v)
groups.append((k, agg.clone(agg.data, bounds=bbox)))
return grouped.clone(groups)
# Create aggregate instance for sum, count operations, breaking mean
# into two aggregates
column = agg_fn.column or 'Count'
if isinstance(agg_fn, ds.mean):
agg_fn1 = aggregate.instance(**dict(agg_params, aggregator=ds.sum(column)))
agg_fn2 = aggregate.instance(**dict(agg_params, aggregator=ds.count()))
else:
agg_fn1 = aggregate.instance(**agg_params)
agg_fn2 = None
is_sum = isinstance(agg_fn1.aggregator, ds.sum)
# Accumulate into two aggregates and mask
        agg, agg2, mask = None, None, None
for v in element:
# Compute aggregates and mask
new_agg = agg_fn1.process_element(v, None)
if is_sum:
new_mask = np.isnan(new_agg.data[column].values)
new_agg.data = new_agg.data.fillna(0)
if agg_fn2:
new_agg2 = agg_fn2.process_element(v, None)
if agg is None:
agg = new_agg
if is_sum: mask = new_mask
if agg_fn2: agg2 = new_agg2
else:
agg.data += new_agg.data
if is_sum: mask &= new_mask
if agg_fn2: agg2.data += new_agg2.data
# Divide sum by count to compute mean
if agg2 is not None:
agg2.data.rename({'Count': agg_fn.column}, inplace=True)
with np.errstate(divide='ignore', invalid='ignore'):
agg.data /= agg2.data
        # Fill masked values with NaNs
if is_sum:
agg.data[column].values[mask] = np.NaN
return agg.clone(bounds=bbox)
class area_aggregate(AggregationOperation):
"""
Aggregates Area elements by filling the area between zero and
the y-values if only one value dimension is defined and the area
between the curves if two are provided.
"""
def _process(self, element, key=None):
x, y = element.dimensions()[:2]
agg_fn = self._get_aggregator(element)
default = None
if not self.p.y_range:
y0, y1 = element.range(1)
if len(element.vdims) > 1:
y0, _ = element.range(2)
elif y0 >= 0:
y0 = 0
elif y1 <= 0:
y1 = 0
default = (y0, y1)
ystack = element.vdims[1].name if len(element.vdims) > 1 else None
info = self._get_sampling(element, x, y, ndim=2, default=default)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
df = PandasInterface.as_dframe(element)
if isinstance(agg_fn, (ds.count, ds.any)):
vdim = type(agg_fn).__name__
else:
vdim = element.get_dimension(agg_fn.column)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
params = dict(get_param_values(element), kdims=[x, y], vdims=vdim,
datatype=['xarray'], bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
agg = cvs.area(df, x.name, y.name, agg_fn, axis=0, y_stack=ystack)
if xtype == "datetime":
agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]')
return self.p.element_type(agg, **params)
class spread_aggregate(area_aggregate):
"""
Aggregates Spread elements by filling the area between the lower
and upper error band.
"""
def _process(self, element, key=None):
x, y = element.dimensions()[:2]
df = PandasInterface.as_dframe(element)
if df is element.data:
df = df.copy()
pos, neg = element.vdims[1:3] if len(element.vdims) > 2 else element.vdims[1:2]*2
yvals = df[y.name]
df[y.name] = yvals+df[pos.name]
df['_lower'] = yvals-df[neg.name]
area = element.clone(df, vdims=[y, '_lower']+element.vdims[3:], new_type=Area)
return super(spread_aggregate, self)._process(area, key=None)
class spikes_aggregate(AggregationOperation):
"""
Aggregates Spikes elements by drawing individual line segments
over the entire y_range if no value dimension is defined and
between zero and the y-value if one is defined.
"""
spike_length = param.Number(default=None, allow_None=True, doc="""
If numeric, specifies the length of each spike, overriding the
vdims values (if present).""")
offset = param.Number(default=0., doc="""
The offset of the lower end of each spike.""")
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
x, y = element.kdims[0], None
spike_length = 0.5 if self.p.spike_length is None else self.p.spike_length
if element.vdims and self.p.spike_length is None:
x, y = element.dimensions()[:2]
            rename_dict = {'x': x.name, 'y': y.name}
if not self.p.y_range:
y0, y1 = element.range(1)
if y0 >= 0:
default = (0, y1)
elif y1 <= 0:
default = (y0, 0)
else:
default = (y0, y1)
else:
default = None
else:
x, y = element.kdims[0], None
default = (float(self.p.offset),
float(self.p.offset + spike_length))
rename_dict = {'x': x.name}
info = self._get_sampling(element, x, y, ndim=1, default=default)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
value_cols = [] if agg_fn.column is None else [agg_fn.column]
if y is None:
df = element.dframe([x]+value_cols).copy()
y = Dimension('y')
df['y0'] = float(self.p.offset)
df['y1'] = float(self.p.offset + spike_length)
yagg = ['y0', 'y1']
if not self.p.expand: height = 1
else:
df = element.dframe([x, y]+value_cols).copy()
df['y0'] = np.array(0, df.dtypes[y.name])
yagg = ['y0', y.name]
if xtype == 'datetime':
df[x.name] = df[x.name].astype('datetime64[us]').astype('int64')
if isinstance(agg_fn, (ds.count, ds.any)):
vdim = type(agg_fn).__name__
else:
vdim = element.get_dimension(agg_fn.column)
params = dict(get_param_values(element), kdims=[x, y], vdims=vdim,
datatype=['xarray'], bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
agg = cvs.line(df, x.name, yagg, agg_fn, axis=1).rename(rename_dict)
if xtype == "datetime":
agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]')
return self.p.element_type(agg, **params)
class segments_aggregate(AggregationOperation):
"""
Aggregates Segments elements.
"""
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
x0d, y0d, x1d, y1d = element.kdims
info = self._get_sampling(element, [x0d, x1d], [y0d, y1d], ndim=1)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
df = element.interface.as_dframe(element)
if xtype == 'datetime':
df[x0d.name] = df[x0d.name].astype('datetime64[us]').astype('int64')
df[x1d.name] = df[x1d.name].astype('datetime64[us]').astype('int64')
if ytype == 'datetime':
df[y0d.name] = df[y0d.name].astype('datetime64[us]').astype('int64')
df[y1d.name] = df[y1d.name].astype('datetime64[us]').astype('int64')
if isinstance(agg_fn, (ds.count, ds.any)):
vdim = type(agg_fn).__name__
else:
vdim = element.get_dimension(agg_fn.column)
params = dict(get_param_values(element), kdims=[x0d, y0d], vdims=vdim,
datatype=['xarray'], bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x0d, y0d, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
agg = cvs.line(df, [x0d.name, x1d.name], [y0d.name, y1d.name], agg_fn, axis=1)
xdim, ydim = list(agg.dims)[:2][::-1]
if xtype == "datetime":
agg[xdim] = (agg[xdim]/1e3).astype('datetime64[us]')
if ytype == "datetime":
agg[ydim] = (agg[ydim]/1e3).astype('datetime64[us]')
params['kdims'] = [xdim, ydim]
return self.p.element_type(agg, **params)
class regrid(AggregationOperation):
"""
regrid allows resampling a HoloViews Image type using specified
up- and downsampling functions defined using the aggregator and
interpolation parameters respectively. By default upsampling is
disabled to avoid unnecessarily upscaling an image that has to be
sent to the browser. Also disables expanding the image beyond its
    original bounds, avoiding unnecessarily padding the output array
with NaN values.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
expand = param.Boolean(default=False, doc="""
Whether the x_range and y_range should be allowed to expand
beyond the extent of the data. Setting this value to True is
useful for the case where you want to ensure a certain size of
output grid, e.g. if you are doing masking or other arithmetic
on the grids. A value of False ensures that the grid is only
just as large as it needs to be to contain the data, which will
be faster and use less memory if the resulting aggregate is
being overlaid on a much larger background.""")
interpolation = param.ObjectSelector(default='nearest',
objects=['linear', 'nearest', 'bilinear', None, False], doc="""
        Interpolation method.""")
upsample = param.Boolean(default=False, doc="""
Whether to allow upsampling if the source array is smaller
than the requested array. Setting this value to True will
enable upsampling using the interpolation method, when the
requested width and height are larger than what is available
on the source grid. If upsampling is disabled (the default)
the width and height are clipped to what is available on the
source array.""")
def _get_xarrays(self, element, coords, xtype, ytype):
x, y = element.kdims
dims = [y.name, x.name]
irregular = any(element.interface.irregular(element, d)
for d in dims)
if irregular:
coord_dict = {x.name: (('y', 'x'), coords[0]),
y.name: (('y', 'x'), coords[1])}
else:
coord_dict = {x.name: coords[0], y.name: coords[1]}
arrays = {}
for i, vd in enumerate(element.vdims):
if element.interface is XArrayInterface:
if element.interface.packed(element):
xarr = element.data[..., i]
else:
xarr = element.data[vd.name]
if 'datetime' in (xtype, ytype):
xarr = xarr.copy()
if dims != xarr.dims and not irregular:
xarr = xarr.transpose(*dims)
elif irregular:
arr = element.dimension_values(vd, flat=False)
xarr = xr.DataArray(arr, coords=coord_dict, dims=['y', 'x'])
else:
arr = element.dimension_values(vd, flat=False)
xarr = xr.DataArray(arr, coords=coord_dict, dims=dims)
if xtype == "datetime":
xarr[x.name] = [dt_to_int(v, 'ns') for v in xarr[x.name].values]
if ytype == "datetime":
xarr[y.name] = [dt_to_int(v, 'ns') for v in xarr[y.name].values]
arrays[vd.name] = xarr
return arrays
def _process(self, element, key=None):
if ds_version <= '0.5.0':
raise RuntimeError('regrid operation requires datashader>=0.6.0')
        # Compute coords, ranges and size
x, y = element.kdims
coords = tuple(element.dimension_values(d, expanded=False) for d in [x, y])
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
# Disable upsampling by clipping size and ranges
(xstart, xend), (ystart, yend) = (x_range, y_range)
xspan, yspan = (xend-xstart), (yend-ystart)
interp = self.p.interpolation or None
if interp == 'bilinear': interp = 'linear'
if not (self.p.upsample or interp is None) and self.p.target is None:
(x0, x1), (y0, y1) = element.range(0), element.range(1)
if isinstance(x0, datetime_types):
x0, x1 = dt_to_int(x0, 'ns'), dt_to_int(x1, 'ns')
if isinstance(y0, datetime_types):
y0, y1 = dt_to_int(y0, 'ns'), dt_to_int(y1, 'ns')
exspan, eyspan = (x1-x0), (y1-y0)
if np.isfinite(exspan) and exspan > 0 and xspan > 0:
width = max([min([int((xspan/exspan) * len(coords[0])), width]), 1])
else:
width = 0
if np.isfinite(eyspan) and eyspan > 0 and yspan > 0:
height = max([min([int((yspan/eyspan) * len(coords[1])), height]), 1])
else:
height = 0
xunit = float(xspan)/width if width else 0
yunit = float(yspan)/height if height else 0
xs, ys = (np.linspace(xstart+xunit/2., xend-xunit/2., width),
np.linspace(ystart+yunit/2., yend-yunit/2., height))
# Compute bounds (converting datetimes)
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
params = dict(bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
if width == 0:
params['xdensity'] = 1
if height == 0:
params['ydensity'] = 1
return element.clone((xs, ys, np.zeros((height, width))), **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
# Apply regridding to each value dimension
regridded = {}
arrays = self._get_xarrays(element, coords, xtype, ytype)
agg_fn = self._get_aggregator(element, add_field=False)
for vd, xarr in arrays.items():
rarray = cvs.raster(xarr, upsample_method=interp,
downsample_method=agg_fn)
# Convert datetime coordinates
if xtype == "datetime":
rarray[x.name] = (rarray[x.name]/1e3).astype('datetime64[us]')
if ytype == "datetime":
rarray[y.name] = (rarray[y.name]/1e3).astype('datetime64[us]')
regridded[vd] = rarray
regridded = xr.Dataset(regridded)
return element.clone(regridded, datatype=['xarray']+element.datatype, **params)
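# Illustrative usage sketch for the regrid operation above; assumes img is
# an existing holoviews Image element:
#
#   resampled = regrid(img, dynamic=False, width=200, height=200,
#                      upsample=False)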
class contours_rasterize(aggregate):
"""
Rasterizes the Contours element by weighting the aggregation by
the iso-contour levels if a value dimension is defined, otherwise
default to any aggregator.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
def _get_aggregator(self, element, add_field=True):
agg = self.p.aggregator
if not element.vdims and agg.column is None and not isinstance(agg, (rd.count, rd.any)):
return ds.any()
return super(contours_rasterize, self)._get_aggregator(element, add_field)
class trimesh_rasterize(aggregate):
"""
Rasterize the TriMesh element using the supplied aggregator. If
the TriMesh nodes or edges define a value dimension, will plot
filled and shaded polygons; otherwise returns a wiremesh of the
data.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
interpolation = param.ObjectSelector(default='bilinear',
objects=['bilinear', 'linear', None, False], doc="""
The interpolation method to apply during rasterization.""")
def _precompute(self, element, agg):
from datashader.utils import mesh
if element.vdims and getattr(agg, 'column', None) not in element.nodes.vdims:
simplices = element.dframe([0, 1, 2, 3])
verts = element.nodes.dframe([0, 1])
elif element.nodes.vdims:
simplices = element.dframe([0, 1, 2])
verts = element.nodes.dframe([0, 1, 3])
for c, dtype in zip(simplices.columns[:3], simplices.dtypes):
if dtype.kind != 'i':
simplices[c] = simplices[c].astype('int')
return {'mesh': mesh(verts, simplices), 'simplices': simplices,
'vertices': verts}
def _precompute_wireframe(self, element, agg):
if hasattr(element, '_wireframe'):
segments = element._wireframe.data
else:
segments = connect_tri_edges_pd(element)
element._wireframe = Dataset(segments, datatype=['dataframe', 'dask'])
return {'segments': segments}
def _process(self, element, key=None):
if isinstance(element, TriMesh):
x, y = element.nodes.kdims[:2]
else:
x, y = element.kdims
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
agg = self.p.aggregator
interp = self.p.interpolation or None
precompute = self.p.precompute
if interp == 'linear': interp = 'bilinear'
wireframe = False
if (not (element.vdims or (isinstance(element, TriMesh) and element.nodes.vdims))) and ds_version <= '0.6.9':
self.p.aggregator = ds.any() if isinstance(agg, ds.any) or agg == 'any' else ds.count()
return aggregate._process(self, element, key)
elif ((not interp and (isinstance(agg, (ds.any, ds.count)) or
agg in ['any', 'count']))
or not (element.vdims or element.nodes.vdims)):
wireframe = True
precompute = False # TriMesh itself caches wireframe
agg = self._get_aggregator(element) if isinstance(agg, (ds.any, ds.count)) else ds.any()
vdim = 'Count' if isinstance(agg, ds.count) else 'Any'
elif getattr(agg, 'column', None):
if agg.column in element.vdims:
vdim = element.get_dimension(agg.column)
elif isinstance(element, TriMesh) and agg.column in element.nodes.vdims:
vdim = element.nodes.get_dimension(agg.column)
else:
raise ValueError("Aggregation column %s not found on TriMesh element."
% agg.column)
else:
if isinstance(element, TriMesh) and element.nodes.vdims:
vdim = element.nodes.vdims[0]
else:
vdim = element.vdims[0]
agg = self._get_aggregator(element)
if element._plot_id in self._precomputed:
precomputed = self._precomputed[element._plot_id]
elif wireframe:
precomputed = self._precompute_wireframe(element, agg)
else:
precomputed = self._precompute(element, agg)
params = dict(get_param_values(element), kdims=[x, y],
datatype=['xarray'], vdims=[vdim])
if width == 0 or height == 0:
if width == 0: params['xdensity'] = 1
if height == 0: params['ydensity'] = 1
bounds = (x_range[0], y_range[0], x_range[1], y_range[1])
return Image((xs, ys, np.zeros((height, width))), bounds=bounds, **params)
if wireframe:
segments = precomputed['segments']
else:
simplices = precomputed['simplices']
pts = precomputed['vertices']
mesh = precomputed['mesh']
if precompute:
self._precomputed = {element._plot_id: precomputed}
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
if wireframe:
agg = cvs.line(segments, x=['x0', 'x1', 'x2', 'x0'],
y=['y0', 'y1', 'y2', 'y0'], axis=1,
agg=agg).rename({'x': x.name, 'y': y.name})
else:
interpolate = bool(self.p.interpolation)
agg = cvs.trimesh(pts, simplices, agg=agg,
interp=interpolate, mesh=mesh)
return Image(agg, **params)
class quadmesh_rasterize(trimesh_rasterize):
"""
Rasterize the QuadMesh element using the supplied aggregator.
Simply converts to a TriMesh and lets trimesh_rasterize
handle the actual rasterization.
"""
def _precompute(self, element, agg):
if ds_version <= '0.7.0':
return super(quadmesh_rasterize, self)._precompute(element.trimesh(), agg)
def _process(self, element, key=None):
if ds_version <= '0.7.0':
return super(quadmesh_rasterize, self)._process(element, key)
if element.interface.datatype != 'xarray':
element = element.clone(datatype=['xarray'])
data = element.data
x, y = element.kdims
agg_fn = self._get_aggregator(element)
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
if xtype == 'datetime':
data[x.name] = data[x.name].astype('datetime64[us]').astype('int64')
if ytype == 'datetime':
data[y.name] = data[y.name].astype('datetime64[us]').astype('int64')
# Compute bounds (converting datetimes)
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(
x_range, y_range, xs, ys, xtype, ytype
)
params = dict(get_param_values(element), datatype=['xarray'],
bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
vdim = getattr(agg_fn, 'column', element.vdims[0].name)
agg = cvs.quadmesh(data[vdim], x.name, y.name, agg_fn)
xdim, ydim = list(agg.dims)[:2][::-1]
if xtype == "datetime":
agg[xdim] = (agg[xdim]/1e3).astype('datetime64[us]')
if ytype == "datetime":
agg[ydim] = (agg[ydim]/1e3).astype('datetime64[us]')
return Image(agg, **params)
class shade(LinkableOperation):
"""
shade applies a normalization function followed by colormapping to
an Image or NdOverlay of Images, returning an RGB Element.
The data must be in the form of a 2D or 3D DataArray, but NdOverlays
of 2D Images will be automatically converted to a 3D array.
In the 2D case data is normalized and colormapped, while a 3D
array representing categorical aggregates will be supplied a color
key for each category. The colormap (cmap) for the 2D case may be
supplied as an Iterable or a Callable.
"""
alpha = param.Integer(default=255, bounds=(0, 255), doc="""
        Value between 0 and 255 representing the alpha value to use for
colormapped pixels that contain data (i.e. non-NaN values).
Regardless of this value, ``NaN`` values are set to be fully
transparent when doing colormapping.""")
cmap = param.ClassSelector(class_=(Iterable, Callable, dict), doc="""
Iterable or callable which returns colors as hex colors
or web color names (as defined by datashader), to be used
for the colormap of single-layer datashader output.
Callable type must allow mapping colors between 0 and 1.
The default value of None reverts to Datashader's default
colormap.""")
color_key = param.ClassSelector(class_=(Iterable, Callable, dict), doc="""
Iterable or callable that returns colors as hex colors, to
be used for the color key of categorical datashader output.
Callable type must allow mapping colors for supplied values
between 0 and 1.""")
normalization = param.ClassSelector(default='eq_hist',
class_=(basestring, Callable),
doc="""
The normalization operation applied before colormapping.
Valid options include 'linear', 'log', 'eq_hist', 'cbrt',
and any valid transfer function that accepts data, mask, nbins
arguments.""")
clims = param.NumericTuple(default=None, length=2, doc="""
Min and max data values to use for colormap interpolation, when
wishing to override autoranging.
""")
min_alpha = param.Number(default=40, bounds=(0, 255), doc="""
The minimum alpha value to use for non-empty pixels when doing
colormapping, in [0, 255]. Use a higher value to avoid
undersaturation, i.e. poorly visible low-value datapoints, at
        the expense of the overall dynamic range.""")
@classmethod
def concatenate(cls, overlay):
"""
Concatenates an NdOverlay of Image types into a single 3D
xarray Dataset.
"""
if not isinstance(overlay, NdOverlay):
raise ValueError('Only NdOverlays can be concatenated')
xarr = xr.concat([v.data.transpose() for v in overlay.values()],
pd.Index(overlay.keys(), name=overlay.kdims[0].name))
params = dict(get_param_values(overlay.last),
vdims=overlay.last.vdims,
kdims=overlay.kdims+overlay.last.kdims)
return Dataset(xarr.transpose(), datatype=['xarray'], **params)
@classmethod
def uint32_to_uint8(cls, img):
"""
Cast uint32 RGB image to 4 uint8 channels.
"""
return np.flipud(img.view(dtype=np.uint8).reshape(img.shape + (4,)))
@classmethod
def uint32_to_uint8_xr(cls, img):
"""
Cast uint32 xarray DataArray to 4 uint8 channels.
"""
new_array = img.values.view(dtype=np.uint8).reshape(img.shape + (4,))
coords = OrderedDict(list(img.coords.items())+[('band', [0, 1, 2, 3])])
return xr.DataArray(new_array, coords=coords, dims=img.dims+('band',))
@classmethod
def rgb2hex(cls, rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
@classmethod
def to_xarray(cls, element):
if issubclass(element.interface, XArrayInterface):
return element
data = tuple(element.dimension_values(kd, expanded=False)
for kd in element.kdims)
data += tuple(element.dimension_values(vd, flat=False)
for vd in element.vdims)
dtypes = [dt for dt in element.datatype if dt != 'xarray']
return element.clone(data, datatype=['xarray']+dtypes,
bounds=element.bounds,
xdensity=element.xdensity,
ydensity=element.ydensity)
def _process(self, element, key=None):
element = element.map(self.to_xarray, Image)
if isinstance(element, NdOverlay):
bounds = element.last.bounds
xdensity = element.last.xdensity
ydensity = element.last.ydensity
element = self.concatenate(element)
elif isinstance(element, Overlay):
return element.map(partial(shade._process, self), [Element])
else:
xdensity = element.xdensity
ydensity = element.ydensity
bounds = element.bounds
vdim = element.vdims[0].name
array = element.data[vdim]
kdims = element.kdims
# Compute shading options depending on whether
# it is a categorical or regular aggregate
shade_opts = dict(how=self.p.normalization,
min_alpha=self.p.min_alpha,
alpha=self.p.alpha)
if element.ndims > 2:
kdims = element.kdims[1:]
categories = array.shape[-1]
if not self.p.color_key:
pass
elif isinstance(self.p.color_key, dict):
shade_opts['color_key'] = self.p.color_key
elif isinstance(self.p.color_key, Iterable):
shade_opts['color_key'] = [c for i, c in
zip(range(categories), self.p.color_key)]
else:
colors = [self.p.color_key(s) for s in np.linspace(0, 1, categories)]
shade_opts['color_key'] = map(self.rgb2hex, colors)
elif not self.p.cmap:
pass
elif isinstance(self.p.cmap, Callable):
colors = [self.p.cmap(s) for s in np.linspace(0, 1, 256)]
shade_opts['cmap'] = map(self.rgb2hex, colors)
else:
shade_opts['cmap'] = self.p.cmap
if self.p.clims:
shade_opts['span'] = self.p.clims
elif ds_version > '0.5.0' and self.p.normalization != 'eq_hist':
shade_opts['span'] = element.range(vdim)
params = dict(get_param_values(element), kdims=kdims,
bounds=bounds, vdims=RGB.vdims[:],
xdensity=xdensity, ydensity=ydensity)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
if np.isnan(array.data).all():
xd, yd = kdims[:2]
arr = np.zeros(array.data.shape[:2]+(4,), dtype=np.uint8)
coords = {xd.name: element.data.coords[xd.name],
yd.name: element.data.coords[yd.name],
'band': [0, 1, 2, 3]}
img = xr.DataArray(arr, coords=coords, dims=(yd.name, xd.name, 'band'))
return RGB(img, **params)
else:
img = tf.shade(array, **shade_opts)
return RGB(self.uint32_to_uint8_xr(img), **params)
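# Illustrative usage sketch for the shade operation above; assumes agg_img
# is an Image produced by the aggregate or rasterize operations:
#
#   rgb = shade(agg_img, dynamic=False, cmap=['lightblue', 'darkblue'],
#               normalization='eq_hist')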
class geometry_rasterize(AggregationOperation):
"""
Rasterizes geometries by converting them to spatialpandas.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
def _get_aggregator(self, element, add_field=True):
agg = self.p.aggregator
if (not (element.vdims or isinstance(agg, basestring)) and
agg.column is None and not isinstance(agg, (rd.count, rd.any))):
return ds.count()
return super(geometry_rasterize, self)._get_aggregator(element, add_field)
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
xdim, ydim = element.kdims
info = self._get_sampling(element, xdim, ydim)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
x0, x1 = x_range
y0, y1 = y_range
params = self._get_agg_params(element, xdim, ydim, agg_fn, (x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, xdim, ydim, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
if element._plot_id in self._precomputed:
data, col = self._precomputed[element._plot_id]
else:
if element.interface.datatype != 'spatialpandas':
element = element.clone(datatype=['spatialpandas'])
data = element.data
col = element.interface.geo_column(data)
if self.p.precompute:
self._precomputed[element._plot_id] = (data, col)
if isinstance(agg_fn, ds.count_cat):
data[agg_fn.column] = data[agg_fn.column].astype('category')
if isinstance(element, Polygons):
agg = cvs.polygons(data, geometry=col, agg=agg_fn)
elif isinstance(element, Path):
agg = cvs.line(data, geometry=col, agg=agg_fn)
elif isinstance(element, Points):
agg = cvs.points(data, geometry=col, agg=agg_fn)
agg = agg.rename({'x': xdim.name, 'y': ydim.name})
if agg.ndim == 2:
return self.p.element_type(agg, **params)
else:
layers = {}
for c in agg.coords[agg_fn.column].data:
cagg = agg.sel(**{agg_fn.column: c})
layers[c] = self.p.element_type(cagg, **params)
return NdOverlay(layers, kdims=[element.get_dimension(agg_fn.column)])
class rasterize(AggregationOperation):
"""
Rasterize is a high-level operation that will rasterize any
Element or combination of Elements, aggregating them with the supplied
aggregator and interpolation method.
The default aggregation method depends on the type of Element but
usually defaults to the count of samples in each bin. Other
aggregators can be supplied implementing mean, max, min and other
reduction operations.
The bins of the aggregate are defined by the width and height and
the x_range and y_range. If x_sampling or y_sampling are supplied
the operation will ensure that a bin is no smaller than the minimum
sampling distance by reducing the width and height when zoomed in
beyond the minimum sampling distance.
By default, the PlotSize and RangeXY streams are applied when this
operation is used dynamically, which means that the width, height,
x_range and y_range will automatically be set to match the inner
dimensions of the linked plot and the ranges of the axes.
"""
aggregator = param.ClassSelector(class_=(ds.reductions.Reduction, basestring),
default='default')
interpolation = param.ObjectSelector(
default='default', objects=['default', 'linear', 'nearest', 'bilinear', None, False], doc="""
The interpolation method to apply during rasterization.
Default depends on element type""")
_transforms = [(Image, regrid),
(Polygons, geometry_rasterize),
(lambda x: (isinstance(x, Path) and
x.interface.datatype == 'spatialpandas'),
geometry_rasterize),
(TriMesh, trimesh_rasterize),
(QuadMesh, quadmesh_rasterize),
(lambda x: (isinstance(x, NdOverlay) and
issubclass(x.type, (Scatter, Points, Curve, Path))),
aggregate),
(Spikes, spikes_aggregate),
(Area, area_aggregate),
(Spread, spread_aggregate),
(Segments, segments_aggregate),
(Contours, contours_rasterize),
(Graph, aggregate),
(Scatter, aggregate),
(Points, aggregate),
(Curve, aggregate),
(Path, aggregate),
(type(None), shade) # To handle parameters of datashade
]
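    # Each (predicate, operation) pair above is applied in turn inside
    # _process below via element.map, so every matching element type is
    # handled by its dedicated rasterizer.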
def _process(self, element, key=None):
# Potentially needs traverse to find element types first?
all_allowed_kws = set()
all_supplied_kws = set()
for predicate, transform in self._transforms:
merged_param_values = dict(self.param.get_param_values(), **self.p)
# If aggregator or interpolation are 'default', pop parameter so
# datashader can choose the default aggregator itself
for k in ['aggregator', 'interpolation']:
if merged_param_values.get(k, None) == 'default':
merged_param_values.pop(k)
op_params = dict({k: v for k, v in merged_param_values.items()
if not (v is None and k == 'aggregator')},
dynamic=False)
extended_kws = dict(op_params, **self.p.extra_keywords())
all_supplied_kws |= set(extended_kws)
all_allowed_kws |= set(transform.param)
            # Collect the union of supplied keywords and the union of
            # allowed ones so unused parameters can be reported below.
op = transform.instance(**{k:v for k,v in extended_kws.items()
if k in transform.param})
op._precomputed = self._precomputed
element = element.map(op, predicate)
self._precomputed = op._precomputed
unused_params = list(all_supplied_kws - all_allowed_kws)
if unused_params:
self.param.warning('Parameter(s) [%s] not consumed by any element rasterizer.'
% ', '.join(unused_params))
return element
class datashade(rasterize, shade):
"""
Applies the aggregate and shade operations, aggregating all
elements in the supplied object and then applying normalization
and colormapping the aggregated data returning RGB elements.
See aggregate and shade operations for more details.
"""
def _process(self, element, key=None):
agg = rasterize._process(self, element, key)
shaded = shade._process(self, agg, key)
return shaded
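# Illustrative usage sketch for the datashade operation above, which
# rasterizes and shades in one step; assumes points is a holoviews Points
# element:
#
#   shaded = datashade(points, dynamic=False, aggregator=ds.count(),
#                      cmap=['lightblue', 'darkblue'])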
class stack(Operation):
"""
The stack operation allows compositing multiple RGB Elements using
the defined compositing operator.
"""
compositor = param.ObjectSelector(objects=['add', 'over', 'saturate', 'source'],
default='over', doc="""
Defines how the compositing operation combines the images""")
def uint8_to_uint32(self, element):
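        # Stacks the element's value dimensions into an RGBA uint8 array,
        # adding an opaque alpha channel when missing, then views each
        # pixel as a single uint32 for datashader's transfer functions.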
img = np.dstack([element.dimension_values(d, flat=False)
for d in element.vdims])
if img.shape[2] == 3: # alpha channel not included
alpha = np.ones(img.shape[:2])
if img.dtype.name == 'uint8':
alpha = (alpha*255).astype('uint8')
img = np.dstack([img, alpha])
if img.dtype.name != 'uint8':
img = (img*255).astype(np.uint8)
N, M, _ = img.shape
return img.view(dtype=np.uint32).reshape((N, M))
def _process(self, overlay, key=None):
if not isinstance(overlay, CompositeOverlay):
return overlay
elif len(overlay) == 1:
return overlay.last if isinstance(overlay, NdOverlay) else overlay.get(0)
imgs = []
for rgb in overlay:
if not isinstance(rgb, RGB):
raise TypeError("The stack operation expects elements of type RGB, "
"not '%s'." % type(rgb).__name__)
rgb = rgb.rgb
dims = [kd.name for kd in rgb.kdims][::-1]
coords = {kd.name: rgb.dimension_values(kd, False)
for kd in rgb.kdims}
imgs.append(tf.Image(self.uint8_to_uint32(rgb), coords=coords, dims=dims))
try:
imgs = xr.align(*imgs, join='exact')
except ValueError:
raise ValueError('RGB inputs to the stack operation could not be aligned; '
'ensure they share the same grid sampling.')
stacked = tf.stack(*imgs, how=self.p.compositor)
arr = shade.uint32_to_uint8(stacked.data)[::-1]
data = (coords[dims[1]], coords[dims[0]], arr[:, :, 0],
arr[:, :, 1], arr[:, :, 2])
if arr.shape[-1] == 4:
data = data + (arr[:, :, 3],)
return rgb.clone(data, datatype=[rgb.interface.datatype]+rgb.datatype)
class SpreadingOperation(LinkableOperation):
"""
Spreading expands each pixel in an Image based Element a certain
number of pixels on all sides according to a given shape, merging
pixels using a specified compositing operator. This can be useful
to make sparse plots more visible.
"""
how = param.ObjectSelector(default='source' if ds_version <= '0.11.1' else None,
objects=[None, 'source', 'over', 'saturate', 'add', 'max', 'min'], doc="""
The name of the compositing operator to use when combining
pixels. Default of None uses 'over' operator for RGB elements
and 'add' operator for aggregate arrays.""")
shape = param.ObjectSelector(default='circle', objects=['circle', 'square'],
doc="""
The shape to spread by. Options are 'circle' [default] or 'square'.""")
_per_element = True
@classmethod
def uint8_to_uint32(cls, img):
shape = img.shape
flat_shape = np.multiply.reduce(shape[:2])
if shape[-1] == 3:
img = np.dstack([img, np.ones(shape[:2], dtype='uint8')*255])
rgb = img.reshape((flat_shape, 4)).view('uint32').reshape(shape[:2])
return rgb
def _apply_spreading(self, array):
"""Apply the spread function using the indicated parameters."""
raise NotImplementedError
def _preprocess_rgb(self, element):
rgbarray = np.dstack([element.dimension_values(vd, flat=False)
for vd in element.vdims])
if rgbarray.dtype.kind == 'f':
rgbarray = rgbarray * 255
return tf.Image(self.uint8_to_uint32(rgbarray.astype('uint8')))
def _process(self, element, key=None):
if isinstance(element, RGB):
rgb = element.rgb
data = self._preprocess_rgb(rgb)
elif isinstance(element, Image):
data = element.clone(datatype=['xarray']).data[element.vdims[0].name]
else:
raise ValueError('spreading can only be applied to Image or RGB Elements.')
kwargs = {}
array = self._apply_spreading(data)
if isinstance(element, RGB):
img = datashade.uint32_to_uint8(array.data)[::-1]
new_data = {
kd.name: rgb.dimension_values(kd, expanded=False)
for kd in rgb.kdims
}
vdims = rgb.vdims+[rgb.alpha_dimension] if len(rgb.vdims) == 3 else rgb.vdims
kwargs['vdims'] = vdims
new_data[tuple(vd.name for vd in vdims)] = img
else:
new_data = array
return element.clone(new_data, **kwargs)
class spread(SpreadingOperation):
"""
Spreading expands each pixel in an Image based Element a certain
number of pixels on all sides according to a given shape, merging
pixels using a specified compositing operator. This can be useful
to make sparse plots more visible.
See the datashader documentation for more detail:
http://datashader.org/api.html#datashader.transfer_functions.spread
"""
px = param.Integer(default=1, doc="""
Number of pixels to spread on all sides.""")
def _apply_spreading(self, array):
return tf.spread(array, px=self.p.px, how=self.p.how, shape=self.p.shape)
class dynspread(SpreadingOperation):
"""
Spreading expands each pixel in an Image based Element a certain
number of pixels on all sides according to a given shape, merging
pixels using a specified compositing operator. This can be useful
to make sparse plots more visible. Dynamic spreading determines
how many pixels to spread based on a density heuristic.
See the datashader documentation for more detail:
http://datashader.org/api.html#datashader.transfer_functions.dynspread
"""
max_px = param.Integer(default=3, doc="""
Maximum number of pixels to spread on all sides.""")
threshold = param.Number(default=0.5, bounds=(0,1), doc="""
When spreading, determines how far to spread.
Spreading starts at 1 pixel, and stops when the fraction
of adjacent non-empty pixels reaches this threshold.
Higher values give more spreading, up to the max_px
allowed.""")
def _apply_spreading(self, array):
return tf.dynspread(
array, max_px=self.p.max_px, threshold=self.p.threshold,
how=self.p.how, shape=self.p.shape
)
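# Illustrative usage sketch for the spreading operations above; assumes
# points is a holoviews Points element:
#
#   spread_rgb = dynspread(datashade(points, dynamic=False), max_px=4,
#                          threshold=0.5)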
def split_dataframe(path_df):
"""
Splits a dataframe of paths separated by NaNs into individual
dataframes.
"""
splits = np.where(path_df.iloc[:, 0].isnull())[0]+1
return [df for df in np.split(path_df, splits) if len(df) > 1]
class _connect_edges(Operation):
split = param.Boolean(default=False, doc="""
Determines whether bundled edges will be split into individual edges
or concatenated with NaN separators.""")
def _bundle(self, position_df, edges_df):
raise NotImplementedError('_connect_edges is an abstract baseclass '
'and does not implement any actual bundling.')
def _process(self, element, key=None):
index = element.nodes.kdims[2].name
rename_edges = {d.name: v for d, v in zip(element.kdims[:2], ['source', 'target'])}
rename_nodes = {d.name: v for d, v in zip(element.nodes.kdims[:2], ['x', 'y'])}
position_df = element.nodes.redim(**rename_nodes).dframe([0, 1, 2]).set_index(index)
edges_df = element.redim(**rename_edges).dframe([0, 1])
paths = self._bundle(position_df, edges_df)
paths = paths.rename(columns={v: k for k, v in rename_nodes.items()})
paths = split_dataframe(paths) if self.p.split else [paths]
return element.clone((element.data, element.nodes, paths))
class bundle_graph(_connect_edges, hammer_bundle):
"""
Iteratively group edges and return as paths suitable for datashading.
Breaks each edge into a path with multiple line segments, and
iteratively curves this path to bundle edges into groups.
"""
def _bundle(self, position_df, edges_df):
from datashader.bundling import hammer_bundle
return hammer_bundle.__call__(self, position_df, edges_df, **self.p)
class directly_connect_edges(_connect_edges, connect_edges):
"""
    Given a Graph object, directly connects all nodes.
"""
def _bundle(self, position_df, edges_df):
return connect_edges.__call__(self, position_df, edges_df)
| 1 | 23,971 | Why does `clone` not already copy `xdensity` and `ydensity` from what it is cloning? | holoviz-holoviews | py |
@@ -52,6 +52,11 @@ namespace OpenTelemetry.Metrics
AggregationTemporality temporality = AggregationTemporality.Cumulative;
+ if (meterSources.Count() == 0)
+ {
+ throw new ArgumentException("No meter was added to the sdk.");
+ }
+
foreach (var reader in readers)
{
if (reader == null) | 1 | // <copyright file="MeterProviderSdk.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Linq;
using System.Text.RegularExpressions;
using OpenTelemetry.Internal;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Metrics
{
internal sealed class MeterProviderSdk : MeterProvider
{
internal const int MaxMetrics = 1000;
internal int ShutdownCount;
private readonly Metric[] metrics;
private readonly List<object> instrumentations = new List<object>();
private readonly List<Func<Instrument, MetricStreamConfiguration>> viewConfigs;
private readonly object collectLock = new object();
private readonly object instrumentCreationLock = new object();
private readonly Dictionary<string, bool> metricStreamNames = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
private readonly MeterListener listener;
private readonly MetricReader reader;
private int metricIndex = -1;
internal MeterProviderSdk(
Resource resource,
IEnumerable<string> meterSources,
List<MeterProviderBuilderBase.InstrumentationFactory> instrumentationFactories,
List<Func<Instrument, MetricStreamConfiguration>> viewConfigs,
IEnumerable<MetricReader> readers)
{
this.Resource = resource;
this.viewConfigs = viewConfigs;
this.metrics = new Metric[MaxMetrics];
AggregationTemporality temporality = AggregationTemporality.Cumulative;
foreach (var reader in readers)
{
if (reader == null)
{
throw new ArgumentException("A null value was found.", nameof(readers));
}
reader.SetParentProvider(this);
// TODO: Actually support multiple readers.
// Currently the last reader's temporality wins.
temporality = reader.PreferredAggregationTemporality;
if (this.reader == null)
{
this.reader = reader;
}
else if (this.reader is CompositeMetricReader compositeReader)
{
compositeReader.AddReader(reader);
}
else
{
this.reader = new CompositeMetricReader(new[] { this.reader, reader });
}
}
if (instrumentationFactories.Any())
{
foreach (var instrumentationFactory in instrumentationFactories)
{
this.instrumentations.Add(instrumentationFactory.Factory());
}
}
// Setup Listener
Func<Instrument, bool> shouldListenTo = instrument => false;
if (meterSources.Any(s => s.Contains('*')))
{
var regex = GetWildcardRegex(meterSources);
shouldListenTo = instrument => regex.IsMatch(instrument.Meter.Name);
}
else if (meterSources.Any())
{
var meterSourcesToSubscribe = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
foreach (var meterSource in meterSources)
{
meterSourcesToSubscribe.Add(meterSource);
}
shouldListenTo = instrument => meterSourcesToSubscribe.Contains(instrument.Meter.Name);
}
this.listener = new MeterListener();
var viewConfigCount = this.viewConfigs.Count;
if (viewConfigCount > 0)
{
this.listener.InstrumentPublished = (instrument, listener) =>
{
if (!shouldListenTo(instrument))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "Instrument belongs to a Meter not subscribed by the provider.", "Use AddMeter to add the Meter to the provider.");
return;
}
// Creating list with initial capacity as the maximum
// possible size, to avoid any array resize/copy internally.
                    // There may be excess space wasted, but it'll be eligible for
// GC right after this method.
var metricStreamConfigs = new List<MetricStreamConfiguration>(viewConfigCount);
foreach (var viewConfig in this.viewConfigs)
{
var metricStreamConfig = viewConfig(instrument);
if (metricStreamConfig != null)
{
metricStreamConfigs.Add(metricStreamConfig);
}
}
if (metricStreamConfigs.Count == 0)
{
// No views matched. Add null
// which will apply defaults.
// Users can turn off this default
// by adding a view like below as the last view.
// .AddView(instrumentName: "*", new MetricStreamConfiguration() { Aggregation = Aggregation.Drop })
metricStreamConfigs.Add(null);
}
var maxCountMetricsToBeCreated = metricStreamConfigs.Count;
// Create list with initial capacity as the max metric count.
                    // Due to the duplicate/max limits, we may not end up using
                    // them all, and that memory is wasted until the Meter is disposed.
// TODO: Revisit to see if we need to do metrics.TrimExcess()
var metrics = new List<Metric>(maxCountMetricsToBeCreated);
lock (this.instrumentCreationLock)
{
for (int i = 0; i < maxCountMetricsToBeCreated; i++)
{
var metricStreamConfig = metricStreamConfigs[i];
var metricStreamName = metricStreamConfig?.Name ?? instrument.Name;
if (this.metricStreamNames.ContainsKey(metricStreamName))
{
// TODO: Log that instrument is ignored
// as the resulting Metric name is conflicting
// with existing name.
continue;
}
if (metricStreamConfig?.Aggregation == Aggregation.Drop)
{
// TODO: Log that instrument is ignored
// as user explicitly asked to drop it
// with View.
continue;
}
var index = ++this.metricIndex;
if (index >= MaxMetrics)
{
// TODO: Log that instrument is ignored
// as max number of Metrics have reached.
}
else
{
Metric metric;
var metricDescription = metricStreamConfig?.Description ?? instrument.Description;
string[] tagKeysInteresting = metricStreamConfig?.TagKeys;
double[] histogramBucketBounds = (metricStreamConfig is HistogramConfiguration histogramConfig
&& histogramConfig.BucketBounds != null) ? histogramConfig.BucketBounds : null;
metric = new Metric(instrument, temporality, metricStreamName, metricDescription, histogramBucketBounds, tagKeysInteresting);
this.metrics[index] = metric;
metrics.Add(metric);
this.metricStreamNames.Add(metricStreamName, true);
}
}
if (metrics.Count > 0)
{
listener.EnableMeasurementEvents(instrument, metrics);
}
}
};
// Everything double
this.listener.SetMeasurementEventCallback<double>(this.MeasurementRecordedDouble);
this.listener.SetMeasurementEventCallback<float>((instrument, value, tags, state) => this.MeasurementRecordedDouble(instrument, value, tags, state));
// Everything long
this.listener.SetMeasurementEventCallback<long>(this.MeasurementRecordedLong);
this.listener.SetMeasurementEventCallback<int>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<short>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<byte>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
}
else
{
this.listener.InstrumentPublished = (instrument, listener) =>
{
if (!shouldListenTo(instrument))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "Instrument belongs to a Meter not subscribed by the provider.", "Use AddMeter to add the Meter to the provider.");
return;
}
try
{
var metricName = instrument.Name;
Metric metric = null;
lock (this.instrumentCreationLock)
{
if (this.metricStreamNames.ContainsKey(metricName))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(metricName, instrument.Meter.Name, "Metric name conflicting with existing name.", "Either change the name of the instrument or change name using View.");
return;
}
var index = ++this.metricIndex;
if (index >= MaxMetrics)
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(metricName, instrument.Meter.Name, "Maximum allowed Metrics for the provider exceeded.", "Use views to drop unused instruments. Or configure Provider to allow higher limit.");
return;
}
else
{
metric = new Metric(instrument, temporality, metricName, instrument.Description);
this.metrics[index] = metric;
this.metricStreamNames.Add(metricName, true);
}
}
listener.EnableMeasurementEvents(instrument, metric);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "SDK internal error occurred.", "Contact SDK owners.");
}
};
// Everything double
this.listener.SetMeasurementEventCallback<double>(this.MeasurementRecordedDoubleSingleStream);
this.listener.SetMeasurementEventCallback<float>((instrument, value, tags, state) => this.MeasurementRecordedDoubleSingleStream(instrument, value, tags, state));
// Everything long
this.listener.SetMeasurementEventCallback<long>(this.MeasurementRecordedLongSingleStream);
this.listener.SetMeasurementEventCallback<int>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<short>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<byte>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
}
this.listener.MeasurementsCompleted = (instrument, state) => this.MeasurementsCompleted(instrument, state);
this.listener.Start();
static Regex GetWildcardRegex(IEnumerable<string> collection)
{
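// e.g. a subscribed source of "MyCompany.*" yields the pattern
// ^(?:MyCompany\..*)$; '*' is the only wildcard and expands to ".*",
// matched case-insensitively.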
var pattern = '^' + string.Join("|", from name in collection select "(?:" + Regex.Escape(name).Replace("\\*", ".*") + ')') + '$';
return new Regex(pattern, RegexOptions.Compiled | RegexOptions.IgnoreCase);
}
}
internal Resource Resource { get; }
internal List<object> Instrumentations => this.instrumentations;
internal MetricReader Reader => this.reader;
internal void MeasurementsCompleted(Instrument instrument, object state)
{
Console.WriteLine($"Instrument {instrument.Meter.Name}:{instrument.Name} completed.");
}
internal void MeasurementRecordedDouble(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metrics = state as List<Metric>;
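// 'state' round-trips from EnableMeasurementEvents in InstrumentPublished,
// so it is the List<Metric> built for this instrument.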
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metrics == null)
{
// TODO: log
return;
}
if (metrics.Count == 1)
{
// special casing the common path
// as this is faster than the
// foreach, when count is 1.
metrics[0].UpdateDouble(value, tagsRos);
}
else
{
foreach (var metric in metrics)
{
metric.UpdateDouble(value, tagsRos);
}
}
}
internal void MeasurementRecordedLong(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metrics = state as List<Metric>;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metrics == null)
{
// TODO: log
return;
}
if (metrics.Count == 1)
{
// special casing the common path
// as this is faster than the
// foreach, when count is 1.
metrics[0].UpdateLong(value, tagsRos);
}
else
{
foreach (var metric in metrics)
{
metric.UpdateLong(value, tagsRos);
}
}
}
internal void MeasurementRecordedLongSingleStream(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metric = state as Metric;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metric == null)
{
// TODO: log
return;
}
metric.UpdateLong(value, tagsRos);
}
internal void MeasurementRecordedDoubleSingleStream(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metric = state as Metric;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metric == null)
{
// TODO: log
return;
}
metric.UpdateDouble(value, tagsRos);
}
internal Batch<Metric> Collect()
{
lock (this.collectLock)
{
try
{
// Record all observable instruments
try
{
this.listener.RecordObservableInstruments();
}
catch (Exception exception)
{
// TODO:
// It doesn't look like we can find which instrument
// callback threw.
OpenTelemetrySdkEventSource.Log.MetricObserverCallbackException(exception);
}
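// Snapshot the highest allotted index; the cap at MaxMetrics - 1 guards
// against metricIndex having overshot the array bound when instruments
// were rejected for exceeding the limit.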
var indexSnapShot = Math.Min(this.metricIndex, MaxMetrics - 1);
var target = indexSnapShot + 1;
for (int i = 0; i < target; i++)
{
this.metrics[i].SnapShot();
}
return (target > 0) ? new Batch<Metric>(this.metrics, target) : default;
}
catch (Exception)
{
// TODO: Log
return default;
}
}
}
/// <summary>
/// Called by <c>ForceFlush</c>. This function should block the current
/// thread until flush completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number (non-negative) of milliseconds to wait, or
/// <c>Timeout.Infinite</c> to wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when flush succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>ForceFlush</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnForceFlush(int timeoutMilliseconds)
{
return this.reader?.Collect(timeoutMilliseconds) ?? true;
}
/// <summary>
/// Called by <c>Shutdown</c>. This function should block the current
/// thread until shutdown completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number (non-negative) of milliseconds to wait, or
/// <c>Timeout.Infinite</c> to wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when shutdown succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>Shutdown</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnShutdown(int timeoutMilliseconds)
{
return this.reader?.Shutdown(timeoutMilliseconds) ?? true;
}
protected override void Dispose(bool disposing)
{
if (this.instrumentations != null)
{
foreach (var item in this.instrumentations)
{
(item as IDisposable)?.Dispose();
}
this.instrumentations.Clear();
}
// Wait for up to 5 seconds grace period
this.reader?.Shutdown(5000);
this.reader?.Dispose();
this.listener.Dispose();
}
}
}
| 1 | 22,038 | Curious - do we do the same for traces (when no ActivitySource / legacy source are added)? (and why we want to do it for metrics?) | open-telemetry-opentelemetry-dotnet | .cs |
@@ -801,11 +801,11 @@ SdMmcHcStartSdClock (
Refer to SD Host Controller Simplified spec 3.0 Section 3.2.1 for details.
- @param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
- @param[in] Slot The slot number of the SD card to send the command to.
- @param[in] BusTiming BusTiming at which the frequency change is done.
- @param[in] FirstTimeSetup Flag to indicate whether the clock is being setup for the first time.
- @param[in] ClockFreq The max clock frequency to be set. The unit is KHz.
+ @param[in] PciIo The PCI IO protocol instance.
+ @param[in] Slot The slot number of the SD card to send the command to.
+ @param[in] ClockFreq The max clock frequency to be set. The unit is KHz.
+ @param[in] BaseClkFreq The base clock frequency of host controller in MHz.
+ @param[in] ControllerVer The version of host controller.
@retval EFI_SUCCESS The clock is supplied successfully.
@retval Others The clock isn't supplied successfully. | 1 | /** @file
This driver is used to manage SD/MMC PCI host controllers which are compliant
with SD Host Controller Simplified Specification version 3.00 plus the 64-bit
System Addressing support in SD Host Controller Simplified Specification version
4.20.
It would expose EFI_SD_MMC_PASS_THRU_PROTOCOL for upper layer use.
Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
Copyright (c) 2015 - 2020, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include "SdMmcPciHcDxe.h"
/**
Dump the content of SD/MMC host controller's Capability Register.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] Capability The buffer to store the capability data.
**/
VOID
DumpCapabilityReg (
IN UINT8 Slot,
IN SD_MMC_HC_SLOT_CAP *Capability
)
{
//
// Dump Capability Data
//
DEBUG ((DEBUG_INFO, " == Slot [%d] Capability is 0x%x ==\n", Slot, Capability));
DEBUG ((DEBUG_INFO, " Timeout Clk Freq %d%a\n", Capability->TimeoutFreq, (Capability->TimeoutUnit) ? "MHz" : "KHz"));
DEBUG ((DEBUG_INFO, " Base Clk Freq %dMHz\n", Capability->BaseClkFreq));
DEBUG ((DEBUG_INFO, " Max Blk Len %dbytes\n", 512 * (1 << Capability->MaxBlkLen)));
DEBUG ((DEBUG_INFO, " 8-bit Support %a\n", Capability->BusWidth8 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " ADMA2 Support %a\n", Capability->Adma2 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " HighSpeed Support %a\n", Capability->HighSpeed ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " SDMA Support %a\n", Capability->Sdma ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Suspend/Resume %a\n", Capability->SuspRes ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Voltage 3.3 %a\n", Capability->Voltage33 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Voltage 3.0 %a\n", Capability->Voltage30 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Voltage 1.8 %a\n", Capability->Voltage18 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " V4 64-bit Sys Bus %a\n", Capability->SysBus64V4 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " V3 64-bit Sys Bus %a\n", Capability->SysBus64V3 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Async Interrupt %a\n", Capability->AsyncInt ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " SlotType "));
if (Capability->SlotType == 0x00) {
DEBUG ((DEBUG_INFO, "%a\n", "Removable Slot"));
} else if (Capability->SlotType == 0x01) {
DEBUG ((DEBUG_INFO, "%a\n", "Embedded Slot"));
} else if (Capability->SlotType == 0x02) {
DEBUG ((DEBUG_INFO, "%a\n", "Shared Bus Slot"));
} else {
DEBUG ((DEBUG_INFO, "%a\n", "Reserved"));
}
DEBUG ((DEBUG_INFO, " SDR50 Support %a\n", Capability->Sdr50 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " SDR104 Support %a\n", Capability->Sdr104 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " DDR50 Support %a\n", Capability->Ddr50 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Driver Type A %a\n", Capability->DriverTypeA ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Driver Type C %a\n", Capability->DriverTypeC ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Driver Type D %a\n", Capability->DriverTypeD ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Driver Type 4 %a\n", Capability->DriverType4 ? "TRUE" : "FALSE"));
if (Capability->TimerCount == 0) {
DEBUG ((DEBUG_INFO, " Retuning TimerCnt Disabled\n", 2 * (Capability->TimerCount - 1)));
} else {
DEBUG ((DEBUG_INFO, " Retuning TimerCnt %dseconds\n", 2 * (Capability->TimerCount - 1)));
}
DEBUG ((DEBUG_INFO, " SDR50 Tuning %a\n", Capability->TuningSDR50 ? "TRUE" : "FALSE"));
DEBUG ((DEBUG_INFO, " Retuning Mode Mode %d\n", Capability->RetuningMod + 1));
DEBUG ((DEBUG_INFO, " Clock Multiplier M = %d\n", Capability->ClkMultiplier + 1));
DEBUG ((DEBUG_INFO, " HS 400 %a\n", Capability->Hs400 ? "TRUE" : "FALSE"));
return;
}
/**
Read SlotInfo register from SD/MMC host controller pci config space.
@param[in] PciIo The PCI IO protocol instance.
@param[out] FirstBar The buffer to store the first BAR value.
@param[out] SlotNum The buffer to store the supported slot number.
@retval EFI_SUCCESS The operation succeeds.
@retval Others The operation fails.
**/
EFI_STATUS
EFIAPI
SdMmcHcGetSlotInfo (
IN EFI_PCI_IO_PROTOCOL *PciIo,
OUT UINT8 *FirstBar,
OUT UINT8 *SlotNum
)
{
EFI_STATUS Status;
SD_MMC_HC_SLOT_INFO SlotInfo;
Status = PciIo->Pci.Read (
PciIo,
EfiPciIoWidthUint8,
SD_MMC_HC_SLOT_OFFSET,
sizeof (SlotInfo),
&SlotInfo
);
if (EFI_ERROR (Status)) {
return Status;
}
*FirstBar = SlotInfo.FirstBar;
*SlotNum = SlotInfo.SlotNum + 1;
ASSERT ((*FirstBar + *SlotNum) < SD_MMC_HC_MAX_SLOT);
return EFI_SUCCESS;
}
/**
Read/Write specified SD/MMC host controller mmio register.
@param[in] PciIo The PCI IO protocol instance.
@param[in] BarIndex The BAR index of the standard PCI Configuration
header to use as the base address for the memory
operation to perform.
@param[in] Offset The offset within the selected BAR to start the
memory operation.
@param[in] Read A boolean to indicate it's read or write operation.
@param[in] Count The width of the mmio register in bytes.
Must be 1, 2 , 4 or 8 bytes.
@param[in, out] Data For read operations, the destination buffer to store
the results. For write operations, the source buffer
to write data from. The caller is responsible for
having ownership of the data buffer and ensuring its
size not less than Count bytes.
@retval EFI_INVALID_PARAMETER The PciIo or Data is NULL or the Count is not valid.
@retval EFI_SUCCESS The read/write operation succeeds.
@retval Others The read/write operation fails.
**/
EFI_STATUS
EFIAPI
SdMmcHcRwMmio (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 BarIndex,
IN UINT32 Offset,
IN BOOLEAN Read,
IN UINT8 Count,
IN OUT VOID *Data
)
{
EFI_STATUS Status;
EFI_PCI_IO_PROTOCOL_WIDTH Width;
if ((PciIo == NULL) || (Data == NULL)) {
return EFI_INVALID_PARAMETER;
}
switch (Count) {
case 1:
Width = EfiPciIoWidthUint8;
break;
case 2:
Width = EfiPciIoWidthUint16;
Count = 1;
break;
case 4:
Width = EfiPciIoWidthUint32;
Count = 1;
break;
case 8:
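//
// A 64-bit register access is split into two consecutive 32-bit
// operations.
//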
Width = EfiPciIoWidthUint32;
Count = 2;
break;
default:
return EFI_INVALID_PARAMETER;
}
if (Read) {
Status = PciIo->Mem.Read (
PciIo,
Width,
BarIndex,
(UINT64)Offset,
Count,
Data
);
} else {
Status = PciIo->Mem.Write (
PciIo,
Width,
BarIndex,
(UINT64)Offset,
Count,
Data
);
}
return Status;
}
/**
Do OR operation with the value of the specified SD/MMC host controller mmio register.
@param[in] PciIo The PCI IO protocol instance.
@param[in] BarIndex The BAR index of the standard PCI Configuration
header to use as the base address for the memory
operation to perform.
@param[in] Offset The offset within the selected BAR to start the
memory operation.
@param[in] Count The width of the mmio register in bytes.
Must be 1, 2 , 4 or 8 bytes.
@param[in] OrData The pointer to the data used to do OR operation.
The caller is responsible for having ownership of
the data buffer and ensuring its size not less than
Count bytes.
@retval EFI_INVALID_PARAMETER The PciIo or OrData is NULL or the Count is not valid.
@retval EFI_SUCCESS The OR operation succeeds.
@retval Others The OR operation fails.
**/
EFI_STATUS
EFIAPI
SdMmcHcOrMmio (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 BarIndex,
IN UINT32 Offset,
IN UINT8 Count,
IN VOID *OrData
)
{
EFI_STATUS Status;
UINT64 Data;
UINT64 Or;
Status = SdMmcHcRwMmio (PciIo, BarIndex, Offset, TRUE, Count, &Data);
if (EFI_ERROR (Status)) {
return Status;
}
if (Count == 1) {
Or = *(UINT8 *)OrData;
} else if (Count == 2) {
Or = *(UINT16 *)OrData;
} else if (Count == 4) {
Or = *(UINT32 *)OrData;
} else if (Count == 8) {
Or = *(UINT64 *)OrData;
} else {
return EFI_INVALID_PARAMETER;
}
Data |= Or;
Status = SdMmcHcRwMmio (PciIo, BarIndex, Offset, FALSE, Count, &Data);
return Status;
}
/**
Do AND operation with the value of the specified SD/MMC host controller mmio register.
@param[in] PciIo The PCI IO protocol instance.
@param[in] BarIndex The BAR index of the standard PCI Configuration
header to use as the base address for the memory
operation to perform.
@param[in] Offset The offset within the selected BAR to start the
memory operation.
@param[in] Count The width of the mmio register in bytes.
Must be 1, 2 , 4 or 8 bytes.
@param[in] AndData The pointer to the data used to do AND operation.
The caller is responsible for having ownership of
the data buffer and ensuring its size not less than
Count bytes.
@retval EFI_INVALID_PARAMETER The PciIo or AndData is NULL or the Count is not valid.
@retval EFI_SUCCESS The AND operation succeeds.
@retval Others The AND operation fails.
**/
EFI_STATUS
EFIAPI
SdMmcHcAndMmio (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 BarIndex,
IN UINT32 Offset,
IN UINT8 Count,
IN VOID *AndData
)
{
EFI_STATUS Status;
UINT64 Data;
UINT64 And;
Status = SdMmcHcRwMmio (PciIo, BarIndex, Offset, TRUE, Count, &Data);
if (EFI_ERROR (Status)) {
return Status;
}
if (Count == 1) {
And = *(UINT8 *)AndData;
} else if (Count == 2) {
And = *(UINT16 *)AndData;
} else if (Count == 4) {
And = *(UINT32 *)AndData;
} else if (Count == 8) {
And = *(UINT64 *)AndData;
} else {
return EFI_INVALID_PARAMETER;
}
Data &= And;
Status = SdMmcHcRwMmio (PciIo, BarIndex, Offset, FALSE, Count, &Data);
return Status;
}
/**
Wait for the value of the specified MMIO register set to the test value.
@param[in] PciIo The PCI IO protocol instance.
@param[in] BarIndex The BAR index of the standard PCI Configuration
header to use as the base address for the memory
operation to perform.
@param[in] Offset The offset within the selected BAR to start the
memory operation.
@param[in] Count The width of the mmio register in bytes.
Must be 1, 2, 4 or 8 bytes.
@param[in] MaskValue The mask value of memory.
@param[in] TestValue The test value of memory.
@retval EFI_NOT_READY The MMIO register hasn't set to the expected value.
@retval EFI_SUCCESS The MMIO register has expected value.
@retval Others The MMIO operation fails.
**/
EFI_STATUS
EFIAPI
SdMmcHcCheckMmioSet (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 BarIndex,
IN UINT32 Offset,
IN UINT8 Count,
IN UINT64 MaskValue,
IN UINT64 TestValue
)
{
EFI_STATUS Status;
UINT64 Value;
//
// Access PCI MMIO space to see if the value is the tested one.
//
Value = 0;
Status = SdMmcHcRwMmio (PciIo, BarIndex, Offset, TRUE, Count, &Value);
if (EFI_ERROR (Status)) {
return Status;
}
Value &= MaskValue;
if (Value == TestValue) {
return EFI_SUCCESS;
}
return EFI_NOT_READY;
}
/**
Wait for the value of the specified MMIO register set to the test value.
@param[in] PciIo The PCI IO protocol instance.
@param[in] BarIndex The BAR index of the standard PCI Configuration
header to use as the base address for the memory
operation to perform.
@param[in] Offset The offset within the selected BAR to start the
memory operation.
@param[in] Count The width of the mmio register in bytes.
Must be 1, 2, 4 or 8 bytes.
@param[in] MaskValue The mask value of memory.
@param[in] TestValue The test value of memory.
@param[in] Timeout The time out value for wait memory set, uses 1
microsecond as a unit.
@retval EFI_TIMEOUT The MMIO register hasn't expected value in timeout
range.
@retval EFI_SUCCESS The MMIO register has expected value.
@retval Others The MMIO operation fails.
**/
EFI_STATUS
EFIAPI
SdMmcHcWaitMmioSet (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 BarIndex,
IN UINT32 Offset,
IN UINT8 Count,
IN UINT64 MaskValue,
IN UINT64 TestValue,
IN UINT64 Timeout
)
{
EFI_STATUS Status;
BOOLEAN InfiniteWait;
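//
// A Timeout of 0 means wait indefinitely; otherwise Timeout is the
// polling budget in 1-microsecond steps.
//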
if (Timeout == 0) {
InfiniteWait = TRUE;
} else {
InfiniteWait = FALSE;
}
while (InfiniteWait || (Timeout > 0)) {
Status = SdMmcHcCheckMmioSet (
PciIo,
BarIndex,
Offset,
Count,
MaskValue,
TestValue
);
if (Status != EFI_NOT_READY) {
return Status;
}
//
// Stall for 1 microsecond.
//
gBS->Stall (1);
Timeout--;
}
return EFI_TIMEOUT;
}
/**
Get the controller version information from the specified slot.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[out] Version The buffer to store the version information.
@retval EFI_SUCCESS The operation executes successfully.
@retval Others The operation fails.
**/
EFI_STATUS
SdMmcHcGetControllerVersion (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
OUT UINT16 *Version
)
{
EFI_STATUS Status;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_CTRL_VER, TRUE, sizeof (UINT16), Version);
if (EFI_ERROR (Status)) {
return Status;
}
*Version &= 0xFF;
return EFI_SUCCESS;
}
/**
Software reset the specified SD/MMC host controller and enable all interrupts.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Slot The slot number of the SD card to send the command to.
@retval EFI_SUCCESS The software reset executes successfully.
@retval Others The software reset fails.
**/
EFI_STATUS
SdMmcHcReset (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot
)
{
EFI_STATUS Status;
UINT8 SwReset;
EFI_PCI_IO_PROTOCOL *PciIo;
//
// Notify the SD/MMC override protocol that we are about to reset
// the SD/MMC host controller.
//
if ((mOverride != NULL) && (mOverride->NotifyPhase != NULL)) {
Status = mOverride->NotifyPhase (
Private->ControllerHandle,
Slot,
EdkiiSdMmcResetPre,
NULL
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_WARN,
"%a: SD/MMC pre reset notifier callback failed - %r\n",
__FUNCTION__,
Status
));
return Status;
}
}
PciIo = Private->PciIo;
SwReset = BIT0;
Status = SdMmcHcOrMmio (PciIo, Slot, SD_MMC_HC_SW_RST, sizeof (SwReset), &SwReset);
if (EFI_ERROR (Status)) {
DEBUG ((DEBUG_ERROR, "SdMmcHcReset: write SW Reset for All fails: %r\n", Status));
return Status;
}
Status = SdMmcHcWaitMmioSet (
PciIo,
Slot,
SD_MMC_HC_SW_RST,
sizeof (SwReset),
BIT0,
0x00,
SD_MMC_HC_GENERIC_TIMEOUT
);
if (EFI_ERROR (Status)) {
DEBUG ((DEBUG_INFO, "SdMmcHcReset: reset done with %r\n", Status));
return Status;
}
//
// Enable all interrupt after reset all.
//
Status = SdMmcHcEnableInterrupt (PciIo, Slot);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_INFO,
"SdMmcHcReset: SdMmcHcEnableInterrupt done with %r\n",
Status
));
return Status;
}
//
// Notify the SD/MMC override protocol that we have just reset
// the SD/MMC host controller.
//
if ((mOverride != NULL) && (mOverride->NotifyPhase != NULL)) {
Status = mOverride->NotifyPhase (
Private->ControllerHandle,
Slot,
EdkiiSdMmcResetPost,
NULL
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_WARN,
"%a: SD/MMC post reset notifier callback failed - %r\n",
__FUNCTION__,
Status
));
}
}
return Status;
}
/**
Set all interrupt status bits in Normal and Error Interrupt Status Enable
register.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@retval EFI_SUCCESS The operation executes successfully.
@retval Others The operation fails.
**/
EFI_STATUS
SdMmcHcEnableInterrupt (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot
)
{
EFI_STATUS Status;
UINT16 IntStatus;
//
// Enable all bits in Error Interrupt Status Enable Register
//
IntStatus = 0xFFFF;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_ERR_INT_STS_EN, FALSE, sizeof (IntStatus), &IntStatus);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Enable all bits in Normal Interrupt Status Enable Register
//
IntStatus = 0xFFFF;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_NOR_INT_STS_EN, FALSE, sizeof (IntStatus), &IntStatus);
return Status;
}
/**
Get the capability data from the specified slot.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[out] Capability The buffer to store the capability data.
@retval EFI_SUCCESS The operation executes successfully.
@retval Others The operation fails.
**/
EFI_STATUS
SdMmcHcGetCapability (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
OUT SD_MMC_HC_SLOT_CAP *Capability
)
{
EFI_STATUS Status;
UINT64 Cap;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_CAP, TRUE, sizeof (Cap), &Cap);
if (EFI_ERROR (Status)) {
return Status;
}
CopyMem (Capability, &Cap, sizeof (Cap));
return EFI_SUCCESS;
}
/**
Get the maximum current capability data from the specified slot.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[out] MaxCurrent The buffer to store the maximum current capability data.
@retval EFI_SUCCESS The operation executes successfully.
@retval Others The operation fails.
**/
EFI_STATUS
SdMmcHcGetMaxCurrent (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
OUT UINT64 *MaxCurrent
)
{
EFI_STATUS Status;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_MAX_CURRENT_CAP, TRUE, sizeof (UINT64), MaxCurrent);
return Status;
}
/**
Detect whether there is a SD/MMC card attached at the specified SD/MMC host controller
slot.
Refer to SD Host Controller Simplified spec 3.0 Section 3.1 for details.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[out] MediaPresent The pointer to the media present boolean value.
@retval EFI_SUCCESS       No media change happened.
@retval EFI_MEDIA_CHANGED A media change happened.
@retval Others The detection fails.
**/
EFI_STATUS
SdMmcHcCardDetect (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
OUT BOOLEAN *MediaPresent
)
{
EFI_STATUS Status;
UINT16 Data;
UINT32 PresentState;
//
// Check Present State Register to see if there is a card presented.
//
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_PRESENT_STATE, TRUE, sizeof (PresentState), &PresentState);
if (EFI_ERROR (Status)) {
return Status;
}
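//
// BIT16 of the Present State register is the Card Inserted bit.
//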
if ((PresentState & BIT16) != 0) {
*MediaPresent = TRUE;
} else {
*MediaPresent = FALSE;
}
//
// Check Normal Interrupt Status Register
//
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_NOR_INT_STS, TRUE, sizeof (Data), &Data);
if (EFI_ERROR (Status)) {
return Status;
}
if ((Data & (BIT6 | BIT7)) != 0) {
//
// Clear BIT6 and BIT7 by writing 1 to these two bits if set.
//
Data &= BIT6 | BIT7;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_NOR_INT_STS, FALSE, sizeof (Data), &Data);
if (EFI_ERROR (Status)) {
return Status;
}
return EFI_MEDIA_CHANGED;
}
return EFI_SUCCESS;
}
/**
Stop SD/MMC card clock.
Refer to SD Host Controller Simplified spec 3.0 Section 3.2.2 for details.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@retval EFI_SUCCESS Succeed to stop SD/MMC clock.
@retval Others Fail to stop SD/MMC clock.
**/
EFI_STATUS
SdMmcHcStopClock (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot
)
{
EFI_STATUS Status;
UINT32 PresentState;
UINT16 ClockCtrl;
//
// Ensure no SD transactions are occurring on the SD Bus by
// waiting for Command Inhibit (DAT) and Command Inhibit (CMD)
// in the Present State register to be 0.
//
Status = SdMmcHcWaitMmioSet (
PciIo,
Slot,
SD_MMC_HC_PRESENT_STATE,
sizeof (PresentState),
BIT0 | BIT1,
0,
SD_MMC_HC_GENERIC_TIMEOUT
);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Set SD Clock Enable in the Clock Control register to 0
//
ClockCtrl = (UINT16) ~BIT2;
Status = SdMmcHcAndMmio (PciIo, Slot, SD_MMC_HC_CLOCK_CTRL, sizeof (ClockCtrl), &ClockCtrl);
return Status;
}
/**
Start the SD clock.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number.
@retval EFI_SUCCESS Succeeded to start the SD clock.
@retval Others Failed to start the SD clock.
**/
EFI_STATUS
SdMmcHcStartSdClock (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot
)
{
UINT16 ClockCtrl;
//
// Set SD Clock Enable in the Clock Control register to 1
//
ClockCtrl = BIT2;
return SdMmcHcOrMmio (PciIo, Slot, SD_MMC_HC_CLOCK_CTRL, sizeof (ClockCtrl), &ClockCtrl);
}
/**
SD/MMC card clock supply.
Refer to SD Host Controller Simplified spec 3.0 Section 3.2.1 for details.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] BusTiming BusTiming at which the frequency change is done.
@param[in] FirstTimeSetup Flag to indicate whether the clock is being setup for the first time.
@param[in] ClockFreq The max clock frequency to be set. The unit is KHz.
@retval EFI_SUCCESS The clock is supplied successfully.
@retval Others The clock isn't supplied successfully.
**/
EFI_STATUS
SdMmcHcClockSupply (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot,
IN SD_MMC_BUS_MODE BusTiming,
IN BOOLEAN FirstTimeSetup,
IN UINT64 ClockFreq
)
{
EFI_STATUS Status;
UINT32 SettingFreq;
UINT32 Divisor;
UINT32 Remainder;
UINT16 ClockCtrl;
UINT32 BaseClkFreq;
UINT16 ControllerVer;
EFI_PCI_IO_PROTOCOL *PciIo;
PciIo = Private->PciIo;
BaseClkFreq = Private->BaseClkFreq[Slot];
ControllerVer = Private->ControllerVersion[Slot];
if ((BaseClkFreq == 0) || (ClockFreq == 0)) {
return EFI_INVALID_PARAMETER;
}
if (ClockFreq > (BaseClkFreq * 1000)) {
ClockFreq = BaseClkFreq * 1000;
}
//
// Calculate the divisor of base frequency.
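// The resulting SDCLK is BaseClkFreq / (2 * Divisor). For example, a
// 100 MHz base clock with a 400 KHz target yields Divisor = 125:
// 100000 KHz / (2 * 125) = 400 KHz.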
//
Divisor = 0;
SettingFreq = BaseClkFreq * 1000;
while (ClockFreq < SettingFreq) {
Divisor++;
SettingFreq = (BaseClkFreq * 1000) / (2 * Divisor);
Remainder = (BaseClkFreq * 1000) % (2 * Divisor);
if ((ClockFreq == SettingFreq) && (Remainder == 0)) {
break;
}
if ((ClockFreq == SettingFreq) && (Remainder != 0)) {
SettingFreq++;
}
}
DEBUG ((DEBUG_INFO, "BaseClkFreq %dMHz Divisor %d ClockFreq %dKhz\n", BaseClkFreq, Divisor, ClockFreq));
//
// Set SDCLK Frequency Select and Internal Clock Enable fields in Clock Control register.
//
if ((ControllerVer >= SD_MMC_HC_CTRL_VER_300) &&
(ControllerVer <= SD_MMC_HC_CTRL_VER_420))
{
ASSERT (Divisor <= 0x3FF);
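//
// 10-bit divided clock mode: divisor bits [7:0] go to SDCLK Frequency
// Select (register bits [15:8]) and divisor bits [9:8] go to the Upper
// Bits field (register bits [7:6]).
//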
ClockCtrl = ((Divisor & 0xFF) << 8) | ((Divisor & 0x300) >> 2);
} else if ((ControllerVer == SD_MMC_HC_CTRL_VER_100) ||
(ControllerVer == SD_MMC_HC_CTRL_VER_200))
{
//
// Only the most significant bit can be used as divisor.
//
if (((Divisor - 1) & Divisor) != 0) {
Divisor = 1 << (HighBitSet32 (Divisor) + 1);
}
ASSERT (Divisor <= 0x80);
ClockCtrl = (Divisor & 0xFF) << 8;
} else {
DEBUG ((DEBUG_ERROR, "Unknown SD Host Controller Spec version [0x%x]!!!\n", ControllerVer));
return EFI_UNSUPPORTED;
}
//
// Stop bus clock at first
//
Status = SdMmcHcStopClock (PciIo, Slot);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Supply clock frequency with specified divisor
//
ClockCtrl |= BIT0;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_CLOCK_CTRL, FALSE, sizeof (ClockCtrl), &ClockCtrl);
if (EFI_ERROR (Status)) {
DEBUG ((DEBUG_ERROR, "Set SDCLK Frequency Select and Internal Clock Enable fields fails\n"));
return Status;
}
//
// Wait Internal Clock Stable in the Clock Control register to be 1
//
Status = SdMmcHcWaitMmioSet (
PciIo,
Slot,
SD_MMC_HC_CLOCK_CTRL,
sizeof (ClockCtrl),
BIT1,
BIT1,
SD_MMC_HC_GENERIC_TIMEOUT
);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcHcStartSdClock (PciIo, Slot);
if (EFI_ERROR (Status)) {
return Status;
}
//
// We don't notify the platform on first time setup to avoid changing
// legacy behavior. During first time setup we also don't know what type
// of card the slot holds and which enum value of BusTiming applies.
//
if (!FirstTimeSetup && (mOverride != NULL) && (mOverride->NotifyPhase != NULL)) {
Status = mOverride->NotifyPhase (
Private->ControllerHandle,
Slot,
EdkiiSdMmcSwitchClockFreqPost,
&BusTiming
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_ERROR,
"%a: SD/MMC switch clock freq post notifier callback failed - %r\n",
__FUNCTION__,
Status
));
return Status;
}
}
Private->Slot[Slot].CurrentFreq = ClockFreq;
return Status;
}
/**
SD/MMC bus power control.
Refer to SD Host Controller Simplified spec 3.0 Section 3.3 for details.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] PowerCtrl The value setting to the power control register.
@retval EFI_SUCCESS   The bus power is set successfully.
@retval Others        The bus power isn't set successfully.
**/
EFI_STATUS
SdMmcHcPowerControl (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
IN UINT8 PowerCtrl
)
{
EFI_STATUS Status;
//
// Clear SD Bus Power
//
PowerCtrl &= (UINT8) ~BIT0;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_POWER_CTRL, FALSE, sizeof (PowerCtrl), &PowerCtrl);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Set SD Bus Voltage Select and SD Bus Power fields in Power Control Register
//
PowerCtrl |= BIT0;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_POWER_CTRL, FALSE, sizeof (PowerCtrl), &PowerCtrl);
return Status;
}
/**
Set the SD/MMC bus width.
Refer to SD Host Controller Simplified spec 3.0 Section 3.4 for details.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] BusWidth The bus width used by the SD/MMC device, it must be 1, 4 or 8.
@retval EFI_SUCCESS The bus width is set successfully.
@retval Others The bus width isn't set successfully.
**/
EFI_STATUS
SdMmcHcSetBusWidth (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
IN UINT16 BusWidth
)
{
EFI_STATUS Status;
UINT8 HostCtrl1;
if (BusWidth == 1) {
HostCtrl1 = (UINT8) ~(BIT5 | BIT1);
Status = SdMmcHcAndMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, sizeof (HostCtrl1), &HostCtrl1);
} else if (BusWidth == 4) {
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, TRUE, sizeof (HostCtrl1), &HostCtrl1);
if (EFI_ERROR (Status)) {
return Status;
}
HostCtrl1 |= BIT1;
HostCtrl1 &= (UINT8) ~BIT5;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, FALSE, sizeof (HostCtrl1), &HostCtrl1);
} else if (BusWidth == 8) {
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, TRUE, sizeof (HostCtrl1), &HostCtrl1);
if (EFI_ERROR (Status)) {
return Status;
}
HostCtrl1 &= (UINT8) ~BIT1;
HostCtrl1 |= BIT5;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, FALSE, sizeof (HostCtrl1), &HostCtrl1);
} else {
ASSERT (FALSE);
return EFI_INVALID_PARAMETER;
}
return Status;
}
/**
Configure V4 controller enhancements at initialization.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] Capability The capability of the slot.
@param[in] ControllerVer The version of host controller.
@retval EFI_SUCCESS       The V4 enhancements are configured successfully.
**/
EFI_STATUS
SdMmcHcInitV4Enhancements (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
IN SD_MMC_HC_SLOT_CAP Capability,
IN UINT16 ControllerVer
)
{
EFI_STATUS Status;
UINT16 HostCtrl2;
//
// Check if controller version V4 or higher
//
if (ControllerVer >= SD_MMC_HC_CTRL_VER_400) {
HostCtrl2 = SD_MMC_HC_V4_EN;
//
// Check if controller version V4.0
//
if (ControllerVer == SD_MMC_HC_CTRL_VER_400) {
//
// Check if 64bit support is available
//
if (Capability.SysBus64V3 != 0) {
HostCtrl2 |= SD_MMC_HC_64_ADDR_EN;
DEBUG ((DEBUG_INFO, "Enabled V4 64 bit system bus support\n"));
}
}
//
// Check if controller version V4.10 or higher
//
else if (ControllerVer >= SD_MMC_HC_CTRL_VER_410) {
//
// Check if 64bit support is available
//
if (Capability.SysBus64V4 != 0) {
HostCtrl2 |= SD_MMC_HC_64_ADDR_EN;
DEBUG ((DEBUG_INFO, "Enabled V4 64 bit system bus support\n"));
}
HostCtrl2 |= SD_MMC_HC_26_DATA_LEN_ADMA_EN;
DEBUG ((DEBUG_INFO, "Enabled V4 26 bit data length ADMA support\n"));
}
Status = SdMmcHcOrMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL2, sizeof (HostCtrl2), &HostCtrl2);
if (EFI_ERROR (Status)) {
return Status;
}
}
return EFI_SUCCESS;
}
/**
Supply SD/MMC card with maximum voltage at initialization.
Refer to SD Host Controller Simplified spec 3.0 Section 3.3 for details.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] Capability The capability of the slot.
@retval EFI_SUCCESS The voltage is supplied successfully.
@retval Others The voltage isn't supplied successfully.
**/
EFI_STATUS
SdMmcHcInitPowerVoltage (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
IN SD_MMC_HC_SLOT_CAP Capability
)
{
EFI_STATUS Status;
UINT8 MaxVoltage;
UINT8 HostCtrl2;
//
// Calculate supported maximum voltage according to SD Bus Voltage Select
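// The Power Control register encodes the rail in bits [3:1] (111b =
// 3.3V, 110b = 3.0V, 101b = 1.8V) with SD Bus Power in bit 0, hence
// the 0x0E/0x0C/0x0A values below.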
//
if (Capability.Voltage33 != 0) {
//
// Support 3.3V
//
MaxVoltage = 0x0E;
} else if (Capability.Voltage30 != 0) {
//
// Support 3.0V
//
MaxVoltage = 0x0C;
} else if (Capability.Voltage18 != 0) {
//
// Support 1.8V
//
MaxVoltage = 0x0A;
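//
// 1.8V also requires 1.8V Signaling Enable (Host Control 2, BIT3);
// the 5 ms stall below gives the voltage rail time to settle.
//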
HostCtrl2 = BIT3;
Status = SdMmcHcOrMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL2, sizeof (HostCtrl2), &HostCtrl2);
gBS->Stall (5000);
if (EFI_ERROR (Status)) {
return Status;
}
} else {
ASSERT (FALSE);
return EFI_DEVICE_ERROR;
}
//
// Set SD Bus Voltage Select and SD Bus Power fields in Power Control Register
//
Status = SdMmcHcPowerControl (PciIo, Slot, MaxVoltage);
return Status;
}
/**
Initialize the Timeout Control register with most conservative value at initialization.
Refer to SD Host Controller Simplified spec 3.0 Section 2.2.15 for details.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@retval EFI_SUCCESS The timeout control register is configured successfully.
@retval Others The timeout control register isn't configured successfully.
**/
EFI_STATUS
SdMmcHcInitTimeoutCtrl (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot
)
{
EFI_STATUS Status;
UINT8 Timeout;
Timeout = 0x0E;
Status = SdMmcHcRwMmio (PciIo, Slot, SD_MMC_HC_TIMEOUT_CTRL, FALSE, sizeof (Timeout), &Timeout);
return Status;
}
/**
Initialize the SD/MMC host controller with the lowest clock frequency, max power and max timeout value
at initialization.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Slot The slot number of the SD card to send the command to.
@retval EFI_SUCCESS The host controller is initialized successfully.
@retval Others The host controller isn't initialized successfully.
**/
EFI_STATUS
SdMmcHcInitHost (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot
)
{
EFI_STATUS Status;
EFI_PCI_IO_PROTOCOL *PciIo;
SD_MMC_HC_SLOT_CAP Capability;
//
// Notify the SD/MMC override protocol that we are about to initialize
// the SD/MMC host controller.
//
if ((mOverride != NULL) && (mOverride->NotifyPhase != NULL)) {
Status = mOverride->NotifyPhase (
Private->ControllerHandle,
Slot,
EdkiiSdMmcInitHostPre,
NULL
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_WARN,
"%a: SD/MMC pre init notifier callback failed - %r\n",
__FUNCTION__,
Status
));
return Status;
}
}
PciIo = Private->PciIo;
Capability = Private->Capability[Slot];
Status = SdMmcHcInitV4Enhancements (PciIo, Slot, Capability, Private->ControllerVersion[Slot]);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Perform first time clock setup with 400 KHz frequency.
// We send 0 as the BusTiming value because at this time
// we still do not know the slot type and which enum value will apply.
// Since it is a first time setup SdMmcHcClockSupply won't notify
// the platform driver anyway, so it doesn't matter.
//
Status = SdMmcHcClockSupply (Private, Slot, 0, TRUE, 400);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcHcInitPowerVoltage (PciIo, Slot, Capability);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcHcInitTimeoutCtrl (PciIo, Slot);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Notify the SD/MMC override protocol that we have just initialized
// the SD/MMC host controller.
//
if ((mOverride != NULL) && (mOverride->NotifyPhase != NULL)) {
Status = mOverride->NotifyPhase (
Private->ControllerHandle,
Slot,
EdkiiSdMmcInitHostPost,
NULL
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_WARN,
"%a: SD/MMC post init notifier callback failed - %r\n",
__FUNCTION__,
Status
));
}
}
return Status;
}
/**
Set the SD Host Controller Control 2 register according to the selected speed.
@param[in] ControllerHandle The handle of the controller.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] Timing The timing to select.
@retval EFI_SUCCESS The timing is set successfully.
@retval Others The timing isn't set successfully.
**/
EFI_STATUS
SdMmcHcUhsSignaling (
IN EFI_HANDLE ControllerHandle,
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
IN SD_MMC_BUS_MODE Timing
)
{
EFI_STATUS Status;
UINT8 HostCtrl2;
HostCtrl2 = (UINT8) ~SD_MMC_HC_CTRL_UHS_MASK;
Status = SdMmcHcAndMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL2, sizeof (HostCtrl2), &HostCtrl2);
if (EFI_ERROR (Status)) {
return Status;
}
switch (Timing) {
case SdMmcUhsSdr12:
HostCtrl2 = SD_MMC_HC_CTRL_UHS_SDR12;
break;
case SdMmcUhsSdr25:
HostCtrl2 = SD_MMC_HC_CTRL_UHS_SDR25;
break;
case SdMmcUhsSdr50:
HostCtrl2 = SD_MMC_HC_CTRL_UHS_SDR50;
break;
case SdMmcUhsSdr104:
HostCtrl2 = SD_MMC_HC_CTRL_UHS_SDR104;
break;
case SdMmcUhsDdr50:
HostCtrl2 = SD_MMC_HC_CTRL_UHS_DDR50;
break;
case SdMmcMmcLegacy:
HostCtrl2 = SD_MMC_HC_CTRL_MMC_LEGACY;
break;
case SdMmcMmcHsSdr:
HostCtrl2 = SD_MMC_HC_CTRL_MMC_HS_SDR;
break;
case SdMmcMmcHsDdr:
HostCtrl2 = SD_MMC_HC_CTRL_MMC_HS_DDR;
break;
case SdMmcMmcHs200:
HostCtrl2 = SD_MMC_HC_CTRL_MMC_HS200;
break;
case SdMmcMmcHs400:
HostCtrl2 = SD_MMC_HC_CTRL_MMC_HS400;
break;
default:
HostCtrl2 = 0;
break;
}
Status = SdMmcHcOrMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL2, sizeof (HostCtrl2), &HostCtrl2);
if (EFI_ERROR (Status)) {
return Status;
}
if ((mOverride != NULL) && (mOverride->NotifyPhase != NULL)) {
Status = mOverride->NotifyPhase (
ControllerHandle,
Slot,
EdkiiSdMmcUhsSignaling,
&Timing
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_ERROR,
"%a: SD/MMC uhs signaling notifier callback failed - %r\n",
__FUNCTION__,
Status
));
return Status;
}
}
return EFI_SUCCESS;
}
/**
Set driver strength in host controller.
@param[in] PciIo The PCI IO protocol instance.
@param[in] SlotIndex The slot index of the card.
@param[in] DriverStrength DriverStrength to set in the controller.
@retval EFI_SUCCESS Driver strength programmed successfully.
@retval Others Failed to set driver strength.
**/
EFI_STATUS
SdMmcSetDriverStrength (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 SlotIndex,
IN SD_DRIVER_STRENGTH_TYPE DriverStrength
)
{
EFI_STATUS Status;
UINT16 HostCtrl2;
if (DriverStrength == SdDriverStrengthIgnore) {
return EFI_SUCCESS;
}
HostCtrl2 = (UINT16) ~SD_MMC_HC_CTRL_DRIVER_STRENGTH_MASK;
Status = SdMmcHcAndMmio (PciIo, SlotIndex, SD_MMC_HC_HOST_CTRL2, sizeof (HostCtrl2), &HostCtrl2);
if (EFI_ERROR (Status)) {
return Status;
}
HostCtrl2 = (DriverStrength << 4) & SD_MMC_HC_CTRL_DRIVER_STRENGTH_MASK;
return SdMmcHcOrMmio (PciIo, SlotIndex, SD_MMC_HC_HOST_CTRL2, sizeof (HostCtrl2), &HostCtrl2);
}
/**
Turn on/off LED.
@param[in] PciIo The PCI IO protocol instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] On The boolean to turn on/off LED.
@retval EFI_SUCCESS The LED is turned on/off successfully.
@retval Others The LED isn't turned on/off successfully.
**/
EFI_STATUS
SdMmcHcLedOnOff (
IN EFI_PCI_IO_PROTOCOL *PciIo,
IN UINT8 Slot,
IN BOOLEAN On
)
{
EFI_STATUS Status;
UINT8 HostCtrl1;
if (On) {
HostCtrl1 = BIT0;
Status = SdMmcHcOrMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, sizeof (HostCtrl1), &HostCtrl1);
} else {
HostCtrl1 = (UINT8) ~BIT0;
Status = SdMmcHcAndMmio (PciIo, Slot, SD_MMC_HC_HOST_CTRL1, sizeof (HostCtrl1), &HostCtrl1);
}
return Status;
}
/**
Build ADMA descriptor table for transfer.
Refer to SD Host Controller Simplified spec 4.2 Section 1.13 for details.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@param[in] ControllerVer The version of host controller.
@retval EFI_SUCCESS The ADMA descriptor table is created successfully.
@retval Others The ADMA descriptor table isn't created successfully.
**/
EFI_STATUS
BuildAdmaDescTable (
IN SD_MMC_HC_TRB *Trb,
IN UINT16 ControllerVer
)
{
EFI_PHYSICAL_ADDRESS Data;
UINT64 DataLen;
UINT64 Entries;
UINT32 Index;
UINT64 Remaining;
UINT64 Address;
UINTN TableSize;
EFI_PCI_IO_PROTOCOL *PciIo;
EFI_STATUS Status;
UINTN Bytes;
UINT32 AdmaMaxDataPerLine;
UINT32 DescSize;
VOID *AdmaDesc;
AdmaMaxDataPerLine = ADMA_MAX_DATA_PER_LINE_16B;
DescSize = sizeof (SD_MMC_HC_ADMA_32_DESC_LINE);
AdmaDesc = NULL;
Data = Trb->DataPhy;
DataLen = Trb->DataLen;
PciIo = Trb->Private->PciIo;
//
// Check for valid ranges in 32bit ADMA Descriptor Table
//
if ((Trb->Mode == SdMmcAdma32bMode) &&
((Data >= 0x100000000ul) || ((Data + DataLen) > 0x100000000ul)))
{
return EFI_INVALID_PARAMETER;
}
//
// Check address field alignment
//
if (Trb->Mode != SdMmcAdma32bMode) {
//
// Address field shall be set on 64-bit boundary (Lower 3-bit is always set to 0)
//
if ((Data & (BIT0 | BIT1 | BIT2)) != 0) {
DEBUG ((DEBUG_INFO, "The buffer [0x%x] to construct ADMA desc is not aligned to 8 bytes boundary!\n", Data));
}
} else {
//
// Address field shall be set on 32-bit boundary (Lower 2-bit is always set to 0)
//
if ((Data & (BIT0 | BIT1)) != 0) {
DEBUG ((DEBUG_INFO, "The buffer [0x%x] to construct ADMA desc is not aligned to 4 bytes boundary!\n", Data));
}
}
//
// Configure 64b ADMA.
//
if (Trb->Mode == SdMmcAdma64bV3Mode) {
DescSize = sizeof (SD_MMC_HC_ADMA_64_V3_DESC_LINE);
} else if (Trb->Mode == SdMmcAdma64bV4Mode) {
DescSize = sizeof (SD_MMC_HC_ADMA_64_V4_DESC_LINE);
}
//
// Configure 26b data length.
//
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
AdmaMaxDataPerLine = ADMA_MAX_DATA_PER_LINE_26B;
}
Entries = DivU64x32 ((DataLen + AdmaMaxDataPerLine - 1), AdmaMaxDataPerLine);
TableSize = (UINTN)MultU64x32 (Entries, DescSize);
Trb->AdmaPages = (UINT32)EFI_SIZE_TO_PAGES (TableSize);
Status = PciIo->AllocateBuffer (
PciIo,
AllocateAnyPages,
EfiBootServicesData,
EFI_SIZE_TO_PAGES (TableSize),
(VOID **)&AdmaDesc,
0
);
if (EFI_ERROR (Status)) {
return EFI_OUT_OF_RESOURCES;
}
ZeroMem (AdmaDesc, TableSize);
Bytes = TableSize;
Status = PciIo->Map (
PciIo,
EfiPciIoOperationBusMasterCommonBuffer,
AdmaDesc,
&Bytes,
&Trb->AdmaDescPhy,
&Trb->AdmaMap
);
if (EFI_ERROR (Status) || (Bytes != TableSize)) {
//
// Map error or unable to map the whole descriptor buffer into a contiguous region.
//
PciIo->FreeBuffer (
PciIo,
EFI_SIZE_TO_PAGES (TableSize),
AdmaDesc
);
return EFI_OUT_OF_RESOURCES;
}
if ((Trb->Mode == SdMmcAdma32bMode) &&
((UINT64)(UINTN)Trb->AdmaDescPhy > 0x100000000ul))
{
//
// The ADMA doesn't support 64bit addressing.
//
PciIo->Unmap (
PciIo,
Trb->AdmaMap
);
Trb->AdmaMap = NULL;
PciIo->FreeBuffer (
PciIo,
EFI_SIZE_TO_PAGES (TableSize),
AdmaDesc
);
return EFI_DEVICE_ERROR;
}
Remaining = DataLen;
Address = Data;
if (Trb->Mode == SdMmcAdma32bMode) {
Trb->Adma32Desc = AdmaDesc;
} else if (Trb->Mode == SdMmcAdma64bV3Mode) {
Trb->Adma64V3Desc = AdmaDesc;
} else {
Trb->Adma64V4Desc = AdmaDesc;
}
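//
// Fill one descriptor line per AdmaMaxDataPerLine chunk: Valid = 1
// marks the line active and Act = 2 selects the "Tran" (data transfer)
// action; the final line additionally gets End = 1 below.
//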
for (Index = 0; Index < Entries; Index++) {
if (Trb->Mode == SdMmcAdma32bMode) {
if (Remaining <= AdmaMaxDataPerLine) {
Trb->Adma32Desc[Index].Valid = 1;
Trb->Adma32Desc[Index].Act = 2;
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
Trb->Adma32Desc[Index].UpperLength = (UINT16)RShiftU64 (Remaining, 16);
}
Trb->Adma32Desc[Index].LowerLength = (UINT16)(Remaining & MAX_UINT16);
Trb->Adma32Desc[Index].Address = (UINT32)Address;
break;
} else {
Trb->Adma32Desc[Index].Valid = 1;
Trb->Adma32Desc[Index].Act = 2;
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
Trb->Adma32Desc[Index].UpperLength = 0;
}
Trb->Adma32Desc[Index].LowerLength = 0;
Trb->Adma32Desc[Index].Address = (UINT32)Address;
}
} else if (Trb->Mode == SdMmcAdma64bV3Mode) {
if (Remaining <= AdmaMaxDataPerLine) {
Trb->Adma64V3Desc[Index].Valid = 1;
Trb->Adma64V3Desc[Index].Act = 2;
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
Trb->Adma64V3Desc[Index].UpperLength = (UINT16)RShiftU64 (Remaining, 16);
}
Trb->Adma64V3Desc[Index].LowerLength = (UINT16)(Remaining & MAX_UINT16);
Trb->Adma64V3Desc[Index].LowerAddress = (UINT32)Address;
Trb->Adma64V3Desc[Index].UpperAddress = (UINT32)RShiftU64 (Address, 32);
break;
} else {
Trb->Adma64V3Desc[Index].Valid = 1;
Trb->Adma64V3Desc[Index].Act = 2;
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
Trb->Adma64V3Desc[Index].UpperLength = 0;
}
Trb->Adma64V3Desc[Index].LowerLength = 0;
Trb->Adma64V3Desc[Index].LowerAddress = (UINT32)Address;
Trb->Adma64V3Desc[Index].UpperAddress = (UINT32)RShiftU64 (Address, 32);
}
} else {
if (Remaining <= AdmaMaxDataPerLine) {
Trb->Adma64V4Desc[Index].Valid = 1;
Trb->Adma64V4Desc[Index].Act = 2;
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
Trb->Adma64V4Desc[Index].UpperLength = (UINT16)RShiftU64 (Remaining, 16);
}
Trb->Adma64V4Desc[Index].LowerLength = (UINT16)(Remaining & MAX_UINT16);
Trb->Adma64V4Desc[Index].LowerAddress = (UINT32)Address;
Trb->Adma64V4Desc[Index].UpperAddress = (UINT32)RShiftU64 (Address, 32);
break;
} else {
Trb->Adma64V4Desc[Index].Valid = 1;
Trb->Adma64V4Desc[Index].Act = 2;
if (Trb->AdmaLengthMode == SdMmcAdmaLen26b) {
Trb->Adma64V4Desc[Index].UpperLength = 0;
}
Trb->Adma64V4Desc[Index].LowerLength = 0;
Trb->Adma64V4Desc[Index].LowerAddress = (UINT32)Address;
Trb->Adma64V4Desc[Index].UpperAddress = (UINT32)RShiftU64 (Address, 32);
}
}
Remaining -= AdmaMaxDataPerLine;
Address += AdmaMaxDataPerLine;
}
//
// Set the last descriptor line as end of descriptor table
//
if (Trb->Mode == SdMmcAdma32bMode) {
Trb->Adma32Desc[Index].End = 1;
} else if (Trb->Mode == SdMmcAdma64bV3Mode) {
Trb->Adma64V3Desc[Index].End = 1;
} else {
Trb->Adma64V4Desc[Index].End = 1;
}
return EFI_SUCCESS;
}
/**
Prints the contents of the command packet to the debug port.
@param[in] DebugLevel Debug level at which the packet should be printed.
@param[in] Packet Pointer to packet to print.
**/
VOID
SdMmcPrintPacket (
IN UINT32 DebugLevel,
IN EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet
)
{
if (Packet == NULL) {
return;
}
DEBUG ((DebugLevel, "Printing EFI_SD_MMC_PASS_THRU_COMMAND_PACKET\n"));
if (Packet->SdMmcCmdBlk != NULL) {
DEBUG ((DebugLevel, "Command index: %d, argument: %X\n", Packet->SdMmcCmdBlk->CommandIndex, Packet->SdMmcCmdBlk->CommandArgument));
DEBUG ((DebugLevel, "Command type: %d, response type: %d\n", Packet->SdMmcCmdBlk->CommandType, Packet->SdMmcCmdBlk->ResponseType));
}
if (Packet->SdMmcStatusBlk != NULL) {
DEBUG ((
DebugLevel,
"Response 0: %X, 1: %X, 2: %X, 3: %X\n",
Packet->SdMmcStatusBlk->Resp0,
Packet->SdMmcStatusBlk->Resp1,
Packet->SdMmcStatusBlk->Resp2,
Packet->SdMmcStatusBlk->Resp3
));
}
DEBUG ((DebugLevel, "Timeout: %ld\n", Packet->Timeout));
DEBUG ((DebugLevel, "InDataBuffer: %p\n", Packet->InDataBuffer));
DEBUG ((DebugLevel, "OutDataBuffer: %p\n", Packet->OutDataBuffer));
DEBUG ((DebugLevel, "InTransferLength: %d\n", Packet->InTransferLength));
DEBUG ((DebugLevel, "OutTransferLength: %d\n", Packet->OutTransferLength));
DEBUG ((DebugLevel, "TransactionStatus: %r\n", Packet->TransactionStatus));
}
/**
Prints the contents of the TRB to the debug port.
@param[in] DebugLevel Debug level at which the TRB should be printed.
@param[in] Trb Pointer to the TRB structure.
**/
VOID
SdMmcPrintTrb (
IN UINT32 DebugLevel,
IN SD_MMC_HC_TRB *Trb
)
{
if (Trb == NULL) {
return;
}
DEBUG ((DebugLevel, "Printing SD_MMC_HC_TRB\n"));
DEBUG ((DebugLevel, "Slot: %d\n", Trb->Slot));
DEBUG ((DebugLevel, "BlockSize: %d\n", Trb->BlockSize));
DEBUG ((DebugLevel, "Data: %p\n", Trb->Data));
DEBUG ((DebugLevel, "DataLen: %d\n", Trb->DataLen));
DEBUG ((DebugLevel, "Read: %d\n", Trb->Read));
DEBUG ((DebugLevel, "DataPhy: %lX\n", Trb->DataPhy));
DEBUG ((DebugLevel, "DataMap: %p\n", Trb->DataMap));
DEBUG ((DebugLevel, "Mode: %d\n", Trb->Mode));
DEBUG ((DebugLevel, "AdmaLengthMode: %d\n", Trb->AdmaLengthMode));
DEBUG ((DebugLevel, "Event: %p\n", Trb->Event));
DEBUG ((DebugLevel, "Started: %d\n", Trb->Started));
DEBUG ((DebugLevel, "CommandComplete: %d\n", Trb->CommandComplete));
DEBUG ((DebugLevel, "Timeout: %ld\n", Trb->Timeout));
DEBUG ((DebugLevel, "Retries: %d\n", Trb->Retries));
DEBUG ((DebugLevel, "PioModeTransferCompleted: %d\n", Trb->PioModeTransferCompleted));
DEBUG ((DebugLevel, "PioBlockIndex: %d\n", Trb->PioBlockIndex));
DEBUG ((DebugLevel, "Adma32Desc: %p\n", Trb->Adma32Desc));
DEBUG ((DebugLevel, "Adma64V3Desc: %p\n", Trb->Adma64V3Desc));
DEBUG ((DebugLevel, "Adma64V4Desc: %p\n", Trb->Adma64V4Desc));
DEBUG ((DebugLevel, "AdmaMap: %p\n", Trb->AdmaMap));
DEBUG ((DebugLevel, "AdmaPages: %X\n", Trb->AdmaPages));
SdMmcPrintPacket (DebugLevel, Trb->Packet);
}
/**
Sets up host memory to allow DMA transfer.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] Packet A pointer to the SD command data structure.
@retval EFI_SUCCESS Memory has been mapped for DMA transfer.
@retval Others Memory has not been mapped.
**/
EFI_STATUS
SdMmcSetupMemoryForDmaTransfer (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_PCI_IO_PROTOCOL_OPERATION Flag;
EFI_PCI_IO_PROTOCOL *PciIo;
UINTN MapLength;
EFI_STATUS Status;
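//
// Note the direction flip: for a card read the controller bus-masters
// data into system memory (BusMasterWrite); for a card write it reads
// from system memory (BusMasterRead).
//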
if (Trb->Read) {
Flag = EfiPciIoOperationBusMasterWrite;
} else {
Flag = EfiPciIoOperationBusMasterRead;
}
PciIo = Private->PciIo;
if ((Trb->Data != NULL) && (Trb->DataLen != 0)) {
MapLength = Trb->DataLen;
Status = PciIo->Map (
PciIo,
Flag,
Trb->Data,
&MapLength,
&Trb->DataPhy,
&Trb->DataMap
);
if (EFI_ERROR (Status) || (Trb->DataLen != MapLength)) {
return EFI_BAD_BUFFER_SIZE;
}
}
if ((Trb->Mode == SdMmcAdma32bMode) ||
(Trb->Mode == SdMmcAdma64bV3Mode) ||
(Trb->Mode == SdMmcAdma64bV4Mode))
{
Status = BuildAdmaDescTable (Trb, Private->ControllerVersion[Slot]);
if (EFI_ERROR (Status)) {
return Status;
}
}
return EFI_SUCCESS;
}
/**
Create a new TRB for the SD/MMC cmd request.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in] Packet A pointer to the SD command data structure.
@param[in] Event If Event is NULL, blocking I/O is performed. If Event is
not NULL, then nonblocking I/O is performed, and Event
will be signaled when the Packet completes.
@return Created Trb or NULL.
**/
SD_MMC_HC_TRB *
SdMmcCreateTrb (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot,
IN EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet,
IN EFI_EVENT Event
)
{
SD_MMC_HC_TRB *Trb;
EFI_STATUS Status;
EFI_TPL OldTpl;
Trb = AllocateZeroPool (sizeof (SD_MMC_HC_TRB));
if (Trb == NULL) {
return NULL;
}
Trb->Signature = SD_MMC_HC_TRB_SIG;
Trb->Slot = Slot;
Trb->BlockSize = 0x200;
Trb->Packet = Packet;
Trb->Event = Event;
Trb->Started = FALSE;
Trb->CommandComplete = FALSE;
Trb->Timeout = Packet->Timeout;
Trb->Retries = SD_MMC_TRB_RETRIES;
Trb->PioModeTransferCompleted = FALSE;
Trb->PioBlockIndex = 0;
Trb->Private = Private;
if ((Packet->InTransferLength != 0) && (Packet->InDataBuffer != NULL)) {
Trb->Data = Packet->InDataBuffer;
Trb->DataLen = Packet->InTransferLength;
Trb->Read = TRUE;
} else if ((Packet->OutTransferLength != 0) && (Packet->OutDataBuffer != NULL)) {
Trb->Data = Packet->OutDataBuffer;
Trb->DataLen = Packet->OutTransferLength;
Trb->Read = FALSE;
} else if ((Packet->InTransferLength == 0) && (Packet->OutTransferLength == 0)) {
Trb->Data = NULL;
Trb->DataLen = 0;
} else {
goto Error;
}
if ((Trb->DataLen != 0) && (Trb->DataLen < Trb->BlockSize)) {
Trb->BlockSize = (UINT16)Trb->DataLen;
}
if (((Private->Slot[Trb->Slot].CardType == EmmcCardType) &&
(Packet->SdMmcCmdBlk->CommandIndex == EMMC_SEND_TUNING_BLOCK)) ||
((Private->Slot[Trb->Slot].CardType == SdCardType) &&
(Packet->SdMmcCmdBlk->CommandIndex == SD_SEND_TUNING_BLOCK)))
{
Trb->Mode = SdMmcPioMode;
} else {
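//
// Choose the transfer mode by capability: prefer ADMA2 (32-bit, or a
// 64-bit V3/V4 variant depending on controller version), then SDMA,
// then fall back to PIO.
//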
if (Trb->DataLen == 0) {
Trb->Mode = SdMmcNoData;
} else if (Private->Capability[Slot].Adma2 != 0) {
Trb->Mode = SdMmcAdma32bMode;
Trb->AdmaLengthMode = SdMmcAdmaLen16b;
if ((Private->ControllerVersion[Slot] == SD_MMC_HC_CTRL_VER_300) &&
(Private->Capability[Slot].SysBus64V3 == 1))
{
Trb->Mode = SdMmcAdma64bV3Mode;
} else if (((Private->ControllerVersion[Slot] == SD_MMC_HC_CTRL_VER_400) &&
(Private->Capability[Slot].SysBus64V3 == 1)) ||
((Private->ControllerVersion[Slot] >= SD_MMC_HC_CTRL_VER_410) &&
(Private->Capability[Slot].SysBus64V4 == 1)))
{
Trb->Mode = SdMmcAdma64bV4Mode;
}
if (Private->ControllerVersion[Slot] >= SD_MMC_HC_CTRL_VER_410) {
Trb->AdmaLengthMode = SdMmcAdmaLen26b;
}
Status = SdMmcSetupMemoryForDmaTransfer (Private, Slot, Trb);
if (EFI_ERROR (Status)) {
goto Error;
}
} else if (Private->Capability[Slot].Sdma != 0) {
Trb->Mode = SdMmcSdmaMode;
Status = SdMmcSetupMemoryForDmaTransfer (Private, Slot, Trb);
if (EFI_ERROR (Status)) {
goto Error;
}
} else {
Trb->Mode = SdMmcPioMode;
}
}
if (Event != NULL) {
OldTpl = gBS->RaiseTPL (TPL_NOTIFY);
InsertTailList (&Private->Queue, &Trb->TrbList);
gBS->RestoreTPL (OldTpl);
}
return Trb;
Error:
SdMmcFreeTrb (Trb);
return NULL;
}
/**
Free the resource used by the TRB.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
**/
VOID
SdMmcFreeTrb (
IN SD_MMC_HC_TRB *Trb
)
{
EFI_PCI_IO_PROTOCOL *PciIo;
PciIo = Trb->Private->PciIo;
if (Trb->AdmaMap != NULL) {
PciIo->Unmap (
PciIo,
Trb->AdmaMap
);
}
if (Trb->Adma32Desc != NULL) {
PciIo->FreeBuffer (
PciIo,
Trb->AdmaPages,
Trb->Adma32Desc
);
}
if (Trb->Adma64V3Desc != NULL) {
PciIo->FreeBuffer (
PciIo,
Trb->AdmaPages,
Trb->Adma64V3Desc
);
}
if (Trb->Adma64V4Desc != NULL) {
PciIo->FreeBuffer (
PciIo,
Trb->AdmaPages,
Trb->Adma64V4Desc
);
}
if (Trb->DataMap != NULL) {
PciIo->Unmap (
PciIo,
Trb->DataMap
);
}
FreePool (Trb);
return;
}
/**
  Check if the env is ready for executing the specified TRB.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS The env is ready for TRB execution.
@retval EFI_NOT_READY The env is not ready for TRB execution.
  @retval Others        Some errors happen.
**/
EFI_STATUS
SdMmcCheckTrbEnv (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
EFI_PCI_IO_PROTOCOL *PciIo;
UINT32 PresentState;
Packet = Trb->Packet;
if ((Packet->SdMmcCmdBlk->CommandType == SdMmcCommandTypeAdtc) ||
(Packet->SdMmcCmdBlk->ResponseType == SdMmcResponseTypeR1b) ||
(Packet->SdMmcCmdBlk->ResponseType == SdMmcResponseTypeR5b))
{
//
// Wait Command Inhibit (CMD) and Command Inhibit (DAT) in
// the Present State register to be 0
//
PresentState = BIT0 | BIT1;
} else {
//
// Wait Command Inhibit (CMD) in the Present State register
// to be 0
//
PresentState = BIT0;
}
PciIo = Private->PciIo;
Status = SdMmcHcCheckMmioSet (
PciIo,
Trb->Slot,
SD_MMC_HC_PRESENT_STATE,
sizeof (PresentState),
PresentState,
0
);
return Status;
}
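//
// Editorial sketch (an assumption, not from the original source): the
// SdMmcHcCheckMmioSet call above amounts to reading the Present State
// register and testing the inhibit mask directly, e.g.:
//
//   UINT32  State;
//
//   Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_PRESENT_STATE,
//              TRUE, sizeof (State), &State);
//   if (!EFI_ERROR (Status) && ((State & PresentState) != 0)) {
//     Status = EFI_NOT_READY;  // a CMD/DAT line is still busy
//   }
//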
/**
  Wait for the env to be ready for executing the specified TRB.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS The env is ready for TRB execution.
@retval EFI_TIMEOUT The env is not ready for TRB execution in time.
  @retval Others        Some errors happen.
**/
EFI_STATUS
SdMmcWaitTrbEnv (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
UINT64 Timeout;
BOOLEAN InfiniteWait;
  //
  // Wait for the Command Inhibit bits in the Present State register to clear
  //
Packet = Trb->Packet;
Timeout = Packet->Timeout;
if (Timeout == 0) {
InfiniteWait = TRUE;
} else {
InfiniteWait = FALSE;
}
while (InfiniteWait || (Timeout > 0)) {
    //
    // Check whether the env is ready by reading the Present State register.
    //
Status = SdMmcCheckTrbEnv (Private, Trb);
if (Status != EFI_NOT_READY) {
return Status;
}
//
// Stall for 1 microsecond.
//
gBS->Stall (1);
Timeout--;
}
return EFI_TIMEOUT;
}
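//
// Editorial note: since the loop above stalls 1us per iteration,
// Packet->Timeout effectively counts microseconds here, and 0 requests an
// infinite wait. A caller willing to wait up to ~1ms for an idle bus would
// therefore set (hypothetical value):
//
//   Packet->Timeout = 1000;
//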
/**
Execute the specified TRB.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS The TRB is sent to host controller successfully.
  @retval Others Some errors happen when sending this request to the host controller.
**/
EFI_STATUS
SdMmcExecTrb (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
EFI_PCI_IO_PROTOCOL *PciIo;
UINT16 Cmd;
UINT16 IntStatus;
UINT32 Argument;
UINT32 BlkCount;
UINT16 BlkSize;
UINT16 TransMode;
UINT8 HostCtrl1;
UINT64 SdmaAddr;
UINT64 AdmaAddr;
BOOLEAN AddressingMode64;
AddressingMode64 = FALSE;
Packet = Trb->Packet;
PciIo = Trb->Private->PciIo;
//
// Clear all bits in Error Interrupt Status Register
//
IntStatus = 0xFFFF;
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_ERR_INT_STS, FALSE, sizeof (IntStatus), &IntStatus);
if (EFI_ERROR (Status)) {
return Status;
}
//
  // Clear all bits in Normal Interrupt Status Register except for Card Removal & Card Insertion bits.
//
IntStatus = 0xFF3F;
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_NOR_INT_STS, FALSE, sizeof (IntStatus), &IntStatus);
if (EFI_ERROR (Status)) {
return Status;
}
if (Private->ControllerVersion[Trb->Slot] >= SD_MMC_HC_CTRL_VER_400) {
Status = SdMmcHcCheckMmioSet (
PciIo,
Trb->Slot,
SD_MMC_HC_HOST_CTRL2,
sizeof (UINT16),
SD_MMC_HC_64_ADDR_EN,
SD_MMC_HC_64_ADDR_EN
);
if (!EFI_ERROR (Status)) {
AddressingMode64 = TRUE;
}
}
//
// Set Host Control 1 register DMA Select field
//
if ((Trb->Mode == SdMmcAdma32bMode) ||
(Trb->Mode == SdMmcAdma64bV4Mode))
{
HostCtrl1 = BIT4;
Status = SdMmcHcOrMmio (PciIo, Trb->Slot, SD_MMC_HC_HOST_CTRL1, sizeof (HostCtrl1), &HostCtrl1);
if (EFI_ERROR (Status)) {
return Status;
}
} else if (Trb->Mode == SdMmcAdma64bV3Mode) {
HostCtrl1 = BIT4|BIT3;
Status = SdMmcHcOrMmio (PciIo, Trb->Slot, SD_MMC_HC_HOST_CTRL1, sizeof (HostCtrl1), &HostCtrl1);
if (EFI_ERROR (Status)) {
return Status;
}
}
SdMmcHcLedOnOff (PciIo, Trb->Slot, TRUE);
if (Trb->Mode == SdMmcSdmaMode) {
if ((!AddressingMode64) &&
((UINT64)(UINTN)Trb->DataPhy >= 0x100000000ul))
{
return EFI_INVALID_PARAMETER;
}
SdmaAddr = (UINT64)(UINTN)Trb->DataPhy;
if (Private->ControllerVersion[Trb->Slot] >= SD_MMC_HC_CTRL_VER_400) {
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_ADMA_SYS_ADDR, FALSE, sizeof (UINT64), &SdmaAddr);
} else {
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_SDMA_ADDR, FALSE, sizeof (UINT32), &SdmaAddr);
}
if (EFI_ERROR (Status)) {
return Status;
}
} else if ((Trb->Mode == SdMmcAdma32bMode) ||
(Trb->Mode == SdMmcAdma64bV3Mode) ||
(Trb->Mode == SdMmcAdma64bV4Mode))
{
AdmaAddr = (UINT64)(UINTN)Trb->AdmaDescPhy;
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_ADMA_SYS_ADDR, FALSE, sizeof (AdmaAddr), &AdmaAddr);
if (EFI_ERROR (Status)) {
return Status;
}
}
BlkSize = Trb->BlockSize;
if (Trb->Mode == SdMmcSdmaMode) {
//
// Set SDMA boundary to be 512K bytes.
//
BlkSize |= 0x7000;
}
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_BLK_SIZE, FALSE, sizeof (BlkSize), &BlkSize);
if (EFI_ERROR (Status)) {
return Status;
}
BlkCount = 0;
if (Trb->Mode != SdMmcNoData) {
//
    // Calculate Block Count.
//
BlkCount = (Trb->DataLen / Trb->BlockSize);
}
if (Private->ControllerVersion[Trb->Slot] >= SD_MMC_HC_CTRL_VER_410) {
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_SDMA_ADDR, FALSE, sizeof (UINT32), &BlkCount);
} else {
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_BLK_COUNT, FALSE, sizeof (UINT16), &BlkCount);
}
if (EFI_ERROR (Status)) {
return Status;
}
Argument = Packet->SdMmcCmdBlk->CommandArgument;
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_ARG1, FALSE, sizeof (Argument), &Argument);
if (EFI_ERROR (Status)) {
return Status;
}
TransMode = 0;
if (Trb->Mode != SdMmcNoData) {
if (Trb->Mode != SdMmcPioMode) {
TransMode |= BIT0;
}
if (Trb->Read) {
TransMode |= BIT4;
}
if (BlkCount > 1) {
TransMode |= BIT5 | BIT1;
}
//
// Only SD memory card needs to use AUTO CMD12 feature.
//
if (Private->Slot[Trb->Slot].CardType == SdCardType) {
if (BlkCount > 1) {
TransMode |= BIT2;
}
}
}
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_TRANS_MOD, FALSE, sizeof (TransMode), &TransMode);
if (EFI_ERROR (Status)) {
return Status;
}
Cmd = (UINT16)LShiftU64 (Packet->SdMmcCmdBlk->CommandIndex, 8);
if (Packet->SdMmcCmdBlk->CommandType == SdMmcCommandTypeAdtc) {
Cmd |= BIT5;
}
//
// Convert ResponseType to value
//
if (Packet->SdMmcCmdBlk->CommandType != SdMmcCommandTypeBc) {
switch (Packet->SdMmcCmdBlk->ResponseType) {
case SdMmcResponseTypeR1:
case SdMmcResponseTypeR5:
case SdMmcResponseTypeR6:
case SdMmcResponseTypeR7:
Cmd |= (BIT1 | BIT3 | BIT4);
break;
case SdMmcResponseTypeR2:
Cmd |= (BIT0 | BIT3);
break;
case SdMmcResponseTypeR3:
case SdMmcResponseTypeR4:
Cmd |= BIT1;
break;
case SdMmcResponseTypeR1b:
case SdMmcResponseTypeR5b:
Cmd |= (BIT0 | BIT1 | BIT3 | BIT4);
break;
default:
ASSERT (FALSE);
break;
}
}
//
// Execute cmd
//
Status = SdMmcHcRwMmio (PciIo, Trb->Slot, SD_MMC_HC_COMMAND, FALSE, sizeof (Cmd), &Cmd);
return Status;
}
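//
// Editorial reference for the Command register encoding built above (per
// the SD Host Controller spec; not part of the original source):
//
//   BIT0..BIT1   : response type (00 none, 01 136-bit, 10 48-bit,
//                  11 48-bit with busy)
//   BIT3         : command CRC check enable
//   BIT4         : command index check enable
//   BIT5         : data present
//   BIT8..BIT13  : command index
//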
/**
Performs SW reset based on passed error status mask.
@param[in] Private Pointer to driver private data.
@param[in] Slot Index of the slot to reset.
@param[in] ErrIntStatus Error interrupt status mask.
@retval EFI_SUCCESS Software reset performed successfully.
@retval Other Software reset failed.
**/
EFI_STATUS
SdMmcSoftwareReset (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot,
IN UINT16 ErrIntStatus
)
{
UINT8 SwReset;
EFI_STATUS Status;
SwReset = 0;
if ((ErrIntStatus & 0x0F) != 0) {
SwReset |= BIT1;
}
if ((ErrIntStatus & 0x70) != 0) {
SwReset |= BIT2;
}
Status = SdMmcHcRwMmio (
Private->PciIo,
Slot,
SD_MMC_HC_SW_RST,
FALSE,
sizeof (SwReset),
&SwReset
);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcHcWaitMmioSet (
Private->PciIo,
Slot,
SD_MMC_HC_SW_RST,
sizeof (SwReset),
0xFF,
0,
SD_MMC_HC_GENERIC_TIMEOUT
);
if (EFI_ERROR (Status)) {
return Status;
}
return EFI_SUCCESS;
}
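//
// Editorial example: the masks above route error classes to reset targets.
// A command CRC error (BIT1 of the error interrupt status, inside the 0x0F
// CMD-line group) yields a CMD-line reset, while a data CRC error (BIT5,
// inside the 0x70 DAT-line group) yields a DAT-line reset:
//
//   SdMmcSoftwareReset (Private, Slot, BIT1);  // -> SwReset = BIT1 (CMD)
//   SdMmcSoftwareReset (Private, Slot, BIT5);  // -> SwReset = BIT2 (DAT)
//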
/**
  Checks the error status in the error status register
  and issues an appropriate software reset as described in
  SD specification section 3.10.
@param[in] Private Pointer to driver private data.
@param[in] Slot Index of the slot for device.
@param[in] IntStatus Normal interrupt status mask.
@retval EFI_CRC_ERROR CRC error happened during CMD execution.
@retval EFI_SUCCESS No error reported.
@retval Others Some other error happened.
**/
EFI_STATUS
SdMmcCheckAndRecoverErrors (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN UINT8 Slot,
IN UINT16 IntStatus
)
{
UINT16 ErrIntStatus;
EFI_STATUS Status;
EFI_STATUS ErrorStatus;
if ((IntStatus & BIT15) == 0) {
return EFI_SUCCESS;
}
Status = SdMmcHcRwMmio (
Private->PciIo,
Slot,
SD_MMC_HC_ERR_INT_STS,
TRUE,
sizeof (ErrIntStatus),
&ErrIntStatus
);
if (EFI_ERROR (Status)) {
return Status;
}
DEBUG ((DEBUG_ERROR, "Error reported by SDHCI\n"));
DEBUG ((DEBUG_ERROR, "Interrupt status = %X\n", IntStatus));
DEBUG ((DEBUG_ERROR, "Error interrupt status = %X\n", ErrIntStatus));
//
// If the data timeout error is reported
// but data transfer is signaled as completed we
// have to ignore data timeout. We also assume that no
// other error is present on the link since data transfer
// completed successfully. Error interrupt status
// register is going to be reset when the next command
// is started.
//
if (((ErrIntStatus & BIT4) != 0) && ((IntStatus & BIT1) != 0)) {
return EFI_SUCCESS;
}
//
// We treat both CMD and DAT CRC errors and
// end bits errors as EFI_CRC_ERROR. This will
// let higher layer know that the error possibly
// happened due to random bus condition and the
// command can be retried.
//
if ((ErrIntStatus & (BIT1 | BIT2 | BIT5 | BIT6)) != 0) {
ErrorStatus = EFI_CRC_ERROR;
} else {
ErrorStatus = EFI_DEVICE_ERROR;
}
Status = SdMmcSoftwareReset (Private, Slot, ErrIntStatus);
if (EFI_ERROR (Status)) {
return Status;
}
return ErrorStatus;
}
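//
// Editorial sketch (an assumption, not from the original source): a caller
// can pair the EFI_CRC_ERROR result with the retry budget kept in the TRB
// to re-issue a command that failed on a transient bus glitch:
//
//   Status = SdMmcCheckAndRecoverErrors (Private, Trb->Slot, IntStatus);
//   if ((Status == EFI_CRC_ERROR) && (Trb->Retries > 0)) {
//     Trb->Retries--;
//     Status = SdMmcExecTrb (Private, Trb);  // retry the command
//   }
//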
/**
Reads the response data into the TRB buffer.
This function assumes that caller made sure that
command has completed.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS Response read successfully.
@retval Others Failed to get response.
**/
EFI_STATUS
SdMmcGetResponse (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
UINT8 Index;
UINT32 Response[4];
EFI_STATUS Status;
Packet = Trb->Packet;
if (Packet->SdMmcCmdBlk->CommandType == SdMmcCommandTypeBc) {
return EFI_SUCCESS;
}
for (Index = 0; Index < 4; Index++) {
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_RESPONSE + Index * 4,
TRUE,
sizeof (UINT32),
&Response[Index]
);
if (EFI_ERROR (Status)) {
return Status;
}
}
CopyMem (Packet->SdMmcStatusBlk, Response, sizeof (Response));
return EFI_SUCCESS;
}
/**
Checks if the command completed. If the command
completed it gets the response and records the
command completion in the TRB.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@param[in] IntStatus Snapshot of the normal interrupt status register.
@retval EFI_SUCCESS Command completed successfully.
@retval EFI_NOT_READY Command completion still pending.
@retval Others Command failed to complete.
**/
EFI_STATUS
SdMmcCheckCommandComplete (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb,
IN UINT16 IntStatus
)
{
UINT16 Data16;
EFI_STATUS Status;
if ((IntStatus & BIT0) != 0) {
Data16 = BIT0;
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_NOR_INT_STS,
FALSE,
sizeof (Data16),
&Data16
);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcGetResponse (Private, Trb);
if (EFI_ERROR (Status)) {
return Status;
}
Trb->CommandComplete = TRUE;
return EFI_SUCCESS;
}
return EFI_NOT_READY;
}
/**
Transfers data from card using PIO method.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@param[in] IntStatus Snapshot of the normal interrupt status register.
@retval EFI_SUCCESS PIO transfer completed successfully.
@retval EFI_NOT_READY PIO transfer completion still pending.
@retval Others PIO transfer failed to complete.
**/
EFI_STATUS
SdMmcTransferDataWithPio (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb,
IN UINT16 IntStatus
)
{
EFI_STATUS Status;
UINT16 Data16;
UINT32 BlockCount;
EFI_PCI_IO_PROTOCOL_WIDTH Width;
UINTN Count;
BlockCount = (Trb->DataLen / Trb->BlockSize);
if (Trb->DataLen % Trb->BlockSize != 0) {
BlockCount += 1;
}
if (Trb->PioBlockIndex >= BlockCount) {
return EFI_SUCCESS;
}
switch (Trb->BlockSize % sizeof (UINT32)) {
case 0:
Width = EfiPciIoWidthFifoUint32;
Count = Trb->BlockSize / sizeof (UINT32);
break;
case 2:
Width = EfiPciIoWidthFifoUint16;
Count = Trb->BlockSize / sizeof (UINT16);
break;
case 1:
case 3:
default:
Width = EfiPciIoWidthFifoUint8;
Count = Trb->BlockSize;
break;
}
if (Trb->Read) {
if ((IntStatus & BIT5) == 0) {
return EFI_NOT_READY;
}
Data16 = BIT5;
SdMmcHcRwMmio (Private->PciIo, Trb->Slot, SD_MMC_HC_NOR_INT_STS, FALSE, sizeof (Data16), &Data16);
Status = Private->PciIo->Mem.Read (
Private->PciIo,
Width,
Trb->Slot,
SD_MMC_HC_BUF_DAT_PORT,
Count,
(VOID *)((UINT8 *)Trb->Data + (Trb->BlockSize * Trb->PioBlockIndex))
);
if (EFI_ERROR (Status)) {
return Status;
}
Trb->PioBlockIndex++;
} else {
if ((IntStatus & BIT4) == 0) {
return EFI_NOT_READY;
}
Data16 = BIT4;
SdMmcHcRwMmio (Private->PciIo, Trb->Slot, SD_MMC_HC_NOR_INT_STS, FALSE, sizeof (Data16), &Data16);
Status = Private->PciIo->Mem.Write (
Private->PciIo,
Width,
Trb->Slot,
SD_MMC_HC_BUF_DAT_PORT,
Count,
(VOID *)((UINT8 *)Trb->Data + (Trb->BlockSize * Trb->PioBlockIndex))
);
if (EFI_ERROR (Status)) {
return Status;
}
Trb->PioBlockIndex++;
}
if (Trb->PioBlockIndex >= BlockCount) {
Trb->PioModeTransferCompleted = TRUE;
return EFI_SUCCESS;
} else {
return EFI_NOT_READY;
}
}
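//
// Editorial worked example for the FIFO width selection above: a block size
// of 512 (512 % 4 == 0) moves as 128 UINT32 accesses, a block size of 6
// (6 % 4 == 2) as 3 UINT16 accesses, and an odd size such as 5 falls back
// to 5 single-byte accesses.
//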
/**
Update the SDMA address on the SDMA buffer boundary interrupt.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS Updated SDMA buffer address.
@retval Others Failed to update SDMA buffer address.
**/
EFI_STATUS
SdMmcUpdateSdmaAddress (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
UINT64 SdmaAddr;
EFI_STATUS Status;
SdmaAddr = SD_MMC_SDMA_ROUND_UP ((UINTN)Trb->DataPhy, SD_MMC_SDMA_BOUNDARY);
if (Private->ControllerVersion[Trb->Slot] >= SD_MMC_HC_CTRL_VER_400) {
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_ADMA_SYS_ADDR,
FALSE,
sizeof (UINT64),
&SdmaAddr
);
} else {
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_SDMA_ADDR,
FALSE,
sizeof (UINT32),
&SdmaAddr
);
}
if (EFI_ERROR (Status)) {
return Status;
}
Trb->DataPhy = (UINT64)(UINTN)SdmaAddr;
return EFI_SUCCESS;
}
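//
// Editorial worked example (assuming SD_MMC_SDMA_BOUNDARY is the 512 KiB
// boundary programmed in SdMmcExecTrb): a transfer that paused at
// DataPhy == 0x12345678 resumes at the next boundary, since
// SD_MMC_SDMA_ROUND_UP (0x12345678, 0x80000) == 0x12380000.
//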
/**
Checks if the data transfer completed and performs any actions
  necessary to continue the data transfer such as SDMA system
address fixup or PIO data transfer.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@param[in] IntStatus Snapshot of the normal interrupt status register.
@retval EFI_SUCCESS Data transfer completed successfully.
@retval EFI_NOT_READY Data transfer completion still pending.
@retval Others Data transfer failed to complete.
**/
EFI_STATUS
SdMmcCheckDataTransfer (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb,
IN UINT16 IntStatus
)
{
UINT16 Data16;
EFI_STATUS Status;
if ((IntStatus & BIT1) != 0) {
Data16 = BIT1;
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_NOR_INT_STS,
FALSE,
sizeof (Data16),
&Data16
);
return Status;
}
if ((Trb->Mode == SdMmcPioMode) && !Trb->PioModeTransferCompleted) {
Status = SdMmcTransferDataWithPio (Private, Trb, IntStatus);
if (EFI_ERROR (Status)) {
return Status;
}
}
if ((Trb->Mode == SdMmcSdmaMode) && ((IntStatus & BIT3) != 0)) {
Data16 = BIT3;
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_NOR_INT_STS,
FALSE,
sizeof (Data16),
&Data16
);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcUpdateSdmaAddress (Private, Trb);
if (EFI_ERROR (Status)) {
return Status;
}
}
return EFI_NOT_READY;
}
/**
Check the TRB execution result.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS The TRB is executed successfully.
  @retval EFI_NOT_READY The TRB has not completed execution.
  @retval Others Some errors happen when executing this request.
**/
EFI_STATUS
SdMmcCheckTrbResult (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
UINT16 IntStatus;
Packet = Trb->Packet;
//
// Check Trb execution result by reading Normal Interrupt Status register.
//
Status = SdMmcHcRwMmio (
Private->PciIo,
Trb->Slot,
SD_MMC_HC_NOR_INT_STS,
TRUE,
sizeof (IntStatus),
&IntStatus
);
if (EFI_ERROR (Status)) {
goto Done;
}
//
// Check if there are any errors reported by host controller
  // and if necessary recover the controller before the next command is executed.
//
Status = SdMmcCheckAndRecoverErrors (Private, Trb->Slot, IntStatus);
if (EFI_ERROR (Status)) {
goto Done;
}
//
// Tuning commands are the only ones that do not generate command
// complete interrupt. Process them here before entering the code
// that waits for command completion.
//
if (((Private->Slot[Trb->Slot].CardType == EmmcCardType) &&
(Packet->SdMmcCmdBlk->CommandIndex == EMMC_SEND_TUNING_BLOCK)) ||
((Private->Slot[Trb->Slot].CardType == SdCardType) &&
(Packet->SdMmcCmdBlk->CommandIndex == SD_SEND_TUNING_BLOCK)))
{
Status = SdMmcTransferDataWithPio (Private, Trb, IntStatus);
goto Done;
}
if (!Trb->CommandComplete) {
Status = SdMmcCheckCommandComplete (Private, Trb, IntStatus);
if (EFI_ERROR (Status)) {
goto Done;
}
}
if ((Packet->SdMmcCmdBlk->CommandType == SdMmcCommandTypeAdtc) ||
(Packet->SdMmcCmdBlk->ResponseType == SdMmcResponseTypeR1b) ||
(Packet->SdMmcCmdBlk->ResponseType == SdMmcResponseTypeR5b))
{
Status = SdMmcCheckDataTransfer (Private, Trb, IntStatus);
} else {
Status = EFI_SUCCESS;
}
Done:
if (Status != EFI_NOT_READY) {
SdMmcHcLedOnOff (Private->PciIo, Trb->Slot, FALSE);
if (EFI_ERROR (Status)) {
DEBUG ((DEBUG_ERROR, "TRB failed with %r\n", Status));
SdMmcPrintTrb (DEBUG_ERROR, Trb);
} else {
DEBUG ((DEBUG_VERBOSE, "TRB success\n"));
SdMmcPrintTrb (DEBUG_VERBOSE, Trb);
}
}
return Status;
}
/**
Wait for the TRB execution result.
@param[in] Private A pointer to the SD_MMC_HC_PRIVATE_DATA instance.
@param[in] Trb The pointer to the SD_MMC_HC_TRB instance.
@retval EFI_SUCCESS The TRB is executed successfully.
  @retval Others Some errors happen when executing this request.
**/
EFI_STATUS
SdMmcWaitTrbResult (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
UINT64 Timeout;
BOOLEAN InfiniteWait;
Packet = Trb->Packet;
//
// Wait Command Complete Interrupt Status bit in Normal Interrupt Status Register
//
Timeout = Packet->Timeout;
if (Timeout == 0) {
InfiniteWait = TRUE;
} else {
InfiniteWait = FALSE;
}
while (InfiniteWait || (Timeout > 0)) {
//
// Check Trb execution result by reading Normal Interrupt Status register.
//
Status = SdMmcCheckTrbResult (Private, Trb);
if (Status != EFI_NOT_READY) {
return Status;
}
//
// Stall for 1 microsecond.
//
gBS->Stall (1);
Timeout--;
}
return EFI_TIMEOUT;
}
| 1 | 17,691 | @aimanrosli23 For the changes in file SdMmcPciHci.c, please make sure that you do not revert the changes made by the below commits: SHA-1: 49accdedf956f175041040e677163b7cbb746283 * MdeModulePkg/SdMmcPciHcDxe: Hook SwitchClockFreq after SD clock start SHA-1: c67617f3c677c342efde780e229f841f4e0f6c7e * MdeModulePkg/SdMmcPciHcDxe: Add retries for sync commands SHA-1: 6d387610855ee27b49cb720de29ed809ea59dadc * MdeModulePkg/SdMmcPciHcDxe: Read response on command completion SHA-1: 63fd7f389874081ea37452b1f79ac013d52755e3 * MdeModulePkg/SdMmcPciHcDxe: Do not map memory for non DMA transfer | tianocore-edk2 | c |
@@ -128,11 +128,18 @@ class SelectionAndFilterTests:
('<link href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
- ('<textarea />', [webelem.Group.all]),
+ ('<textarea />', [webelem.Group.all, webelem.Group.inputs]),
('<select />', [webelem.Group.all]),
('<input />', [webelem.Group.all]),
('<input type="hidden" />', []),
+ ('<input type="text" />', [webelem.Group.inputs]),
+ ('<input type="email" />', [webelem.Group.inputs]),
+ ('<input type="url" />', [webelem.Group.inputs]),
+ ('<input type="tel" />', [webelem.Group.inputs]),
+ ('<input type="number" />', [webelem.Group.inputs]),
+ ('<input type="password" />', [webelem.Group.inputs]),
+ ('<input type="search" />', [webelem.Group.inputs]),
('<button />', [webelem.Group.all]),
('<button href="foo" />', [webelem.Group.all, webelem.Group.prevnext, | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the webelement utils."""
from unittest import mock
import collections.abc
import operator
import itertools
import binascii
import os.path
import hypothesis
import hypothesis.strategies
from PyQt5.QtCore import PYQT_VERSION, QRect, QPoint
from PyQt5.QtWebKit import QWebElement
import pytest
from qutebrowser.browser import webelem
def get_webelem(geometry=None, frame=None, null=False, style=None,
display='', attributes=None, tagname=None, classes=None,
parent=None):
"""Factory for WebElementWrapper objects based on a mock.
Args:
geometry: The geometry of the QWebElement as QRect.
frame: The QWebFrame the element is in.
null: Whether the element is null or not.
style: A dict with the styleAttributes of the element.
attributes: Boolean HTML attributes to be added.
tagname: The tag name.
        classes: HTML classes to be added.
        display: The display style (currently unused by the factory).
        parent: The parent element returned by the mock's parent().
    """
elem = mock.Mock()
elem.isNull.return_value = null
elem.geometry.return_value = geometry
elem.webFrame.return_value = frame
elem.tagName.return_value = tagname
elem.toOuterXml.return_value = '<fakeelem/>'
elem.toPlainText.return_value = 'text'
elem.parent.return_value = parent
attribute_dict = {}
if attributes is None:
pass
elif not isinstance(attributes, collections.abc.Mapping):
attribute_dict.update({e: None for e in attributes})
else:
attribute_dict.update(attributes)
elem.hasAttribute.side_effect = lambda k: k in attribute_dict
elem.attribute.side_effect = lambda k: attribute_dict.get(k, '')
elem.setAttribute.side_effect = (lambda k, v:
operator.setitem(attribute_dict, k, v))
elem.removeAttribute.side_effect = attribute_dict.pop
elem.attributeNames.return_value = list(attribute_dict)
if classes is not None:
elem.classes.return_value = classes.split(' ')
else:
elem.classes.return_value = []
style_dict = {'visibility': '', 'display': ''}
if style is not None:
style_dict.update(style)
def _style_property(name, strategy):
"""Helper function to act as styleProperty method."""
if strategy != QWebElement.ComputedStyle:
raise ValueError("styleProperty called with strategy != "
"ComputedStyle ({})!".format(strategy))
return style_dict[name]
elem.styleProperty.side_effect = _style_property
wrapped = webelem.WebElementWrapper(elem)
return wrapped
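# Editorial example of the factory above, mirroring how the tests below use
# it:
#
#   elem = get_webelem(QRect(0, 0, 10, 10), tagname='a',
#                      attributes={'href': 'foo'})
#   assert elem['href'] == 'foo'
#   assert 'target' not in elem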
class SelectionAndFilterTests:
"""Generator for tests for TestSelectionsAndFilters."""
# A mapping of a HTML element to a list of groups where the selectors
# (after filtering) should match.
#
# Based on this, test cases are generated to make sure it matches those
# groups and not the others.
TESTS = [
('<foo />', []),
('<foo bar="baz"/>', []),
('<foo href="baz"/>', [webelem.Group.url]),
('<foo src="baz"/>', [webelem.Group.url]),
('<a />', [webelem.Group.all]),
('<a href="foo" />', [webelem.Group.all, webelem.Group.links,
webelem.Group.prevnext, webelem.Group.url]),
('<a href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
('<area />', [webelem.Group.all]),
('<area href="foo" />', [webelem.Group.all, webelem.Group.links,
webelem.Group.prevnext, webelem.Group.url]),
('<area href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
('<link />', [webelem.Group.all]),
('<link href="foo" />', [webelem.Group.all, webelem.Group.links,
webelem.Group.prevnext, webelem.Group.url]),
('<link href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
('<textarea />', [webelem.Group.all]),
('<select />', [webelem.Group.all]),
('<input />', [webelem.Group.all]),
('<input type="hidden" />', []),
('<button />', [webelem.Group.all]),
('<button href="foo" />', [webelem.Group.all, webelem.Group.prevnext,
webelem.Group.url]),
('<button href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
# We can't easily test <frame>/<iframe> as they vanish when setting
# them via QWebFrame::setHtml...
('<p onclick="foo" foo="bar"/>', [webelem.Group.all]),
('<p onmousedown="foo" foo="bar"/>', [webelem.Group.all]),
('<p role="option" foo="bar"/>', [webelem.Group.all]),
('<p role="button" foo="bar"/>', [webelem.Group.all]),
('<p role="button" href="bar"/>', [webelem.Group.all,
webelem.Group.prevnext,
webelem.Group.url]),
]
GROUPS = [e for e in webelem.Group if e != webelem.Group.focus]
COMBINATIONS = list(itertools.product(TESTS, GROUPS))
def __init__(self):
self.tests = list(self._generate_tests())
def _generate_tests(self):
for (val, matching_groups), group in self.COMBINATIONS:
if group in matching_groups:
yield group, val, True
else:
yield group, val, False
class TestSelectorsAndFilters:
TESTS = SelectionAndFilterTests().tests
def test_test_generator(self):
assert self.TESTS
@pytest.mark.parametrize('group, val, matching', TESTS)
def test_selectors(self, webframe, group, val, matching):
webframe.setHtml('<html><body>{}</body></html>'.format(val))
# Make sure setting HTML succeeded and there's a new element
assert len(webframe.findAllElements('*')) == 3
elems = webframe.findAllElements(webelem.SELECTORS[group])
elems = [webelem.WebElementWrapper(e) for e in elems]
filterfunc = webelem.FILTERS.get(group, lambda e: True)
elems = [e for e in elems if filterfunc(e)]
assert bool(elems) == matching
class TestWebElementWrapper:
"""Generic tests for WebElementWrapper.
Note: For some methods, there's a dedicated test class with more involved
tests.
"""
@pytest.fixture
def elem(self):
return get_webelem()
def test_nullelem(self):
"""Test __init__ with a null element."""
with pytest.raises(webelem.IsNullError):
get_webelem(null=True)
def test_double_wrap(self, elem):
"""Test wrapping a WebElementWrapper."""
with pytest.raises(TypeError) as excinfo:
webelem.WebElementWrapper(elem)
assert str(excinfo.value) == "Trying to wrap a wrapper!"
@pytest.mark.parametrize('code', [
str,
lambda e: e[None],
lambda e: operator.setitem(e, None, None),
lambda e: operator.delitem(e, None),
lambda e: None in e,
len,
lambda e: e.is_visible(None),
lambda e: e.rect_on_view(),
lambda e: e.is_writable(),
lambda e: e.is_content_editable(),
lambda e: e.is_editable(),
lambda e: e.is_text_input(),
lambda e: e.debug_text(),
list, # __iter__
])
def test_vanished(self, elem, code):
"""Make sure methods check if the element is vanished."""
elem._elem.isNull.return_value = True
with pytest.raises(webelem.IsNullError):
code(elem)
def test_str(self, elem):
assert str(elem) == 'text'
@pytest.mark.parametrize('is_null, expected', [
(False, "<qutebrowser.browser.webelem.WebElementWrapper "
"html='<fakeelem/>'>"),
(True, '<qutebrowser.browser.webelem.WebElementWrapper html=None>'),
])
def test_repr(self, elem, is_null, expected):
elem._elem.isNull.return_value = is_null
assert repr(elem) == expected
def test_getitem(self):
elem = get_webelem(attributes={'foo': 'bar'})
assert elem['foo'] == 'bar'
def test_getitem_keyerror(self, elem):
with pytest.raises(KeyError):
elem['foo'] # pylint: disable=pointless-statement
def test_setitem(self, elem):
elem['foo'] = 'bar'
assert elem._elem.attribute('foo') == 'bar'
def test_delitem(self):
elem = get_webelem(attributes={'foo': 'bar'})
del elem['foo']
assert not elem._elem.hasAttribute('foo')
def test_setitem_keyerror(self, elem):
with pytest.raises(KeyError):
del elem['foo']
def test_contains(self):
elem = get_webelem(attributes={'foo': 'bar'})
assert 'foo' in elem
assert 'bar' not in elem
@pytest.mark.parametrize('attributes, expected', [
({'one': '1', 'two': '2'}, {'one', 'two'}),
({}, set()),
])
def test_iter(self, attributes, expected):
elem = get_webelem(attributes=attributes)
assert set(elem) == expected
@pytest.mark.parametrize('attributes, length', [
({'one': '1', 'two': '2'}, 2),
({}, 0),
])
def test_len(self, attributes, length):
elem = get_webelem(attributes=attributes)
assert len(elem) == length
@pytest.mark.parametrize('attributes, writable', [
([], True),
(['disabled'], False),
(['readonly'], False),
(['disabled', 'readonly'], False),
])
def test_is_writable(self, attributes, writable):
elem = get_webelem(attributes=attributes)
assert elem.is_writable() == writable
@pytest.mark.parametrize('attributes, expected', [
({}, False),
({'contenteditable': 'false'}, False),
({'contenteditable': 'inherit'}, False),
({'contenteditable': 'true'}, True),
])
def test_is_content_editable(self, attributes, expected):
elem = get_webelem(attributes=attributes)
assert elem.is_content_editable() == expected
@pytest.mark.parametrize('tagname, attributes, expected', [
('input', {}, True),
('textarea', {}, True),
('select', {}, False),
('foo', {'role': 'combobox'}, True),
('foo', {'role': 'textbox'}, True),
('foo', {'role': 'bar'}, False),
('input', {'role': 'bar'}, True),
])
def test_is_text_input(self, tagname, attributes, expected):
elem = get_webelem(tagname=tagname, attributes=attributes)
assert elem.is_text_input() == expected
@pytest.mark.parametrize('xml, expected', [
('<fakeelem/>', '<fakeelem/>'),
('<foo>\n<bar/>\n</foo>', '<foo><bar/></foo>'),
('<foo>{}</foo>'.format('x' * 500), '<foo>{}…'.format('x' * 494)),
], ids=['fakeelem', 'newlines', 'long'])
def test_debug_text(self, elem, xml, expected):
elem._elem.toOuterXml.return_value = xml
assert elem.debug_text() == expected
class TestRemoveBlankTarget:
@pytest.mark.parametrize('tagname', ['a', 'area'])
@pytest.mark.parametrize('target', ['_self', '_parent', '_top', ''])
def test_keep_target(self, tagname, target):
elem = get_webelem(tagname=tagname, attributes={'target': target})
elem.remove_blank_target()
assert elem['target'] == target
@pytest.mark.parametrize('tagname', ['a', 'area'])
def test_no_target(self, tagname):
elem = get_webelem(tagname=tagname)
elem.remove_blank_target()
assert 'target' not in elem
@pytest.mark.parametrize('tagname', ['a', 'area'])
def test_blank_target(self, tagname):
elem = get_webelem(tagname=tagname, attributes={'target': '_blank'})
elem.remove_blank_target()
assert elem['target'] == '_top'
@pytest.mark.parametrize('tagname', ['a', 'area'])
def test_ancestor_blank_target(self, tagname):
elem = get_webelem(tagname=tagname, attributes={'target': '_blank'})
elem_child = get_webelem(tagname='img', parent=elem._elem)
elem_child._elem.encloseWith(elem._elem)
elem_child.remove_blank_target()
assert elem['target'] == '_top'
@pytest.mark.parametrize('depth', [1, 5, 10])
def test_no_link(self, depth):
elem = [None] * depth
elem[0] = get_webelem(tagname='div')
for i in range(1, depth):
elem[i] = get_webelem(tagname='div', parent=elem[i-1])
elem[i]._elem.encloseWith(elem[i-1]._elem)
elem[-1].remove_blank_target()
for i in range(depth):
assert 'target' not in elem[i]
class TestIsVisible:
@pytest.fixture
def frame(self, stubs):
return stubs.FakeWebFrame(QRect(0, 0, 100, 100))
def test_invalid_frame_geometry(self, stubs):
"""Test with an invalid frame geometry."""
rect = QRect(0, 0, 0, 0)
assert not rect.isValid()
frame = stubs.FakeWebFrame(rect)
elem = get_webelem(QRect(0, 0, 10, 10), frame)
assert not elem.is_visible(frame)
def test_invalid_invisible(self, frame):
"""Test elements with an invalid geometry which are invisible."""
elem = get_webelem(QRect(0, 0, 0, 0), frame)
assert not elem.geometry().isValid()
assert elem.geometry().x() == 0
assert not elem.is_visible(frame)
def test_invalid_visible(self, frame):
"""Test elements with an invalid geometry which are visible.
This seems to happen sometimes in the real world, with real elements
which *are* visible, but don't have a valid geometry.
"""
elem = get_webelem(QRect(10, 10, 0, 0), frame)
assert not elem.geometry().isValid()
assert elem.is_visible(frame)
@pytest.mark.parametrize('geometry, visible', [
(QRect(5, 5, 4, 4), False),
(QRect(10, 10, 1, 1), True),
])
def test_scrolled(self, geometry, visible, stubs):
scrolled_frame = stubs.FakeWebFrame(QRect(0, 0, 100, 100),
scroll=QPoint(10, 10))
elem = get_webelem(geometry, scrolled_frame)
assert elem.is_visible(scrolled_frame) == visible
@pytest.mark.parametrize('style, visible', [
({'visibility': 'visible'}, True),
({'visibility': 'hidden'}, False),
({'display': 'inline'}, True),
({'display': 'none'}, False),
({'visibility': 'visible', 'display': 'none'}, False),
({'visibility': 'hidden', 'display': 'inline'}, False),
])
def test_css_attributes(self, frame, style, visible):
elem = get_webelem(QRect(0, 0, 10, 10), frame, style=style)
assert elem.is_visible(frame) == visible
class TestIsVisibleIframe:
"""Tests for is_visible with a child frame.
Attributes:
frame: The FakeWebFrame we're using to test.
iframe: The iframe inside frame.
elem1-elem4: FakeWebElements to test.
"""
Objects = collections.namedtuple('Objects', ['frame', 'iframe', 'elems'])
@pytest.fixture
def objects(self, stubs):
"""Set up the following base situation.
0, 0 300, 0
##############################
# #
0,10 # iframe 100,10 #
#********** #
#*e * elems[0]: 0, 0 in iframe (visible)
#* * #
#* e * elems[1]: 20,90 in iframe (visible)
#********** #
0,110 #. .100,110 #
#. . #
#. e . elems[2]: 20,150 in iframe (not visible)
#.......... #
# e elems[3]: 30, 180 in main frame (visible)
# #
# frame #
##############################
300, 0 300, 300
Returns an Objects namedtuple with frame/iframe/elems attributes.
"""
frame = stubs.FakeWebFrame(QRect(0, 0, 300, 300))
iframe = stubs.FakeWebFrame(QRect(0, 10, 100, 100), parent=frame)
assert frame.geometry().contains(iframe.geometry())
elems = [
get_webelem(QRect(0, 0, 10, 10), iframe),
get_webelem(QRect(20, 90, 10, 10), iframe),
get_webelem(QRect(20, 150, 10, 10), iframe),
get_webelem(QRect(30, 180, 10, 10), frame),
]
assert elems[0].is_visible(frame)
assert elems[1].is_visible(frame)
assert not elems[2].is_visible(frame)
assert elems[3].is_visible(frame)
return self.Objects(frame=frame, iframe=iframe, elems=elems)
def test_iframe_scrolled(self, objects):
"""Scroll iframe down so elem3 gets visible and elem1/elem2 not."""
objects.iframe.scrollPosition.return_value = QPoint(0, 100)
assert not objects.elems[0].is_visible(objects.frame)
assert not objects.elems[1].is_visible(objects.frame)
assert objects.elems[2].is_visible(objects.frame)
assert objects.elems[3].is_visible(objects.frame)
def test_mainframe_scrolled_iframe_visible(self, objects):
"""Scroll mainframe down so iframe is partly visible but elem1 not."""
objects.frame.scrollPosition.return_value = QPoint(0, 50)
geom = objects.frame.geometry().translated(
objects.frame.scrollPosition())
assert not geom.contains(objects.iframe.geometry())
assert geom.intersects(objects.iframe.geometry())
assert not objects.elems[0].is_visible(objects.frame)
assert objects.elems[1].is_visible(objects.frame)
assert not objects.elems[2].is_visible(objects.frame)
assert objects.elems[3].is_visible(objects.frame)
def test_mainframe_scrolled_iframe_invisible(self, objects):
"""Scroll mainframe down so iframe is invisible."""
objects.frame.scrollPosition.return_value = QPoint(0, 110)
geom = objects.frame.geometry().translated(
objects.frame.scrollPosition())
assert not geom.contains(objects.iframe.geometry())
assert not geom.intersects(objects.iframe.geometry())
assert not objects.elems[0].is_visible(objects.frame)
assert not objects.elems[1].is_visible(objects.frame)
assert not objects.elems[2].is_visible(objects.frame)
assert objects.elems[3].is_visible(objects.frame)
@pytest.fixture
def invalid_objects(self, stubs):
"""Set up the following base situation.
0, 0 300, 0
##############################
# #
0,10 # iframe 100,10 #
#********** #
#* e * elems[0]: 10, 10 in iframe (visible)
#* * #
#* * #
#********** #
0,110 #. .100,110 #
#. . #
#. e . elems[2]: 20,150 in iframe (not visible)
#.......... #
##############################
300, 0 300, 300
Returns an Objects namedtuple with frame/iframe/elems attributes.
"""
frame = stubs.FakeWebFrame(QRect(0, 0, 300, 300))
iframe = stubs.FakeWebFrame(QRect(0, 10, 100, 100), parent=frame)
assert frame.geometry().contains(iframe.geometry())
elems = [
get_webelem(QRect(10, 10, 0, 0), iframe),
get_webelem(QRect(20, 150, 0, 0), iframe),
]
for e in elems:
assert not e.geometry().isValid()
return self.Objects(frame=frame, iframe=iframe, elems=elems)
def test_invalid_visible(self, invalid_objects):
"""Test elements with an invalid geometry which are visible.
This seems to happen sometimes in the real world, with real elements
which *are* visible, but don't have a valid geometry.
"""
elem = invalid_objects.elems[0]
assert elem.is_visible(invalid_objects.frame)
def test_invalid_invisible(self, invalid_objects):
"""Test elements with an invalid geometry which are invisible."""
assert not invalid_objects.elems[1].is_visible(invalid_objects.frame)
def test_focus_element(stubs):
"""Test getting focus element with a fake frame/element.
Testing this with a real webpage is almost impossible because the window
and the element would have focus, which is hard to achieve consistently in
a test.
"""
frame = stubs.FakeWebFrame(QRect(0, 0, 100, 100))
elem = get_webelem()
frame.focus_elem = elem._elem
assert webelem.focus_elem(frame)._elem is elem._elem
class TestRectOnView:
def test_simple(self, stubs):
geometry = QRect(5, 5, 4, 4)
frame = stubs.FakeWebFrame(QRect(0, 0, 100, 100))
elem = get_webelem(geometry, frame)
assert elem.rect_on_view() == QRect(5, 5, 4, 4)
def test_scrolled(self, stubs):
geometry = QRect(20, 20, 4, 4)
frame = stubs.FakeWebFrame(QRect(0, 0, 100, 100),
scroll=QPoint(10, 10))
elem = get_webelem(geometry, frame)
assert elem.rect_on_view() == QRect(20 - 10, 20 - 10, 4, 4)
def test_iframe(self, stubs):
"""Test an element in an iframe.
0, 0 200, 0
##############################
# #
0,10 # iframe 100,10 #
#********** #
#* * #
#* * #
#* e * elem: 20,90 in iframe
#********** #
0,100 # #
##############################
200, 0 200, 200
"""
frame = stubs.FakeWebFrame(QRect(0, 0, 200, 200))
iframe = stubs.FakeWebFrame(QRect(0, 10, 100, 100), parent=frame)
assert frame.geometry().contains(iframe.geometry())
elem = get_webelem(QRect(20, 90, 10, 10), iframe)
assert elem.rect_on_view() == QRect(20, 10 + 90, 10, 10)
def test_passed_geometry(self, stubs):
"""Make sure geometry isn't called when a geometry is passed."""
raw_elem = get_webelem()._elem
rect = QRect(10, 20, 30, 40)
assert webelem.rect_on_view(raw_elem, rect) == rect
assert not raw_elem.geometry.called
class TestJavascriptEscape:
TESTS = {
'foo\\bar': r'foo\\bar',
'foo\nbar': r'foo\nbar',
'foo\rbar': r'foo\rbar',
"foo'bar": r"foo\'bar",
'foo"bar': r'foo\"bar',
'one\\two\rthree\nfour\'five"six': r'one\\two\rthree\nfour\'five\"six',
'\x00': r'\x00',
'hellö': 'hellö',
'☃': '☃',
'\x80Ā': '\x80Ā',
'𐀀\x00𐀀\x00': r'𐀀\x00𐀀\x00',
'𐀀\ufeff': r'𐀀\ufeff',
'\ufeff': r'\ufeff',
# http://stackoverflow.com/questions/2965293/
'\u2028': r'\u2028',
'\u2029': r'\u2029',
}
# Once there was this warning here:
# load glyph failed err=6 face=0x2680ba0, glyph=1912
# http://qutebrowser.org:8010/builders/debian-jessie/builds/765/steps/unittests/
# Should that be ignored?
@pytest.mark.parametrize('before, after', sorted(TESTS.items()), ids=repr)
def test_fake_escape(self, before, after):
"""Test javascript escaping with some expected outcomes."""
assert webelem.javascript_escape(before) == after
def _test_escape(self, text, qtbot, webframe):
"""Helper function for test_real_escape*."""
try:
self._test_escape_simple(text, webframe)
except AssertionError:
# Try another method if the simple method failed.
#
# See _test_escape_hexlified documentation on why this is
# necessary.
self._test_escape_hexlified(text, qtbot, webframe)
def _test_escape_hexlified(self, text, qtbot, webframe):
"""Test conversion by hexlifying in javascript.
Since the conversion of QStrings to Python strings is broken in some
        older PyQt versions in some corner cases, we load an HTML file which
        generates a hexlified representation of the escaped text and use
        that for comparisons.
"""
escaped = webelem.javascript_escape(text)
path = os.path.join(os.path.dirname(__file__),
'test_webelem_jsescape.html')
with open(path, encoding='utf-8') as f:
html_source = f.read().replace('%INPUT%', escaped)
with qtbot.waitSignal(webframe.loadFinished) as blocker:
webframe.setHtml(html_source)
assert blocker.args == [True]
result = webframe.evaluateJavaScript('window.qute_test_result')
assert result is not None
assert '|' in result
result_md5, result_text = result.split('|', maxsplit=1)
text_md5 = binascii.hexlify(text.encode('utf-8')).decode('ascii')
assert result_md5 == text_md5, result_text
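    # Editorial illustration (not part of the suite): hexlifying keeps the
    # JS/Python boundary ASCII-only, so a lossy QString conversion cannot
    # corrupt the comparison. For example:
    #
    #   binascii.hexlify('☃'.encode('utf-8'))  ->  b'e29883'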
def _test_escape_simple(self, text, webframe):
"""Test conversion by using evaluateJavaScript."""
escaped = webelem.javascript_escape(text)
result = webframe.evaluateJavaScript('"{}";'.format(escaped))
assert result == text
@pytest.mark.parametrize('text', sorted(TESTS), ids=repr)
def test_real_escape(self, webframe, qtbot, text):
"""Test javascript escaping with a real QWebPage."""
self._test_escape(text, qtbot, webframe)
@pytest.mark.qt_log_ignore('^load glyph failed',
'^OpenType support missing for script',
extend=True)
@hypothesis.given(hypothesis.strategies.text())
def test_real_escape_hypothesis(self, webframe, qtbot, text):
"""Test javascript escaping with a real QWebPage and hypothesis."""
# We can't simply use self._test_escape because of this:
# https://github.com/pytest-dev/pytest-qt/issues/69
# self._test_escape(text, qtbot, webframe)
try:
self._test_escape_simple(text, webframe)
except AssertionError:
if PYQT_VERSION >= 0x050300:
self._test_escape_hexlified(text, qtbot, webframe)
class TestGetChildFrames:
"""Check get_child_frames."""
def test_single_frame(self, stubs):
"""Test get_child_frames with a single frame without children."""
frame = stubs.FakeChildrenFrame()
children = webelem.get_child_frames(frame)
assert len(children) == 1
assert children[0] is frame
frame.childFrames.assert_called_once_with()
def test_one_level(self, stubs):
r"""Test get_child_frames with one level of children.
o parent
/ \
child1 o o child2
"""
child1 = stubs.FakeChildrenFrame()
child2 = stubs.FakeChildrenFrame()
parent = stubs.FakeChildrenFrame([child1, child2])
children = webelem.get_child_frames(parent)
assert len(children) == 3
assert children[0] is parent
assert children[1] is child1
assert children[2] is child2
parent.childFrames.assert_called_once_with()
child1.childFrames.assert_called_once_with()
child2.childFrames.assert_called_once_with()
def test_multiple_levels(self, stubs):
r"""Test get_child_frames with multiple levels of children.
o root
/ \
o o first
/\ /\
o o o o second
"""
second = [stubs.FakeChildrenFrame() for _ in range(4)]
first = [stubs.FakeChildrenFrame(second[0:2]),
stubs.FakeChildrenFrame(second[2:4])]
root = stubs.FakeChildrenFrame(first)
children = webelem.get_child_frames(root)
assert len(children) == 7
assert children[0] is root
for frame in [root] + first + second:
frame.childFrames.assert_called_once_with()
class TestIsEditable:
"""Tests for is_editable."""
@pytest.fixture
def stubbed_config(self, config_stub, monkeypatch):
"""Fixture to create a config stub with an input section."""
config_stub.data = {'input': {}}
monkeypatch.setattr('qutebrowser.browser.webelem.config', config_stub)
return config_stub
@pytest.mark.parametrize('tagname, attributes, editable', [
('input', {}, True),
('input', {'type': 'text'}, True),
('INPUT', {'TYPE': 'TEXT'}, True), # caps attributes/name
('input', {'type': 'email'}, True),
('input', {'type': 'url'}, True),
('input', {'type': 'tel'}, True),
('input', {'type': 'number'}, True),
('input', {'type': 'password'}, True),
('input', {'type': 'search'}, True),
('textarea', {}, True),
('input', {'type': 'button'}, False),
('input', {'type': 'checkbox'}, False),
('select', {}, False),
('input', {'disabled': None}, False),
('input', {'readonly': None}, False),
('textarea', {'disabled': None}, False),
('textarea', {'readonly': None}, False),
('foobar', {}, False),
('foobar', {'contenteditable': 'true'}, True),
('foobar', {'contenteditable': 'false'}, False),
('foobar', {'contenteditable': 'true', 'disabled': None}, False),
('foobar', {'contenteditable': 'true', 'readonly': None}, False),
('foobar', {'role': 'foobar'}, False),
('foobar', {'role': 'combobox'}, True),
('foobar', {'role': 'textbox'}, True),
('foobar', {'role': 'combobox', 'disabled': None}, False),
('foobar', {'role': 'combobox', 'readonly': None}, False),
])
def test_is_editable(self, tagname, attributes, editable):
elem = get_webelem(tagname=tagname, attributes=attributes)
assert elem.is_editable() == editable
@pytest.mark.parametrize('classes, editable', [
(None, False),
('foo-kix-bar', False),
('foo kix-foo', True),
('KIX-FOO', False),
('foo CodeMirror-foo', True),
])
def test_is_editable_div(self, classes, editable):
elem = get_webelem(tagname='div', classes=classes)
assert elem.is_editable() == editable
@pytest.mark.parametrize('setting, tagname, attributes, editable', [
(True, 'embed', {}, True),
(True, 'embed', {}, True),
(False, 'applet', {}, False),
(False, 'applet', {}, False),
(True, 'object', {'type': 'application/foo'}, True),
(False, 'object', {'type': 'application/foo'}, False),
(True, 'object', {'type': 'foo', 'classid': 'foo'}, True),
(False, 'object', {'type': 'foo', 'classid': 'foo'}, False),
(True, 'object', {}, False),
(True, 'object', {'type': 'image/gif'}, False),
])
def test_is_editable_plugin(self, stubbed_config, setting, tagname,
attributes, editable):
stubbed_config.data['input']['insert-mode-on-plugins'] = setting
elem = get_webelem(tagname=tagname, attributes=attributes)
assert elem.is_editable() == editable
| 1 | 14,770 | You'll also need to add `webelem.Group.all` everywhere as that matches as well | qutebrowser-qutebrowser | py |
@@ -0,0 +1,14 @@
+$(document).ready(initializeDocument);
+
+function initializeDocument() {
+  //only enable the RWA number field when BA80 is selected as an expense_type
+ $("input:radio[name='ncr_proposal[expense_type]']").click(function(event){
+ if ($("input:radio[name='ncr_proposal[expense_type]']:checked").val() == 'BA80') {
+ $('#ncr_proposal_RWA_number').attr('disabled', false);
+ } else {
+ $('#ncr_proposal_RWA_number').attr('disabled', true);
+ }
+ });
+
+ $('#ncr_proposal_building_number').chosen({placeholder_text_multiple: 'If applicable, which building will the charge support?'})
+} | 1 | 1 | 12,271 | Does this need to be an ERB template? | 18F-C2 | rb |
@@ -0,0 +1,18 @@
+package feedbackmock
+
+import (
+ "context"
+
+ feedbackv1 "github.com/lyft/clutch/backend/api/feedback/v1"
+ "github.com/lyft/clutch/backend/service/feedback"
+)
+
+type svc struct{}
+
+func (s svc) SubmitFeedback(ctx context.Context, id string, feedback *feedbackv1.Feedback, metadata *feedbackv1.FeedbackMetadata) error {
+ return nil
+}
+
+func New() feedback.Service {
+ return &svc{}
+} | 1 | 1 | 12,158 | do we want to register this in the mock server for testing? | lyft-clutch | go |
@@ -231,10 +231,10 @@ class AnomalyLikelihoodTest(TestCaseBase):
# Generate an estimate using fake distribution of anomaly scores.
data1 = _generateSampleData(mean=0.2)
- data1 = data1 + [(2, 2), (2, 2, 2, 2), (), (2)] # Malformed records
+ data1 = data1[0:1000] + [(2, 2), (2, 2, 2, 2), (), (2)] # Malformed records
likelihoods, avgRecordList, estimatorParams = (
- an.estimateAnomalyLikelihoods(data1[0:1000])
+ an.estimateAnomalyLikelihoods(data1[0:1004])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000) | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for anomaly likelihood module."""
import copy
import datetime
import math
import numpy
import unittest2 as unittest
from nupic.algorithms import anomaly_likelihood as an
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
def _sampleDistribution(params, numSamples, verbosity=0):
"""
Given the parameters of a distribution, generate numSamples points from it.
This routine is mostly for testing.
:returns: A numpy array of samples.
"""
  if "name" in params:
if params["name"] == "normal":
samples = numpy.random.normal(loc=params["mean"],
scale=math.sqrt(params["variance"]),
size=numSamples)
elif params["name"] == "pareto":
samples = numpy.random.pareto(params["alpha"], size=numSamples)
elif params["name"] == "beta":
samples = numpy.random.beta(a=params["alpha"], b=params["beta"],
size=numSamples)
else:
raise ValueError("Undefined distribution: " + params["name"])
else:
raise ValueError("Bad distribution params: " + str(params))
if verbosity > 0:
print "\nSampling from distribution:", params
print "After estimation, mean=", numpy.mean(samples), \
"var=", numpy.var(samples), "stdev=", math.sqrt(numpy.var(samples))
return samples
def _generateSampleData(mean=0.2, variance=0.2, metricMean=0.2,
metricVariance=0.2):
"""
Generate 1440 samples of fake metrics data with a particular distribution
of anomaly scores and metric values. Here we generate values every minute.
"""
data = []
p = {"mean": mean,
"name": "normal",
"stdev": math.sqrt(variance),
"variance": variance}
samples = _sampleDistribution(p, 1440)
p = {"mean": metricMean,
"name": "normal",
"stdev": math.sqrt(metricVariance),
"variance": metricVariance}
metricValues = _sampleDistribution(p, 1440)
for hour in range(0, 24):
for minute in range(0, 60):
data.append(
[
datetime.datetime(2013, 2, 2, hour, minute, 0),
metricValues[hour * 60 + minute],
samples[hour * 60 + minute],
]
)
return data
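# Editorial example of the helper above: each record is a
# [timestamp, metricValue, anomalyScore] list, so a typical test does
#
#   data = _generateSampleData(mean=0.2)  # 1440 per-minute records
#   likelihoods, avgRecordList, params = (
#       an.estimateAnomalyLikelihoods(data[0:1000]))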
class AnomalyLikelihoodTest(TestCaseBase):
def assertWithinEpsilon(self, a, b, epsilon=0.001):
self.assertLessEqual(abs(a - b), epsilon,
"Values %g and %g are not within %g" % (a, b, epsilon))
def testNormalProbability(self):
"""
Test that the normalProbability function returns correct normal values
"""
# Test a standard normal distribution
# Values taken from http://en.wikipedia.org/wiki/Standard_normal_table
p = {"name": "normal", "mean": 0.0, "variance": 1.0, "stdev": 1.0}
self.assertWithinEpsilon(an.normalProbability(0.0, p), 0.5)
self.assertWithinEpsilon(an.normalProbability(0.3, p), 0.3820885780)
self.assertWithinEpsilon(an.normalProbability(1.0, p), 0.1587)
self.assertWithinEpsilon(1.0 - an.normalProbability(1.0, p),
an.normalProbability(-1.0, p))
self.assertWithinEpsilon(an.normalProbability(-0.3, p),
1.0 - an.normalProbability(0.3, p))
# Non standard normal distribution
p = {"name": "normal", "mean": 1.0, "variance": 4.0, "stdev": 2.0}
self.assertWithinEpsilon(an.normalProbability(1.0, p), 0.5)
self.assertWithinEpsilon(an.normalProbability(2.0, p), 0.3085)
self.assertWithinEpsilon(an.normalProbability(3.0, p), 0.1587)
self.assertWithinEpsilon(an.normalProbability(3.0, p),
1.0 - an.normalProbability(-1.0, p))
self.assertWithinEpsilon(an.normalProbability(0.0, p),
1.0 - an.normalProbability(2.0, p))
# Non standard normal distribution
p = {"name": "normal", "mean": -2.0, "variance": 0.5,
"stdev": math.sqrt(0.5)}
self.assertWithinEpsilon(an.normalProbability(-2.0, p), 0.5)
self.assertWithinEpsilon(an.normalProbability(-1.5, p), 0.241963652)
self.assertWithinEpsilon(an.normalProbability(-2.5, p),
1.0 - an.normalProbability(-1.5, p))
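  # Editorial cross-check (not part of the suite): the expected tail values
  # above follow from P(X > x) == 0.5 * erfc((x - mean) / (stdev * sqrt(2))),
  # which the standard library can verify directly:
  #
  #   import math
  #   q = lambda x, m=0.0, s=1.0: 0.5 * math.erfc((x - m) / (s * math.sqrt(2)))
  #   assert abs(q(1.0) - 0.1587) < 0.001
  #   assert abs(q(2.0, m=1.0, s=2.0) - 0.3085) < 0.001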
def testEstimateNormal(self):
"""
This passes in a known set of data and ensures the estimateNormal
function returns the expected results.
"""
# 100 samples drawn from mean=0.4, stdev = 0.5
samples = numpy.array(
[0.32259025, -0.44936321, -0.15784842, 0.72142628, 0.8794327,
0.06323451, -0.15336159, -0.02261703, 0.04806841, 0.47219226,
0.31102718, 0.57608799, 0.13621071, 0.92446815, 0.1870912,
0.46366935, -0.11359237, 0.66582357, 1.20613048, -0.17735134,
0.20709358, 0.74508479, 0.12450686, -0.15468728, 0.3982757,
0.87924349, 0.86104855, 0.23688469, -0.26018254, 0.10909429,
0.65627481, 0.39238532, 0.77150761, 0.47040352, 0.9676175,
0.42148897, 0.0967786, -0.0087355, 0.84427985, 1.46526018,
1.19214798, 0.16034816, 0.81105554, 0.39150407, 0.93609919,
0.13992161, 0.6494196, 0.83666217, 0.37845278, 0.0368279,
-0.10201944, 0.41144746, 0.28341277, 0.36759426, 0.90439446,
0.05669459, -0.11220214, 0.34616676, 0.49898439, -0.23846184,
1.06400524, 0.72202135, -0.2169164, 1.136582, -0.69576865,
0.48603271, 0.72781008, -0.04749299, 0.15469311, 0.52942518,
0.24816816, 0.3483905, 0.7284215, 0.93774676, 0.07286373,
1.6831539, 0.3851082, 0.0637406, -0.92332861, -0.02066161,
0.93709862, 0.82114131, 0.98631562, 0.05601529, 0.72214694,
0.09667526, 0.3857222, 0.50313998, 0.40775344, -0.69624046,
-0.4448494, 0.99403206, 0.51639049, 0.13951548, 0.23458214,
1.00712699, 0.40939048, -0.06436434, -0.02753677, -0.23017904])
params = an.estimateNormal(samples)
self.assertWithinEpsilon(params["mean"], 0.3721)
self.assertWithinEpsilon(params["variance"], 0.22294)
self.assertWithinEpsilon(params["stdev"], 0.47216)
self.assertEqual(params["name"], "normal")
def testSampleDistribution(self):
"""
Test that sampleDistribution from a generated distribution returns roughly
the same parameters.
"""
# 1000 samples drawn from mean=0.4, stdev = 0.1
p = {"mean": 0.5,
"name": "normal",
"stdev": math.sqrt(0.1),
"variance": 0.1}
samples = _sampleDistribution(p, 1000)
# Ensure estimate is reasonable
np = an.estimateNormal(samples)
self.assertWithinEpsilon(p["mean"], np["mean"], 0.1)
self.assertWithinEpsilon(p["variance"], np["variance"], 0.1)
self.assertWithinEpsilon(p["stdev"], np["stdev"], 0.1)
self.assertTrue(np["name"], "normal")
def testEstimateAnomalyLikelihoods(self):
"""
This calls estimateAnomalyLikelihoods to estimate the distribution on fake
data and validates the results
"""
# Generate an estimate using fake distribution of anomaly scores.
data1 = _generateSampleData(mean=0.2)
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1000])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the sum is correct
avgParams = estimatorParams["movingAverage"]
total = 0
for v in avgRecordList:
total = total + v[2]
self.assertAlmostEqual(avgParams["total"], total)
# Check that the estimated mean is correct
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"],
total / float(len(avgRecordList)))
# Number of points with lower than 2% probability should be pretty low
# but not zero. Can't use exact 2% here due to random variations
self.assertLessEqual(numpy.sum(likelihoods < 0.02), 50)
self.assertGreaterEqual(numpy.sum(likelihoods < 0.02), 1)
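# (Sanity check on the bounds above: with 1000 samples, an exact 2% tail
# would be about 20 points, so requiring between 1 and 50 leaves generous
# room for sampling noise.)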
def testEstimateAnomalyLikelihoodsMalformedRecords(self):
"""
This calls estimateAnomalyLikelihoods with malformed records, which should
be quietly skipped.
"""
# Generate an estimate using fake distribution of anomaly scores.
data1 = _generateSampleData(mean=0.2)
data1 = data1 + [(2, 2), (2, 2, 2, 2), (), (2)] # Malformed records
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1000])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the sum is correct
avgParams = estimatorParams["movingAverage"]
total = 0
for v in avgRecordList:
total = total + v[2]
self.assertAlmostEqual(avgParams["total"], total)
# Check that the estimated mean is correct
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"],
total / float(len(avgRecordList)))
def testSkipRecords(self):
"""
This calls estimateAnomalyLikelihoods with various values of skipRecords
"""
# Check happy path
data1 = _generateSampleData(mean=0.1)[0:200]
data1 = data1 + (_generateSampleData(mean=0.9)[0:200])
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, skipRecords=200)
)
# Check results are correct, i.e. we are actually skipping the first 200
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"], 0.9, epsilon=0.1)
# Check case where skipRecords > num records
# In this case a null distribution should be returned which makes all
# the likelihoods reasonably high
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, skipRecords=500)
)
self.assertEqual(len(likelihoods), len(data1))
self.assertTrue(likelihoods.sum() >= 0.3 * len(likelihoods))
# Check the case where skipRecords == num records
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, skipRecords=len(data1))
)
self.assertEqual(len(likelihoods), len(data1))
self.assertTrue(likelihoods.sum() >= 0.3 * len(likelihoods))
def testUpdateAnomalyLikelihoods(self):
"""
A slightly more complex test. This calls estimateAnomalyLikelihoods
to estimate the distribution on fake data, followed by several calls
to updateAnomalyLikelihoods.
"""
#------------------------------------------
# Step 1. Generate an initial estimate using fake distribution of anomaly
# scores.
data1 = _generateSampleData(mean=0.2)[0:1000]
_, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, averagingWindow=5)
)
#------------------------------------------
# Step 2. Generate some new data with a higher average anomaly
# score. Use the estimator from step 1 to compute likelihoods. Now we
# should see a lot more anomalies.
data2 = _generateSampleData(mean=0.6)[0:300]
likelihoods2, avgRecordList2, estimatorParams2 = (
an.updateAnomalyLikelihoods(data2, estimatorParams)
)
self.assertEqual(len(likelihoods2), len(data2))
self.assertEqual(len(avgRecordList2), len(data2))
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# The new running total should be different
self.assertNotEqual(estimatorParams2["movingAverage"]["total"],
estimatorParams["movingAverage"]["total"])
# We should have many more samples where likelihood is < 0.01, but not all
self.assertGreaterEqual(numpy.sum(likelihoods2 < 0.01), 25)
self.assertLessEqual(numpy.sum(likelihoods2 < 0.01), 250)
#------------------------------------------
# Step 3. Generate some new data with the expected average anomaly score. We
# should see fewer anomalies than in Step 2.
data3 = _generateSampleData(mean=0.2)[0:1000]
likelihoods3, avgRecordList3, estimatorParams3 = (
an.updateAnomalyLikelihoods(data3, estimatorParams2)
)
self.assertEqual(len(likelihoods3), len(data3))
self.assertEqual(len(avgRecordList3), len(data3))
self.assertTrue(an.isValidEstimatorParams(estimatorParams3))
# The new running total should be different
self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
estimatorParams["movingAverage"]["total"])
self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
estimatorParams2["movingAverage"]["total"])
# We should have a small number of samples where likelihood is < 0.01, but
# at least one
self.assertGreaterEqual(numpy.sum(likelihoods3 < 0.01), 1)
self.assertLessEqual(numpy.sum(likelihoods3 < 0.01), 100)
#------------------------------------------
# Step 4. Validate that sending data incrementally is the same as sending
# in one batch
allData = data1
allData.extend(data2)
allData.extend(data3)
# Compute moving average of all the data and check it's the same
_, historicalValuesAll, totalAll = (
an._anomalyScoreMovingAverage(allData, windowSize=5)
)
self.assertEqual(sum(historicalValuesAll),
sum(estimatorParams3["movingAverage"]["historicalValues"]))
self.assertEqual(totalAll,
estimatorParams3["movingAverage"]["total"])
def testFlatAnomalyScores(self):
"""
This calls estimateAnomalyLikelihoods with flat distributions and
ensures things don't crash.
"""
# Generate an estimate using fake distribution of anomaly scores.
data1 = _generateSampleData(mean=42.0, variance=1e-10)
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1000])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the estimated mean is correct
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"], data1[0][2])
# If you deviate from the mean, you should get probability 0
# Test this by sending in just slightly different values.
data2 = _generateSampleData(mean=42.5, variance=1e-10)
likelihoods2, _, _ = (
an.updateAnomalyLikelihoods(data2[0:10], estimatorParams)
)
# The likelihoods should go to zero very quickly
self.assertLessEqual(likelihoods2.sum(), 0.01)
# Test edge case where anomaly scores are very close to 0
# In this case we don't let the likelihood get too low. An average
# anomaly score of 0.1 should be essentially zero, but an average
# of 0.05 should be higher
data3 = _generateSampleData(mean=0.01, variance=1e-6)
_, _, estimatorParams3 = (
an.estimateAnomalyLikelihoods(data3[0:1000])
)
data4 = _generateSampleData(mean=0.1, variance=1e-6)
likelihoods4, _, estimatorParams4 = (
an.updateAnomalyLikelihoods(data4[0:20], estimatorParams3)
)
# Average of 0.1 should go to zero
self.assertLessEqual(likelihoods4[10:].mean(), 0.002)
data5 = _generateSampleData(mean=0.05, variance=1e-6)
likelihoods5, _, _ = (
an.updateAnomalyLikelihoods(data5[0:20], estimatorParams4)
)
# The likelihoods should be low but not near zero
self.assertLessEqual(likelihoods5[10:].mean(), 0.28)
self.assertGreater(likelihoods5[10:].mean(), 0.015)
def testFlatMetricScores(self):
"""
This calls estimateAnomalyLikelihoods with flat metric values. In this case
we should use the null distribution, which gets reasonably high likelihood
for everything.
"""
# Generate samples with very flat metric values
data1 = _generateSampleData(
metricMean=42.0, metricVariance=1e-10)[0:1000]
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1)
)
# Check that we do indeed get reasonable likelihood values
self.assertEqual(len(likelihoods), len(data1))
self.assertTrue(likelihoods.sum() >= 0.4 * len(likelihoods))
# Check that we do indeed get null distribution
self.assertDictEqual(estimatorParams["distribution"], an.nullDistribution())
def testVeryFewScores(self):
"""
This calls estimateAnomalyLikelihoods and updateAnomalyLikelihoods
with one or no scores.
"""
# Generate an estimate using two data points
data1 = _generateSampleData(mean=42.0, variance=1e-10)
_, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:2])
)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the estimated mean is that value
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"], data1[0][2])
# Can't generate an estimate using no data points
data1 = numpy.zeros(0)
with self.assertRaises(ValueError):
an.estimateAnomalyLikelihoods(data1)
# Can't update with no scores
with self.assertRaises(ValueError):
an.updateAnomalyLikelihoods(data1, estimatorParams)
def testBadParams(self):
"""
Calls updateAnomalyLikelihoods with bad params.
"""
# Generate an estimate using one data point
data1 = _generateSampleData(mean=42.0, variance=1e-10)
_, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1])
)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Can't pass in a bad params structure
with self.assertRaises(ValueError):
an.updateAnomalyLikelihoods(data1, {"haha": "heehee"})
# Can't pass in something not a dict
with self.assertRaises(ValueError):
an.updateAnomalyLikelihoods(data1, 42.0)
def testFilterLikelihoodsInputType(self):
"""
Calls _filterLikelihoods with both input types -- numpy array of floats and
list of floats.
"""
l = [0.0, 0.0, 0.3, 0.3, 0.5]
l2 = an._filterLikelihoods(l)
n = numpy.array(l)
n2 = an._filterLikelihoods(n)
filtered = [0.0, 0.001, 0.3, 0.3, 0.5]
[self.assertAlmostEqual(l2[i], filtered[i],
msg="Input of type list returns incorrect result")
for i in range(len(l))]
[self.assertAlmostEqual(n2[i], filtered[i],
msg="Input of type numpy array returns incorrect result")
for i in range(len(n))]
def testFilterLikelihoods(self):
"""
Tests _filterLikelihoods function for several cases:
i. Likelihood goes straight to redzone, skipping over yellowzone, repeats
ii. Case (i) with different values, and numpy array instead of float list
iii. A scenario where changing the redzone from four to five 9s should
filter differently
"""
redThreshold = 0.9999
yellowThreshold = 0.999
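# (Behavioral note, inferred from case (i) below: when likelihoods repeat
# inside the red zone, _filterLikelihoods lifts the repeats up to the
# yellow-zone value 1 - yellowThreshold rather than leaving them red.)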
# Case (i): values at indices 1 and 7 should be filtered to yellowzone
l = [1.0, 1.0, 0.9, 0.8, 0.5, 0.4, 1.0, 1.0, 0.6, 0.0]
l = [1 - x for x in l]
l2 = copy.copy(l)
l2[1] = 1 - yellowThreshold
l2[7] = 1 - yellowThreshold
l3 = an._filterLikelihoods(l, redThreshold=redThreshold)
[self.assertAlmostEqual(l2[i], l3[i], msg="Failure in case (i)")
for i in range(len(l2))]
# Case (ii): values at indices 1-10 should be filtered to yellowzone
l = numpy.array([0.999978229, 0.999978229, 0.999999897, 1, 1, 1, 1,
0.999999994, 0.999999966, 0.999999966, 0.999994331,
0.999516576, 0.99744487])
l = 1.0 - l
l2 = copy.copy(l)
l2[1:11] = 1 - yellowThreshold
l3 = an._filterLikelihoods(l, redThreshold=redThreshold)
[self.assertAlmostEqual(l2[i], l3[i], msg="Failure in case (ii)")
for i in range(len(l2))]
# Case (iii): redThreshold difference should be at index 2
l = numpy.array([0.999968329, 0.999999897, 1, 1, 1,
1, 0.999999994, 0.999999966, 0.999999966,
0.999994331, 0.999516576, 0.99744487])
l = 1.0 - l
l2a = copy.copy(l)
l2b = copy.copy(l)
l2a[1:10] = 1 - yellowThreshold
l2b[2:10] = 1 - yellowThreshold
l3a = an._filterLikelihoods(l, redThreshold=redThreshold)
l3b = an._filterLikelihoods(l, redThreshold=0.99999)
[self.assertAlmostEqual(l2a[i], l3a[i], msg="Failure in case (iii), list a")
for i in range(len(l2a))]
[self.assertAlmostEqual(l2b[i], l3b[i], msg="Failure in case (iii), list b")
for i in range(len(l2b))]
self.assertFalse(numpy.array_equal(l3a, l3b),
msg="Failure in case (iii), list 3")
if __name__ == "__main__":
unittest.main()
| 1 | 17,404 | shouldn't this be `0:996` (+4) so the 1000s below fit? | numenta-nupic | py
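A sketch of one reading of this suggestion (hypothetical, not part of the
patch): trim the valid data to 996 records so the four malformed ones fall
inside the 1000-record window that the assertions below count against.

    data1 = _generateSampleData(mean=0.2)[0:996]
    data1 = data1 + [(2, 2), (2, 2, 2, 2), (), (2)]  # 996 valid + 4 malformed
    likelihoods, avgRecordList, estimatorParams = (
        an.estimateAnomalyLikelihoods(data1[0:1000])
    )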
@@ -49,11 +49,11 @@ class sorted_context(object):
self.enabled = enabled
def __enter__(self):
- self._enabled = MultiDimensionalMapping._sorted
- MultiDimensionalMapping._sorted = self.enabled
+ self._enabled = MultiDimensionalMapping.sort
+ MultiDimensionalMapping.sort = self.enabled
def __exit__(self, exc_type, exc_val, exc_tb):
- MultiDimensionalMapping._sorted = self._enabled
+ MultiDimensionalMapping.sort = self._enabled
| 1 | """
Supplies MultiDimensionalMapping and NdMapping which are multi-dimensional
map types. The former class only allows indexing whereas the latter
also enables slicing over multiple dimension ranges.
"""
from itertools import cycle
from operator import itemgetter
import numpy as np
import param
from . import util
from .dimension import OrderedDict, Dimension, Dimensioned, ViewableElement
from .util import (unique_iterator, sanitize_identifier, dimension_sort,
basestring, wrap_tuple, process_ellipses, get_ndmapping_label, pd)
class item_check(object):
"""
Context manager to allow creating NdMapping types without
performing the usual item_checks, providing significant
speedups when there are a lot of items. Should only be
used when both keys and values are guaranteed to be the
right type, as is the case for many internal operations.
"""
def __init__(self, enabled):
self.enabled = enabled
def __enter__(self):
self._enabled = MultiDimensionalMapping._check_items
MultiDimensionalMapping._check_items = self.enabled
def __exit__(self, exc_type, exc_val, exc_tb):
MultiDimensionalMapping._check_items = self._enabled
class sorted_context(object):
"""
Context manager to allow creating NdMapping types without
performing the usual sorting, providing significant
speedups when there are a lot of items. Should only be
used if values are guaranteed to be sorted before or after
the operation is performed.
"""
def __init__(self, enabled):
self.enabled = enabled
def __enter__(self):
self._enabled = MultiDimensionalMapping._sorted
MultiDimensionalMapping._sorted = self.enabled
def __exit__(self, exc_type, exc_val, exc_tb):
MultiDimensionalMapping._sorted = self._enabled
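# Illustrative usage sketch (not part of the original module): disable
# sorting while bulk-loading items whose order should be preserved, e.g.
#   with sorted_context(False):
#       ndmapping.update(ordered_items)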
class MultiDimensionalMapping(Dimensioned):
"""
An MultiDimensionalMapping is a Dimensioned mapping (like a
dictionary or array) that uses fixed-length multidimensional
keys. This behaves like a sparse N-dimensional array that does not
require a dense sampling over the multidimensional space.
If the underlying value for each (key,value) pair also supports
indexing (such as a dictionary, array, or list), fully qualified
(deep) indexing may be used from the top level, with the first N
dimensions of the index selecting a particular Dimensioned object
and the remaining dimensions indexing into that object.
For instance, for a MultiDimensionalMapping with dimensions "Year"
and "Month" and underlying values that are 2D floating-point
arrays indexed by (r,c), a 2D array may be indexed with x[2000,3]
and a single floating-point number may be indexed as
x[2000,3,1,9].
In practice, this class is typically only used as an abstract base
class, because the NdMapping subclass extends it with a range of
useful slicing methods for selecting subsets of the data. Even so,
keeping the slicing support separate from the indexing and data
storage methods helps make both classes easier to understand.
"""
group = param.String(default='MultiDimensionalMapping', constant=True)
kdims = param.List(default=[Dimension("Default")], constant=True)
vdims = param.List(default=[], bounds=(0, 0), constant=True)
data_type = None # Optional type checking of elements
_deep_indexable = False
_sorted = True
_check_items = True
def __init__(self, initial_items=None, **params):
if isinstance(initial_items, NdMapping):
map_type = type(initial_items)
own_params = self.params()
new_params = dict(initial_items.get_param_values(onlychanged=True))
if new_params.get('group') == map_type.__name__:
new_params.pop('group')
params = dict({name: value for name, value in new_params.items()
if name in own_params}, **params)
super(MultiDimensionalMapping, self).__init__(OrderedDict(), **params)
self._next_ind = 0
self._check_key_type = True
self._cached_index_types = [d.type for d in self.kdims]
self._cached_index_values = {d.name:d.values for d in self.kdims}
self._cached_categorical = any(d.values for d in self.kdims)
self._instantiated = not any(v == 'initial' for v in self._cached_index_values.values())
if initial_items is None: initial_items = []
if isinstance(initial_items, tuple):
self._add_item(initial_items[0], initial_items[1])
elif not self._check_items and self._instantiated:
if isinstance(initial_items, dict):
initial_items = initial_items.items()
elif isinstance(initial_items, MultiDimensionalMapping):
initial_items = initial_items.data.items()
self.data = OrderedDict((k if isinstance(k, tuple) else (k,), v)
for k, v in initial_items)
self._resort()
elif initial_items is not None:
self.update(OrderedDict(initial_items))
self._instantiated = True
def _item_check(self, dim_vals, data):
"""
Applies optional checks to individual data elements before
they are inserted ensuring that they are of a certain
type. Subclasses may implement further element restrictions.
"""
if self.data_type is not None and not isinstance(data, self.data_type):
if isinstance(self.data_type, tuple):
data_type = tuple(dt.__name__ for dt in self.data_type)
else:
data_type = self.data_type.__name__
raise TypeError('{slf} does not accept {data} type, data elements have '
'to be a {restr}.'.format(slf=type(self).__name__,
data=type(data).__name__,
restr=data_type))
elif not len(dim_vals) == self.ndims:
raise KeyError('Key has to match number of dimensions.')
def _add_item(self, dim_vals, data, sort=True, update=True):
"""
Adds item to the data, applying dimension types and ensuring
key conforms to Dimension type and values.
"""
if not isinstance(dim_vals, tuple):
dim_vals = (dim_vals,)
self._item_check(dim_vals, data)
# Apply dimension types
dim_types = zip(self._cached_index_types, dim_vals)
dim_vals = tuple(v if None in [t, v] else t(v) for t, v in dim_types)
# Check and validate for categorical dimensions
if self._cached_categorical:
valid_vals = zip(self.kdims, dim_vals)
else:
valid_vals = []
for dim, val in valid_vals:
vals = self._cached_index_values[dim.name]
if vals == 'initial': self._cached_index_values[dim.name] = []
if not self._instantiated and self.get_dimension(dim).values == 'initial':
if val not in vals:
self._cached_index_values[dim.name].append(val)
elif vals and val is not None and val not in vals:
raise KeyError('%s dimension value %s not in'
' specified dimension values.' % (dim, repr(val)))
# Updates nested data structures rather than simply overriding them.
if (update and (dim_vals in self.data)
and isinstance(self.data[dim_vals], (MultiDimensionalMapping, OrderedDict))):
self.data[dim_vals].update(data)
else:
self.data[dim_vals] = data
if sort:
self._resort()
def _apply_key_type(self, keys):
"""
If a type is specified by the corresponding key dimension,
this method applies the type to the supplied key.
"""
typed_key = ()
for dim, key in zip(self.kdims, keys):
key_type = dim.type
if key_type is None:
typed_key += (key,)
elif isinstance(key, slice):
sl_vals = [key.start, key.stop, key.step]
typed_key += (slice(*[key_type(el) if el is not None else None
for el in sl_vals]),)
elif key is Ellipsis:
typed_key += (key,)
elif isinstance(key, list):
typed_key += ([key_type(k) for k in key],)
else:
typed_key += (key_type(key),)
return typed_key
def _split_index(self, key):
"""
Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with fewer indices than there are dimensions will be padded.
"""
if not isinstance(key, tuple):
key = (key,)
elif key == ():
return (), ()
if key[0] is Ellipsis:
num_pad = self.ndims - len(key) + 1
key = (slice(None),) * num_pad + key[1:]
elif len(key) < self.ndims:
num_pad = self.ndims - len(key)
key = key + (slice(None),) * num_pad
map_slice = key[:self.ndims]
if self._check_key_type:
map_slice = self._apply_key_type(map_slice)
if len(key) == self.ndims:
return map_slice, ()
else:
return map_slice, key[self.ndims:]
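# Padding sketch (illustrative, assuming ndims == 2): a single index is
# padded with full slices before being split, e.g.
#   self._split_index(2000)  ->  ((2000, slice(None, None, None)), ())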
def _dataslice(self, data, indices):
"""
Returns slice of data element if the item is deep
indexable. Warns if attempting to slice an object that has not
been declared deep indexable.
"""
if self._deep_indexable and isinstance(data, Dimensioned) and indices:
return data[indices]
elif len(indices) > 0:
self.warning('Cannot index into data element, extra data'
' indices ignored.')
return data
def _resort(self):
if self._sorted:
resorted = dimension_sort(self.data, self.kdims, self.vdims,
self._cached_categorical,
range(self.ndims),
self._cached_index_values)
self.data = OrderedDict(resorted)
def clone(self, data=None, shared_data=True, *args, **overrides):
"""
Overrides Dimensioned clone to avoid checking items if data
is unchanged.
"""
with item_check(not shared_data and self._check_items):
return super(MultiDimensionalMapping, self).clone(data, shared_data,
*args, **overrides)
def groupby(self, dimensions, container_type=None, group_type=None, **kwargs):
"""
Splits the mapping into groups by key dimension which are then
returned together in a mapping of class container_type. The
individual groups are of the same type as the original map.
"""
if self.ndims == 1:
self.warning('Cannot split Map with only one dimension.')
return self
container_type = container_type if container_type else type(self)
group_type = group_type if group_type else type(self)
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
sort = not self._sorted
with item_check(False):
return util.ndmapping_groupby(self, dimensions, container_type,
group_type, sort=sort, **kwargs)
def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):
"""
Create a new object with an additional key dimension.
Requires the dimension name or object, the desired position
in the key dimensions and a key value scalar or sequence of
the same length as the existing keys.
"""
if not isinstance(dimension, Dimension):
dimension = Dimension(dimension)
if dimension in self.dimensions():
raise Exception('{dim} dimension already defined'.format(dim=dimension.name))
if vdim and self._deep_indexable:
raise Exception('Cannot add value dimension to object that is deep indexable')
if vdim:
dims = self.vdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(vdims=dims)
dim_pos += self.ndims
else:
dims = self.kdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(kdims=dims)
if isinstance(dim_val, basestring) or not hasattr(dim_val, '__iter__'):
dim_val = cycle([dim_val])
else:
if not len(dim_val) == len(self):
raise ValueError("Added dimension values must be same length"
"as existing keys.")
items = OrderedDict()
for dval, (key, val) in zip(dim_val, self.data.items()):
if vdim:
new_val = list(val)
new_val.insert(dim_pos, dval)
items[key] = tuple(new_val)
else:
new_key = list(key)
new_key.insert(dim_pos, dval)
items[tuple(new_key)] = val
return self.clone(items, **dict(dimensions, **kwargs))
def drop_dimension(self, dimensions):
"""
Returns a new mapping with the named dimension(s) removed.
"""
dimensions = [dimensions] if np.isscalar(dimensions) else dimensions
dims = [d for d in self.kdims if d not in dimensions]
dim_inds = [self.get_dimension_index(d) for d in dims]
key_getter = itemgetter(*dim_inds)
return self.clone([(key_getter(k), v) for k, v in self.data.items()],
kdims=dims)
def dimension_values(self, dimension, expanded=True, flat=True):
"Returns the values along the specified dimension."
dimension = self.get_dimension(dimension, strict=True)
if dimension in self.kdims:
return np.array([k[self.get_dimension_index(dimension)] for k in self.data.keys()])
if dimension in self.dimensions():
values = [el.dimension_values(dimension) for el in self
if dimension in el.dimensions()]
vals = np.concatenate(values)
return vals if expanded else util.unique_array(vals)
else:
return super(MultiDimensionalMapping, self).dimension_values(dimension, expanded, flat)
def reindex(self, kdims=[], force=False):
"""
Create a new object with a re-ordered or reduced set of key
dimensions.
Reducing the number of key dimensions will discard information
from the keys. All data values are accessible in the newly
created object as the new labels must be sufficient to address
each value uniquely.
"""
old_kdims = [d.name for d in self.kdims]
if not len(kdims):
kdims = [d for d in old_kdims
if not len(set(self.dimension_values(d))) == 1]
indices = [self.get_dimension_index(el) for el in kdims]
keys = [tuple(k[i] for i in indices) for k in self.data.keys()]
reindexed_items = OrderedDict(
(k, v) for (k, v) in zip(keys, self.data.values()))
reduced_dims = set([d.name for d in self.kdims]).difference(kdims)
dimensions = [self.get_dimension(d) for d in kdims
if d not in reduced_dims]
if len(set(keys)) != len(keys) and not force:
raise Exception("Given dimension labels not sufficient"
"to address all values uniquely")
if len(keys):
cdims = {self.get_dimension(d): self.dimension_values(d)[0] for d in reduced_dims}
else:
cdims = {}
with item_check(indices == sorted(indices)):
return self.clone(reindexed_items, kdims=dimensions,
cdims=cdims)
@property
def last(self):
"Returns the item highest data item along the map dimensions."
return list(self.data.values())[-1] if len(self) else None
@property
def last_key(self):
"Returns the last key value."
return list(self.keys())[-1] if len(self) else None
@property
def info(self):
"""
Prints information about the Dimensioned object, including the
number and type of objects contained within it and information
about its dimensions.
"""
if (len(self.values()) > 0):
info_str = self.__class__.__name__ +\
" containing %d items of type %s\n" % (len(self.keys()),
type(self.values()[0]).__name__)
else:
info_str = self.__class__.__name__ + " containing no items\n"
info_str += ('-' * (len(info_str)-1)) + "\n\n"
aliases = {v: k for k, v in self._dim_aliases.items()}
for group in self._dim_groups:
dimensions = getattr(self, group)
if dimensions:
group = aliases[group].split('_')[0]
info_str += '%s Dimensions: \n' % group.capitalize()
for d in dimensions:
dmin, dmax = self.range(d.name)
if d.value_format:
dmin, dmax = d.value_format(dmin), d.value_format(dmax)
info_str += '\t %s: %s...%s \n' % (str(d), dmin, dmax)
print(info_str)
def table(self, datatype=None, **kwargs):
"Creates a table from the stored keys and data."
if datatype is None:
datatype = ['dataframe' if pd else 'dictionary']
tables = []
for key, value in self.data.items():
value = value.table(datatype=datatype, **kwargs)
for idx, (dim, val) in enumerate(zip(self.kdims, key)):
value = value.add_dimension(dim, idx, val)
tables.append(value)
return value.interface.concatenate(tables)
def dframe(self):
"Creates a pandas DataFrame from the stored keys and data."
try:
import pandas
except ImportError:
raise Exception("Cannot build a DataFrame without the pandas library.")
labels = self.dimensions('key', True) + [self.group]
return pandas.DataFrame(
[dict(zip(labels, k + (v,))) for (k, v) in self.data.items()])
def update(self, other):
"""
Updates the current mapping with some other mapping or
OrderedDict instance, making sure that they are indexed along
the same set of dimensions. The order of key dimensions remains
unchanged after the update.
"""
if isinstance(other, NdMapping):
dims = [d for d in other.kdims if d not in self.kdims]
if len(dims) == other.ndims:
raise KeyError("Cannot update with NdMapping that has"
" a different set of key dimensions.")
elif dims:
other = other.drop_dimension(dims)
other = other.data
for key, data in other.items():
self._add_item(key, data, sort=False)
self._resort()
def keys(self):
" Returns the keys of all the elements."
if self.ndims == 1:
return [k[0] for k in self.data.keys()]
else:
return list(self.data.keys())
def values(self):
" Returns the values of all the elements."
return list(self.data.values())
def items(self):
"Returns all elements as a list in (key,value) format."
return list(zip(list(self.keys()), list(self.values())))
def get(self, key, default=None):
"Standard get semantics for all mapping types"
try:
if key is None:
return None
return self[key]
except Exception:
return default
def pop(self, key, default=None):
"Standard pop semantics for all mapping types"
if not isinstance(key, tuple): key = (key,)
return self.data.pop(key, default)
def __getitem__(self, key):
"""
Allows multi-dimensional indexing in the order of the
specified key dimensions, passing any additional indices to
the data elements.
"""
if key in [Ellipsis, ()]:
return self
map_slice, data_slice = self._split_index(key)
return self._dataslice(self.data[map_slice], data_slice)
def __setitem__(self, key, value):
self._add_item(key, value, update=False)
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self.values())
def __contains__(self, key):
if self.ndims == 1:
return key in self.data.keys()
else:
return key in self.keys()
def __len__(self):
return len(self.data)
class NdMapping(MultiDimensionalMapping):
"""
NdMapping supports the same indexing semantics as
MultiDimensionalMapping but also supports slicing semantics.
Slicing semantics on an NdMapping is dependent on the ordering
semantics of the keys. As MultiDimensionalMapping sorts the keys, a
slice on an NdMapping is effectively a way of filtering out the
keys that are outside the slice range.
"""
group = param.String(default='NdMapping', constant=True)
def __getitem__(self, indexslice):
"""
Allows slicing operations along the key and data
dimensions. If no data slice is supplied it will return all
data elements, otherwise it will return the requested slice of
the data.
"""
if isinstance(indexslice, np.ndarray) and indexslice.dtype.kind == 'b':
if not len(indexslice) == len(self):
raise IndexError("Boolean index must match length of sliced object")
selection = zip(indexslice, self.data.items())
return self.clone([item for c, item in selection if c])
elif indexslice == () and not self.kdims:
return self.data[()]
elif indexslice in [Ellipsis, ()]:
return self
elif Ellipsis in wrap_tuple(indexslice):
indexslice = process_ellipses(self, indexslice)
map_slice, data_slice = self._split_index(indexslice)
map_slice = self._transform_indices(map_slice)
map_slice = self._expand_slice(map_slice)
if all(not (isinstance(el, (slice, set, list, tuple)) or callable(el))
for el in map_slice):
return self._dataslice(self.data[map_slice], data_slice)
else:
conditions = self._generate_conditions(map_slice)
items = self.data.items()
for cidx, (condition, dim) in enumerate(zip(conditions, self.kdims)):
values = self._cached_index_values.get(dim.name, None)
items = [(k, v) for k, v in items
if condition(values.index(k[cidx])
if values else k[cidx])]
sliced_items = []
for k, v in items:
val_slice = self._dataslice(v, data_slice)
if val_slice or isinstance(val_slice, tuple):
sliced_items.append((k, val_slice))
if len(sliced_items) == 0:
raise KeyError('No items within specified slice.')
with item_check(False):
return self.clone(sliced_items)
def _expand_slice(self, indices):
"""
Expands slices containing steps into an explicit set of matching keys.
"""
keys = list(self.data.keys())
expanded = []
for idx, ind in enumerate(indices):
if isinstance(ind, slice) and ind.step is not None:
dim_ind = slice(ind.start, ind.stop)
if dim_ind == slice(None):
condition = self._all_condition()
elif dim_ind.start is None:
condition = self._upto_condition(dim_ind)
elif dim_ind.stop is None:
condition = self._from_condition(dim_ind)
else:
condition = self._range_condition(dim_ind)
dim_vals = unique_iterator(k[idx] for k in keys)
expanded.append(set([k for k in dim_vals if condition(k)][::int(ind.step)]))
else:
expanded.append(ind)
return tuple(expanded)
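# Expansion sketch (illustrative): with keys 0..9 along the first key
# dimension, indices == (slice(None, None, 2),) expands to the set
# {0, 2, 4, 6, 8}, taking every second unique key value.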
def _transform_indices(self, indices):
"""
Identity function here but subclasses can implement transforms
of the dimension indices from one coordinate system to another.
"""
return indices
def _generate_conditions(self, map_slice):
"""
Generates filter conditions used for slicing the data structure.
"""
conditions = []
for dim, dim_slice in zip(self.kdims, map_slice):
if isinstance(dim_slice, slice):
start, stop = dim_slice.start, dim_slice.stop
if dim.values:
values = self._cached_index_values[dim.name]
dim_slice = slice(None if start is None else values.index(start),
None if stop is None else values.index(stop))
if dim_slice == slice(None):
conditions.append(self._all_condition())
elif start is None:
conditions.append(self._upto_condition(dim_slice))
elif stop is None:
conditions.append(self._from_condition(dim_slice))
else:
conditions.append(self._range_condition(dim_slice))
elif isinstance(dim_slice, (set, list)):
if dim.values:
dim_slice = [self._cached_index_values[dim.name].index(dim_val)
for dim_val in dim_slice]
conditions.append(self._values_condition(dim_slice))
elif dim_slice is Ellipsis:
conditions.append(self._all_condition())
elif callable(dim_slice):
conditions.append(dim_slice)
elif isinstance(dim_slice, (tuple)):
raise IndexError("Keys may only be selected with sets or lists, not tuples.")
else:
if dim.values:
dim_slice = self._cached_index_values[dim.name].index(dim_slice)
conditions.append(self._value_condition(dim_slice))
return conditions
def _value_condition(self, value):
return lambda x: x == value
def _values_condition(self, values):
return lambda x: x in values
def _range_condition(self, slice):
if slice.step is None:
lmbd = lambda x: slice.start <= x < slice.stop
else:
lmbd = lambda x: slice.start <= x < slice.stop and not (
(x-slice.start) % slice.step)
return lmbd
def _upto_condition(self, slice):
if slice.step is None:
lmbd = lambda x: x < slice.stop
else:
lmbd = lambda x: x < slice.stop and not (x % slice.step)
return lmbd
def _from_condition(self, slice):
if slice.step is None:
lmbd = lambda x: x > slice.start
else:
lmbd = lambda x: x > slice.start and ((x-slice.start) % slice.step)
return lmbd
def _all_condition(self):
return lambda x: True
class UniformNdMapping(NdMapping):
"""
A UniformNdMapping is a map of Dimensioned objects and is itself
indexed over a number of specified dimensions. The dimension may
be a spatial dimension (e.g., a ZStack), time (specifying a frame
sequence) or any other combination of Dimensions.
UniformNdMapping objects can be sliced, sampled, reduced, overlaid
and split along their own dimensions and those of their contained
Views. Subclasses should implement the appropriate slicing,
sampling and reduction methods for their Dimensioned type.
"""
data_type = (ViewableElement, NdMapping)
_abstract = True
_deep_indexable = True
_auxiliary_component = False
def __init__(self, initial_items=None, group=None, label=None, **params):
self._type = None
self._group_check, self.group = None, group
self._label_check, self.label = None, label
super(UniformNdMapping, self).__init__(initial_items, **params)
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
"""
Returns a clone of the object with matching parameter values
containing the specified args and kwargs.
If shared_data is set to True and no data explicitly supplied,
the clone will share data with the original.
"""
settings = dict(self.get_param_values())
if settings.get('group', None) != self._group:
settings.pop('group')
if settings.get('label', None) != self._label:
settings.pop('label')
if new_type is None:
clone_type = self.__class__
else:
clone_type = new_type
new_params = new_type.params()
settings = {k: v for k, v in settings.items()
if k in new_params}
settings = dict(settings, **overrides)
if 'id' not in settings:
settings['id'] = self.id
if data is None and shared_data:
data = self.data
# Apply name mangling for __ attribute
pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
with item_check(not shared_data and self._check_items):
return clone_type(data, *args, **{k:v for k,v in settings.items()
if k not in pos_args})
@property
def group(self):
if self._group:
return self._group
group = get_ndmapping_label(self, 'group') if len(self) else None
if group is None:
return type(self).__name__
return group
@group.setter
def group(self, group):
if group is not None and not sanitize_identifier.allowable(group):
raise ValueError("Supplied group %s contains invalid "
"characters." % self.group)
self._group = group
@property
def label(self):
if self._label:
return self._label
else:
if len(self):
label = get_ndmapping_label(self, 'label')
return '' if label is None else label
else:
return ''
@label.setter
def label(self, label):
if label is not None and not sanitize_identifier.allowable(label):
raise ValueError("Supplied group %s contains invalid "
"characters." % self.group)
self._label = label
@property
def type(self):
"""
The type of elements stored in the map.
"""
if self._type is None and len(self):
self._type = self.values()[0].__class__
return self._type
@property
def empty_element(self):
return self.type(None)
def _item_check(self, dim_vals, data):
if self.type is not None and (type(data) != self.type):
raise AssertionError("%s must only contain one type of object, not both %s and %s." %
(self.__class__.__name__, type(data).__name__, self.type.__name__))
super(UniformNdMapping, self)._item_check(dim_vals, data)
def dframe(self):
"""
Gets a dframe for each Element in the HoloMap, appends the
dimensions of the HoloMap as series and concatenates the
dframes.
"""
import pandas
dframes = []
for key, view in self.data.items():
view_frame = view.dframe()
key_dims = reversed(list(zip(key, self.dimensions('key', True))))
for val, dim in key_dims:
dimn = 1
while dim in view_frame:
dim = dim+'_%d' % dimn
if dim in view_frame:
dimn += 1
view_frame.insert(0, dim, val)
dframes.append(view_frame)
return pandas.concat(dframes)
| 1 | 16,877 | The docstring of this context manager should be updated: now that ``sort=False`` is valid, it should just say that it disables sorting regardless of whether the NdMapping has ``sort=True`` or ``sort=False``. I also think the line 'Should only be used if values are guaranteed to be sorted before or after the operation is performed.' should say something else - maybe just that the initial ordering (whatever it is) should be preserved? | holoviz-holoviews | py
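A possible revision along the lines the reviewer suggests (illustrative
wording only, not the merged docstring):

    class sorted_context(object):
        """
        Context manager that temporarily disables sorting on NdMapping
        types, regardless of whether the instance was declared with
        sort=True or sort=False. Useful for bulk insertions when the
        initial ordering of the items should be preserved.
        """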
@@ -52,8 +52,8 @@ describe('Lifecycle methods', () => {
}
}
- spyAll(Receiver.prototype);
spyAll(Receiver);
+ spyAll(Receiver.prototype);
function throwExpectedError() {
throw (expectedError = new Error('Error!'));
| 1 |
import { setupRerender } from 'preact/test-utils';
import { createElement, render, Component } from 'preact';
import {
setupScratch,
teardown,
spyAll,
resetAllSpies
} from '../../_util/helpers';
/** @jsx createElement */
describe('Lifecycle methods', () => {
/* eslint-disable react/display-name */
/** @type {HTMLDivElement} */
let scratch;
/** @type {() => void} */
let rerender;
beforeEach(() => {
scratch = setupScratch();
rerender = setupRerender();
});
afterEach(() => {
teardown(scratch);
});
describe('#getDerivedStateFromError', () => {
/** @type {Error} */
let expectedError;
/** @type {typeof import('../../../').Component} */
let ThrowErr;
/** @type {Receiver} */
let receiver;
class Receiver extends Component {
constructor() {
super();
receiver = this;
}
static getDerivedStateFromError(error) {
return { error };
}
render() {
return this.state.error
? String(this.state.error)
: this.props.children;
}
}
spyAll(Receiver.prototype);
spyAll(Receiver);
function throwExpectedError() {
throw (expectedError = new Error('Error!'));
}
beforeEach(() => {
ThrowErr = class ThrowErr extends Component {
static getDerivedStateFromError() {
expect.fail("Throwing component should not catch it's own error.");
return {};
}
render() {
return <div>ThrowErr: getDerivedStateFromError</div>;
}
};
sinon.spy(ThrowErr, 'getDerivedStateFromError');
expectedError = undefined;
receiver = undefined;
resetAllSpies(Receiver.prototype);
resetAllSpies(Receiver);
});
afterEach(() => {
expect(
ThrowErr.getDerivedStateFromError,
"Throwing component should not catch it's own error."
).to.not.be.called;
});
it('should be called when child fails in constructor', () => {
class ThrowErr extends Component {
constructor(props, context) {
super(props, context);
throwExpectedError();
}
static getDerivedStateFromError() {
expect.fail("Throwing component should not catch it's own error");
return {};
}
render() {
return <div />;
}
}
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
rerender();
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
// https://github.com/preactjs/preact/issues/1570
it('should handle double child throws', () => {
const Child = ({ i }) => {
throw new Error(`error! ${i}`);
};
const fn = () =>
render(
<Receiver>
{[1, 2].map(i => (
<Child key={i} i={i} />
))}
</Receiver>,
scratch
);
expect(fn).to.not.throw();
rerender();
expect(scratch.innerHTML).to.equal('Error: error! 2');
});
it('should be called when child fails in componentWillMount', () => {
ThrowErr.prototype.componentWillMount = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in render', () => {
ThrowErr.prototype.render = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in componentDidMount', () => {
ThrowErr.prototype.componentDidMount = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in getDerivedStateFromProps', () => {
ThrowErr.getDerivedStateFromProps = throwExpectedError;
sinon.spy(ThrowErr.prototype, 'render');
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
expect(ThrowErr.prototype.render).not.to.have.been.called;
});
it('should be called when child fails in getSnapshotBeforeUpdate', () => {
ThrowErr.prototype.getSnapshotBeforeUpdate = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
receiver.forceUpdate();
rerender();
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in componentDidUpdate', () => {
ThrowErr.prototype.componentDidUpdate = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
receiver.forceUpdate();
rerender();
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in componentWillUpdate', () => {
ThrowErr.prototype.componentWillUpdate = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
receiver.forceUpdate();
rerender();
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in componentWillReceiveProps', () => {
ThrowErr.prototype.componentWillReceiveProps = throwExpectedError;
let receiver;
class Receiver extends Component {
constructor() {
super();
this.state = { foo: 'bar' };
receiver = this;
}
static getDerivedStateFromError(error) {
return { error };
}
render() {
return this.state.error ? (
String(this.state.error)
) : (
<ThrowErr foo={this.state.foo} />
);
}
}
sinon.spy(Receiver, 'getDerivedStateFromError');
render(<Receiver />, scratch);
receiver.setState({ foo: 'baz' });
rerender();
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in shouldComponentUpdate', () => {
ThrowErr.prototype.shouldComponentUpdate = throwExpectedError;
let receiver;
class Receiver extends Component {
constructor() {
super();
this.state = { foo: 'bar' };
receiver = this;
}
static getDerivedStateFromError(error) {
return { error };
}
render() {
return this.state.error ? (
String(this.state.error)
) : (
<ThrowErr foo={this.state.foo} />
);
}
}
sinon.spy(Receiver, 'getDerivedStateFromError');
render(<Receiver />, scratch);
receiver.setState({ foo: 'baz' });
rerender();
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when child fails in componentWillUnmount', () => {
ThrowErr.prototype.componentWillUnmount = throwExpectedError;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
render(
<Receiver>
<div />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when applying a Component ref', () => {
const Foo = () => <div />;
const ref = value => {
if (value) {
throwExpectedError();
}
};
// In React, an error boundary handles its own refs:
// https://codesandbox.io/s/react-throwing-refs-lk958
class Receiver extends Component {
static getDerivedStateFromError(error) {
return { error };
}
render() {
return this.state.error ? (
String(this.state.error)
) : (
<Foo ref={ref} />
);
}
}
sinon.spy(Receiver, 'getDerivedStateFromError');
render(<Receiver />, scratch);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when applying a DOM ref', () => {
const ref = value => {
if (value) {
throwExpectedError();
}
};
// In React, an error boundary handles its own refs:
// https://codesandbox.io/s/react-throwing-refs-lk958
class Receiver extends Component {
static getDerivedStateFromError(error) {
return { error };
}
render() {
return this.state.error ? (
String(this.state.error)
) : (
<div ref={ref} />
);
}
}
sinon.spy(Receiver, 'getDerivedStateFromError');
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should be called when unmounting a ref', () => {
const ref = value => {
if (value == null) {
throwExpectedError();
}
};
ThrowErr.prototype.render = () => <div ref={ref} />;
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
render(
<Receiver>
<div />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledOnceWith(
expectedError
);
});
it('should be called when functional child fails', () => {
function ThrowErr() {
throwExpectedError();
}
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should re-render with new content', () => {
class ThrowErr extends Component {
componentWillMount() {
throw new Error('Error contents');
}
render() {
return 'No error!?!?';
}
}
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
rerender();
expect(scratch).to.have.property('textContent', 'Error: Error contents');
});
it('should be able to adapt and rethrow errors', () => {
let adaptedError;
class Adapter extends Component {
static getDerivedStateFromError(error) {
throw (adaptedError = new Error(
'Adapted ' +
String(error && 'message' in error ? error.message : error)
));
}
render() {
return <div>{this.props.children}</div>;
}
}
function ThrowErr() {
throwExpectedError();
}
sinon.spy(Adapter, 'getDerivedStateFromError');
render(
<Receiver>
<Adapter>
<ThrowErr />
</Adapter>
</Receiver>,
scratch
);
expect(Adapter.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
adaptedError
);
rerender();
expect(scratch).to.have.property('textContent', 'Error: Adapted Error!');
});
it('should bubble on repeated errors', () => {
class Adapter extends Component {
static getDerivedStateFromError(error) {
return { error };
}
render() {
// But fail at doing so
if (this.state.error) {
throw this.state.error;
}
return <div>{this.props.children}</div>;
}
}
function ThrowErr() {
throwExpectedError();
}
sinon.spy(Adapter, 'getDerivedStateFromError');
render(
<Receiver>
<Adapter>
<ThrowErr />
</Adapter>
</Receiver>,
scratch
);
rerender();
expect(Adapter.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
expect(scratch).to.have.property('textContent', 'Error: Error!');
});
it('should bubble on ignored errors', () => {
class Adapter extends Component {
static getDerivedStateFromError(error) {
// Ignore the error
return null;
}
render() {
return <div>{this.props.children}</div>;
}
}
function ThrowErr() {
throw new Error('Error!');
}
sinon.spy(Adapter, 'getDerivedStateFromError');
render(
<Receiver>
<Adapter>
<ThrowErr />
</Adapter>
</Receiver>,
scratch
);
rerender();
expect(Adapter.getDerivedStateFromError).to.have.been.called;
expect(Receiver.getDerivedStateFromError).to.have.been.called;
expect(scratch).to.have.property('textContent', 'Error: Error!');
});
it('should not bubble on caught errors', () => {
class TopReceiver extends Component {
static getDerivedStateFromError(error) {
return { error };
}
render() {
return (
<div>
{this.state.error
? String(this.state.error)
: this.props.children}
</div>
);
}
}
function ThrowErr() {
throwExpectedError();
}
sinon.spy(TopReceiver, 'getDerivedStateFromError');
render(
<TopReceiver>
<Receiver>
<ThrowErr />
</Receiver>
</TopReceiver>,
scratch
);
rerender();
expect(TopReceiver.getDerivedStateFromError).not.to.have.been.called;
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
expect(scratch).to.have.property('textContent', 'Error: Error!');
});
it('should be called through non-component parent elements', () => {
ThrowErr.prototype.render = throwExpectedError;
render(
<Receiver>
<div>
<ThrowErr />
</div>
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it('should bubble up when ref throws on component that is not an error boundary', () => {
const ref = value => {
if (value) {
throwExpectedError();
}
};
function ThrowErr() {
return <div ref={ref} />;
}
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
expect(Receiver.getDerivedStateFromError).to.have.been.calledWith(
expectedError
);
});
it.skip('should successfully unmount constantly throwing ref', () => {
const buggyRef = throwExpectedError;
function ThrowErr() {
return <div ref={buggyRef}>ThrowErr</div>;
}
render(
<Receiver>
<ThrowErr />
</Receiver>,
scratch
);
rerender();
expect(scratch.innerHTML).to.equal('<div>Error: Error!</div>');
});
});
});
| 1 | 14,570 | This test failed unless I swapped the order here. Perhaps some new class transform broke the old form? | preactjs-preact | js
@@ -699,10 +699,14 @@ class Util:
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
- if '_debug_port_' in opts:
+ if 'address=_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
+ elif 'address=' in opts:
+ if not cls.debug_java_port:
+ cls.debug_java_port = opts.split("address=")[1]
+
return opts
@classmethod
| 1 |
import os
import re
import glob
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'
LAMBDA_EVENT_FILE = 'event_file.json'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
EVENT_SOURCE_SQS = 'aws:sqs'
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
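# Naming sketch (illustrative): a function named "my-func" invoked at epoch
# 1584000000000 ms with container id "abc123" would be stored under
#   log group:  /aws/lambda/my-func
#   log stream: 2020/03/12/[LATEST]abc123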
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
# return final result
return result
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return None, 'Lambda executed asynchronously.'
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
env_vars=env_vars, stdin=True)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output))
return result
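# Splitting sketch (illustrative): if a handler process printed
#   "debug line\n{\"ok\": true}"
# then rpartition('\n') above leaves result == '{"ok": true}' and folds
# 'debug line' into the captured log output.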
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
docker_host = config.DOCKER_HOST_FROM_CONTAINER
environment['HOSTNAME'] = docker_host
environment['LOCALSTACK_HOSTNAME'] = docker_host
environment['_HANDLER'] = handler
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
# custom command to execute in the container
command = ''
# if running a Java Lambda, set up classpath arguments
if is_java_lambda(runtime):
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
elif os.path.exists(event_file):
# otherwise, copy only the event file if it exists
copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment, e.g. python2.7, nodejs6.10.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
                ' --interactive' # Keep bash running so the container stays alive.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
LOG.debug(cmd)
run(cmd)
if not mount_volume:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'%s stop -t0 %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'%s rm %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
        :return: A list of localstack docker container names, one per function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
        :return: 1 if the container is running,
                 -1 if the container exists but is not running,
                 0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
            # An empty result means the container does not exist.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
        Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Starts a repeating timer that re-invokes start_idle_container_destroyer_interval
        every 60 seconds, checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.next_port = 1
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = str(self.next_port + self.port_offset)
env_vars['DOCKER_LAMBDA_API_PORT'] = port
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
self.next_port = (self.next_port + 1) % self.max_port
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # env
' %s' # network
' %s' # --rm flag
' %s %s' # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, rm_flag,
docker_image, command,
docker_cmd, lambda_cwd,
docker_cmd)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s -v "%s":/var/task'
' %s'
' %s' # network
' %s' # --rm flag
' %s %s'
) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
network_str, rm_flag, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
with CaptureOutput() as c:
process.run()
result = queue.get()
# TODO: Interweaving stdout/stderr currently not supported
log_output = ''
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
# store logs to CloudWatch
_store_logs(func_details, log_output)
return result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(event))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file, Util.get_java_classpath(main_file))
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def docker_image_for_runtime(cls, runtime):
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
result = ':'.join(entries)
return result
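# Illustration of get_java_classpath (hypothetical layout): for the archive
# '/tmp/app/handler.zip' with '/tmp/app/util.jar' and '/tmp/app/lib/dep.jar'
# on disk, the returned classpath is '.:util.jar:lib/dep.jar:handler.zip' --
# the archive itself is deliberately appended last (see issue #1160 above).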
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
| 1 | 10,739 | extract the port and set to `debug_java_port` | localstack-localstack | py |
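The review comment above is terse; here is a minimal sketch of one way to read it, assuming a JDWP-style `address=<port>` fragment may appear in `LAMBDA_JAVA_OPTS` (the regex and the `else` branch are illustrative guesses, not the project's actual fix; `config` and `get_free_tcp_port` are the helpers already used by the module):

import re

class Util:
    debug_java_port = False

    @classmethod
    def get_java_opts(cls):
        opts = config.LAMBDA_JAVA_OPTS or ''
        if '_debug_port_' in opts:
            # placeholder given: allocate a free port and substitute it
            if not cls.debug_java_port:
                cls.debug_java_port = get_free_tcp_port()
            opts = opts.replace('_debug_port_', '%s' % cls.debug_java_port)
        else:
            # a concrete port was configured: extract it and set
            # `debug_java_port` so the '-p port:port' Docker mapping in
            # prepare_execution() is still generated
            match = re.search(r'address=(\d+)', opts)
            if match:
                cls.debug_java_port = match.group(1)
        return opts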
@@ -93,6 +93,13 @@ def parse_compile_commands_json(logfile, add_compiler_defaults=False):
counter = 0
for entry in data:
sourcefile = entry['file']
+
+ if not os.path.isabs(sourcefile):
+ # Newest versions of intercept-build can create the 'file' in the
+ # JSON Compilation Database as a relative path.
+ sourcefile = os.path.join(os.path.abspath(entry['directory']),
+ sourcefile)
+
lang = option_parser.get_language(sourcefile[sourcefile.rfind('.'):])
if not lang: | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
import os
import sys
import traceback
import subprocess
import shlex
# TODO: This is a cross-subpackage import!
from libcodechecker.log import build_action
from libcodechecker.log import option_parser
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('LOG PARSER')
# -----------------------------------------------------------------------------
def get_compiler_includes(compiler):
"""
Returns a list of default includes of the given compiler.
"""
LOG.debug('getting include paths for ' + compiler)
start_mark = "#include <...> search starts here:"
end_mark = "End of search list."
cmd = compiler + " -E -x c++ - -v " # what if not c++?
include_paths = []
try:
proc = subprocess.Popen(shlex.split(cmd),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate("")
do_append = False
for line in err.splitlines(True):
line = line.strip()
if line.startswith(end_mark):
do_append = False
if do_append:
include_paths.append("-I"+line)
if line.startswith(start_mark):
do_append = True
except OSError as oerr:
LOG.error("Cannot find include paths:" + oerr.strerror+"\n")
return include_paths
# -----------------------------------------------------------------------------
def get_compiler_defines(compiler):
"""
Returns a list of default defines of the given compiler.
"""
cmd = compiler + " -dM -E -"
defines = []
try:
with open(os.devnull, 'r') as FNULL:
proc = subprocess.Popen(shlex.split(cmd),
stdin=FNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate("")
for line in out.splitlines(True):
LOG.debug("define:"+line)
define = line.strip().split(" ")[1:]
d = "-D"+define[0] + '=' + '"' + ' '.join(define[1:]) + '"'
defines.append(d)
except OSError as oerr:
LOG.error("Cannot find defines:" + oerr.strerror+"\n")
return defines
# -----------------------------------------------------------------------------
def parse_compile_commands_json(logfile, add_compiler_defaults=False):
import json
LOG.debug('parse_compile_commands_json: ' + str(add_compiler_defaults))
actions = []
filtered_build_actions = {}
logfile.seek(0)
data = json.load(logfile)
compiler_defines = {}
compiler_includes = {}
counter = 0
for entry in data:
sourcefile = entry['file']
lang = option_parser.get_language(sourcefile[sourcefile.rfind('.'):])
if not lang:
continue
action = build_action.BuildAction(counter)
command = entry['command']
results = option_parser.parse_options(command)
action.original_command = command
action.analyzer_options = results.compile_opts
action.lang = results.lang
action.target = results.arch
# store the compiler built in include paths
# and defines
if add_compiler_defaults and results.compiler:
if not (results.compiler in compiler_defines):
compiler_defines[results.compiler] = \
get_compiler_defines(results.compiler)
compiler_includes[results.compiler] = \
get_compiler_includes(results.compiler)
action.compiler_defines = compiler_defines[results.compiler]
action.compiler_includes = compiler_includes[results.compiler]
if results.action == option_parser.ActionType.COMPILE or \
results.action == option_parser.ActionType.LINK:
action.skip = False
# TODO: check arch.
action.directory = entry['directory']
action.sources = sourcefile
# Filter out duplicate compilation commands.
unique_key = action.cmp_key
if filtered_build_actions.get(unique_key) is None:
filtered_build_actions[unique_key] = action
del action
counter += 1
for ba_hash, ba in filtered_build_actions.items():
actions.append(ba)
return actions
# -----------------------------------------------------------------------------
def parse_log(logfilepath, add_compiler_defaults=False):
LOG.debug('Parsing log file: ' + logfilepath)
actions = []
with open(logfilepath) as logfile:
try:
actions = \
parse_compile_commands_json(logfile, add_compiler_defaults)
except (ValueError, KeyError, TypeError) as ex:
if os.stat(logfilepath).st_size == 0:
LOG.error('The compile database is empty.')
else:
LOG.error('The compile database is not valid.')
LOG.debug(traceback.format_exc())
LOG.debug(ex)
sys.exit(1)
LOG.debug('Parsing log file done.')
return actions
| 1 | 6,593 | When the argument list is `["one", "two three"]` and you concatenate it as you do here, you won't be able to get the original list back with split. You need to annotate the list items better. (Or, if you don't do the splitting yourself, the called shell will, so you need shell escaping. How portable is that?) The main driver for changing this in `intercept-build` was to get rid of these concatenation and splitting errors. The current compilation database parser in Clang understands the `arguments` field... So the question is: why don't you pass the command around as a list instead of a string? | Ericsson-codechecker | c |
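A quick, self-contained illustration of the round-trip problem the reviewer describes (Python 3 shown; in the Python 2-era code above, `pipes.quote` would play the role of `shlex.quote`):

import shlex

args = ['one', 'two three']

# Naive concatenation loses the original token boundaries:
joined = ' '.join(args)                            # 'one two three'
assert joined.split() == ['one', 'two', 'three']   # 3 tokens, not 2

# Shell-style quoting preserves them, at the cost of depending on
# POSIX escaping rules:
quoted = ' '.join(shlex.quote(a) for a in args)    # "one 'two three'"
assert shlex.split(quoted) == args

# Passing the command around as a list -- the compilation database's
# 'arguments' field -- sidesteps the problem entirely.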
@@ -95,6 +95,10 @@ func (a *FakeWebAPI) RegisterPiped(ctx context.Context, req *webservice.Register
}, nil
}
+func (a *FakeWebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "")
+}
+
func (a *FakeWebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
fakeProjectID = "debug-project"
)
// FakeWebAPI implements the fake behaviors for the gRPC definitions of WebAPI.
type FakeWebAPI struct {
}
// NewFakeWebAPI creates a new FakeWebAPI instance.
func NewFakeWebAPI() *FakeWebAPI {
return &FakeWebAPI{}
}
// Register registers all handling of this service into the specified gRPC server.
func (a *FakeWebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *FakeWebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
now := time.Now()
envs := []*model.Environment{
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
Name: "development",
Desc: "For development",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "staging"),
Name: "staging",
Desc: "For staging",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "production"),
Name: "production",
Desc: "For production",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *FakeWebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
return &webservice.RegisterPipedResponse{
Id: "e357d99f-0f83-4ce0-8c8b-27f11f432ef9",
Key: "9bf9752a-54a2-451a-a541-444add56f96b",
}, nil
}
func (a *FakeWebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
now := time.Now()
pipeds := []*webservice.Piped{
{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "bdd71c9e-5406-46fb-a0e4-b2124ea1c1ea",
Desc: "piped for debug 2",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "42e9fa90-22c1-4436-b10c-094044329c27",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
if req.WithStatus {
pipeds[0].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_ONLINE
pipeds[1].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_ONLINE
pipeds[2].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_OFFLINE
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *FakeWebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
now := time.Now()
return &webservice.GetPipedResponse{
Piped: &webservice.Piped{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
ProjectId: fakeProjectID,
Version: "debug-version",
StartedAt: now.Add(-30 * time.Minute).Unix(),
CloudProviders: []*model.Piped_CloudProvider{
{
Name: "kubernetes-default",
Type: model.CloudProviderKubernetes.String(),
},
},
RepositoryIds: []string{
"piped-repo-1",
"piped-repo-2",
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}, nil
}
func (a *FakeWebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
return &webservice.AddApplicationResponse{}, nil
}
func (a *FakeWebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
return &webservice.EnableApplicationResponse{}, nil
}
func (a *FakeWebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
return &webservice.DisableApplicationResponse{}, nil
}
func (a *FakeWebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
now := time.Now()
fakeApplications := []*model.Application{
{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
			},
			Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
				ShortReason: "Short reason",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListApplicationsResponse{
Applications: fakeApplications,
}, nil
}
func (a *FakeWebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
return &webservice.SyncApplicationResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
now := time.Now()
application := model.Application{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
},
Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
			ShortReason: "Short reason",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetApplicationResponse{
Application: &application,
}, nil
}
func (a *FakeWebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
now := time.Now()
deploymentTime := now
fakeDeployments := make([]*model.Deployment, 15)
for i := 0; i < 15; i++ {
// 5 hour intervals
deploymentTime := deploymentTime.Add(time.Duration(-5*i) * time.Hour)
fakeDeployments[i] = &model.Deployment{
Id: fmt.Sprintf("debug-deployment-id-%02d", i),
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: deploymentTime.Unix(),
},
Commander: "",
Timestamp: deploymentTime.Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Description: fmt.Sprintf("This deployment is debug-%02d", i),
Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: deploymentTime.Unix(),
UpdatedAt: deploymentTime.Unix(),
}
}
return &webservice.ListDeploymentsResponse{
Deployments: fakeDeployments,
}, nil
}
func (a *FakeWebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
now := time.Now()
resp := &model.Deployment{
Id: "debug-deployment-id-01",
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Add(-30 * time.Minute).Unix(),
},
Commander: "cakecatz",
Timestamp: now.Add(-30 * time.Minute).Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Description: "This deployment is debug",
Status: model.DeploymentStatus_DEPLOYMENT_RUNNING,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetDeploymentResponse{
Deployment: resp,
}, nil
}
func (a *FakeWebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
startTime := time.Now().Add(-10 * time.Minute)
resp := []*model.LogBlock{
{
Index: 1,
Log: "+ make build",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Unix(),
},
{
Index: 2,
Log: "bazelisk --output_base=/workspace/bazel_out build --config=ci -- //...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(5 * time.Second).Unix(),
},
{
Index: 3,
Log: "2020/06/01 08:52:07 Downloading https://releases.bazel.build/3.1.0/release/bazel-3.1.0-linux-x86_64...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(10 * time.Second).Unix(),
},
{
Index: 4,
Log: "Extracting Bazel installation...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(15 * time.Second).Unix(),
},
{
Index: 5,
Log: "Starting local Bazel server and connecting to it...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(20 * time.Second).Unix(),
},
{
Index: 6,
Log: "(08:52:14) Loading: 0 packages loaded",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(30 * time.Second).Unix(),
},
{
Index: 7,
Log: "(08:53:21) Analyzing: 157 targets (88 packages loaded, 0 targets configured)",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(35 * time.Second).Unix(),
},
{
Index: 8,
Log: "Error: Error building: logged 2 error(s)",
Severity: model.LogSeverity_ERROR,
CreatedAt: startTime.Add(45 * time.Second).Unix(),
},
}
return &webservice.GetStageLogResponse{
Blocks: resp,
}, nil
}
func (a *FakeWebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
return &webservice.CancelDeploymentResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
return &webservice.ApproveStageResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
now := time.Now()
snapshot := &model.ApplicationLiveStateSnapshot{
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
Kubernetes: &model.KubernetesApplicationLiveState{
Resources: []*model.KubernetesResourceState{
{
Id: "f2c832a3-1f5b-4982-8f6e-72345ecb3c82",
Name: "demo-application",
ApiVersion: "networking.k8s.io/v1beta1",
Kind: "Ingress",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8423fb53-5170-4864-a7d2-b84f8d36cb02",
Name: "demo-application",
ApiVersion: "v1",
Kind: "Service",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
Name: "demo-application",
ApiVersion: "apps/v1",
Kind: "Deployment",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8621f186-6641-4f7a-9be4-5983eb647f8d",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
Name: "demo-application-9504e8601a",
ApiVersion: "apps/v1",
Kind: "ReplicaSet",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "ae5d0031-1f63-4396-b929-fa9987d1e6de",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-7vrdw",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "f55c7891-ba25-44bb-bca4-ffbc16b0089f",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-vlgd5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "c2a81415-5bbf-44e8-9101-98bbd636bbeb",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-tmwp5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
},
Version: &model.ApplicationLiveStateVersion{
Index: 1,
Timestamp: now.Unix(),
},
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
func (a *FakeWebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
now := time.Now()
cmd := model.Command{
Id: uuid.New().String(),
PipedId: "debug-piped",
ApplicationId: "debug-application-id",
DeploymentId: "debug-deployment-id",
Commander: "anonymous",
Status: model.CommandStatus_COMMAND_NOT_HANDLED_YET,
Type: model.Command_CANCEL_DEPLOYMENT,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: "debug-deployment-id-01",
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetCommandResponse{
Command: &cmd,
}, nil
}
| 1 | 8,256 | `ctx` is unused in EnablePiped | pipe-cd-pipe | go |
@@ -57,7 +57,9 @@ class _Session(object):
return hashlib.sha256(auth_string + "@" +
_Session.__initial_salt).hexdigest()
- def __init__(self, token, phash):
+ def __init__(self, token, phash, user, authenticated):
+ self.authenticated = authenticated
+ self.user = user
self.token = token
self.persistent_hash = phash
self.last_access = datetime.now() | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Handles the allocation and destruction of privileged sessions associated
with a particular CodeChecker server.
"""
from datetime import datetime
import getpass
import hashlib
import json
import os
import shutil
import stat
import time
import uuid
import portalocker
from libcodechecker.logger import LoggerFactory
unsupported_methods = []
try:
from libcodechecker.libauth import cc_ldap
except ImportError:
unsupported_methods.append("ldap")
try:
from libcodechecker.libauth import cc_pam
except ImportError:
unsupported_methods.append("pam")
LOG = LoggerFactory.get_new_logger("SESSION MANAGER")
SESSION_COOKIE_NAME = "__ccPrivilegedAccessToken"
session_lifetimes = {}
class _Session(object):
"""A session for an authenticated, privileged client connection."""
# Create an initial salt from system environment for use with the session
# permanent persistency routine.
__initial_salt = hashlib.sha256(SESSION_COOKIE_NAME + "__" +
str(time.time()) + "__" +
os.urandom(16)).hexdigest()
@staticmethod
def calc_persistency_hash(auth_string):
"""Calculates a more secure persistency hash for the session. This
persistency hash is intended to be used for the "session recycle"
feature to prevent NAT endpoints from accidentally getting each
other's session."""
return hashlib.sha256(auth_string + "@" +
_Session.__initial_salt).hexdigest()
def __init__(self, token, phash):
self.token = token
self.persistent_hash = phash
self.last_access = datetime.now()
def still_valid(self, do_revalidate=False):
"""Returns if the session is still valid, and optionally revalidates
it. A session is valid in its soft-lifetime."""
if (datetime.now() - self.last_access).total_seconds() <= \
session_lifetimes["soft"] \
and (datetime.now() - self.last_access).total_seconds() <= \
session_lifetimes["hard"]:
# If the session is still valid within the "reuse enabled" (soft)
# past and the check comes from a real user access, we revalidate
# the session by extending its lifetime --- the user retains their
# data.
if do_revalidate:
self.revalidate()
# The session is still valid if it has been used in the past
# (length of "past" is up to server host).
return True
# If the session is older than the "soft" limit,
# the user needs to authenticate again.
return False
def still_reusable(self):
"""Returns whether the session is still reusable, ie. within its
hard lifetime: while a session is reusable, a valid authentication
from the session's user will return the user to the session."""
return (datetime.now() - self.last_access).total_seconds() <= \
session_lifetimes["hard"]
def revalidate(self):
if self.still_reusable():
# A session is only revalidated if it has yet to exceed its
# "hard" lifetime. After a session hasn't been used for this
# timeframe, it can NOT be resurrected at all --- the user needs
# to log in into a brand-new session.
self.last_access = datetime.now()
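# Illustration of the two lifetimes above (hypothetical values): with
# session_lifetimes = {"soft": 60, "hard": 300}, a session idle for 90
# seconds fails still_valid() but still passes still_reusable(), so a
# fresh successful login within the 300-second hard window resurrects
# the same session instead of minting a new token.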
def check_file_owner_rw(file_to_check):
"""
Check the file permissions.
Return:
True if only the owner can read or write the file.
False if other users or groups can read or write the file.
"""
mode = os.stat(file_to_check)[stat.ST_MODE]
if mode & stat.S_IRGRP \
or mode & stat.S_IWGRP \
or mode & stat.S_IROTH \
or mode & stat.S_IWOTH:
LOG.warning("'{0}' is readable by users other than you!"
" This poses a risk of leaking sensitive"
" information, such as passwords, session tokens, etc.!\n"
"Please 'chmod 0600 {0}' so only you can access the file."
.format(file_to_check))
return False
return True
def load_session_cfg(session_cfg_file):
"""
Tries to load the session config file which should be a
valid json file, if loading fails returns an empty dict.
"""
scfg_dict = {}
try:
with open(session_cfg_file, 'r') as scfg:
scfg_dict = json.loads(scfg.read())
check_file_owner_rw(session_cfg_file)
except IOError:
LOG.debug('Failed to open user authentication file: ' +
session_cfg_file)
except ValueError as verr:
LOG.warning(verr)
LOG.warning('Not valid user authentication file: ' +
session_cfg_file)
return scfg_dict
class SessionManager:
CodeChecker_Workspace = None
__valid_sessions = []
__logins_since_prune = 0
def __init__(self):
LOG.debug('Loading session config')
# Check whether workspace's configuration exists.
session_cfg_file = os.path.join(SessionManager.CodeChecker_Workspace,
"session_config.json")
if not os.path.exists(session_cfg_file):
LOG.info("CodeChecker server's authentication example "
"configuration file created at " + session_cfg_file)
shutil.copyfile(os.path.join(os.environ['CC_PACKAGE_ROOT'],
"config", "session_config.json"),
session_cfg_file)
LOG.debug(session_cfg_file)
scfg_dict = {'authentication': {'enabled': False}}
scfg_dict.update(load_session_cfg(session_cfg_file))
self.__auth_config = scfg_dict["authentication"]
# If no methods are configured as enabled, disable authentication.
if scfg_dict["authentication"].get("enabled"):
found_auth_method = False
if "method_dictionary" in self.__auth_config and \
self.__auth_config["method_dictionary"].get("enabled"):
found_auth_method = True
if "method_ldap" in self.__auth_config and \
self.__auth_config["method_ldap"].get("enabled"):
if "ldap" not in unsupported_methods:
found_auth_method = True
else:
LOG.warning("LDAP authentication was enabled but "
"prerequisites are NOT installed on the system"
"... Disabling LDAP authentication.")
self.__auth_config["method_ldap"]["enabled"] = False
if "method_pam" in self.__auth_config and \
self.__auth_config["method_pam"].get("enabled"):
if "pam" not in unsupported_methods:
found_auth_method = True
else:
LOG.warning("PAM authentication was enabled but "
"prerequisites are NOT installed on the system"
"... Disabling PAM authentication.")
self.__auth_config["method_pam"]["enabled"] = False
#
if not found_auth_method:
LOG.warning("Authentication is enabled but no valid "
"authentication backends are configured... "
"Falling back to no authentication.")
self.__auth_config["enabled"] = False
session_lifetimes["soft"] = \
self.__auth_config.get("soft_expire") or 60
session_lifetimes["hard"] = \
self.__auth_config.get("session_lifetime") or 300
def isEnabled(self):
return self.__auth_config.get("enabled")
def getRealm(self):
return {
"realm": self.__auth_config.get("realm_name"),
"error": self.__auth_config.get("realm_error")
}
def __handle_validation(self, auth_string):
"""Validate an oncoming authorization request
against some authority controller."""
return self.__try_auth_dictionary(auth_string) \
or self.__try_auth_pam(auth_string) \
or self.__try_auth_ldap(auth_string)
def __is_method_enabled(self, method):
return method not in unsupported_methods and \
"method_" + method in self.__auth_config and \
self.__auth_config["method_" + method].get("enabled")
def __try_auth_dictionary(self, auth_string):
return self.__is_method_enabled("dictionary") and \
auth_string in \
self.__auth_config.get("method_dictionary").get("auths")
def __try_auth_pam(self, auth_string):
"""
Try to authenticate user based on the PAM configuration.
"""
if self.__is_method_enabled("pam"):
username, password = auth_string.split(":")
return cc_pam.auth_user(self.__auth_config["method_pam"],
username, password)
return False
def __try_auth_ldap(self, auth_string):
"""
Try to authenticate user to all the configured authorities.
"""
if self.__is_method_enabled("ldap"):
username, password = auth_string.split(":")
ldap_authorities = self.__auth_config["method_ldap"] \
.get("authorities")
for ldap_conf in ldap_authorities:
if cc_ldap.auth_user(ldap_conf, username, password):
return True
return False
def create_or_get_session(self, auth_string):
"""Create a new session for the given auth-string, if it is valid. If
an existing session is found, return that instead."""
if not self.__auth_config["enabled"]:
return None
self.__logins_since_prune += 1
if self.__logins_since_prune >= \
self.__auth_config["logins_until_cleanup"]:
self.__cleanup_sessions()
if self.__handle_validation(auth_string):
session_already = next(
(s for s
in SessionManager.__valid_sessions if s.still_reusable() and
s.persistent_hash ==
_Session.calc_persistency_hash(auth_string)),
None)
if session_already:
session_already.revalidate()
session = session_already
else:
# TODO: Use a more secure way for token generation?
token = uuid.UUID(bytes=os.urandom(16)).__str__().replace("-",
"")
session = _Session(token,
_Session.calc_persistency_hash(auth_string))
SessionManager.__valid_sessions.append(session)
return session.token
else:
return None
def is_valid(self, token, access=False):
"""Validates a given token (cookie) against
the known list of privileged sessions."""
if not self.isEnabled():
return True
else:
return any(_sess.token == token and _sess.still_valid(access)
for _sess in SessionManager.__valid_sessions)
def invalidate(self, token):
"""Remove a user's previous session from the store."""
for session in SessionManager.__valid_sessions[:]:
if session.token == token:
SessionManager.__valid_sessions.remove(session)
return True
return False
def __cleanup_sessions(self):
SessionManager.__valid_sessions = [s for s
in SessionManager.__valid_sessions
if s.still_reusable()]
self.__logins_since_prune = 0
class SessionManager_Client:
def __init__(self):
LOG.debug('Loading session config')
# Check whether user's configuration exists.
user_home = os.path.expanduser("~")
session_cfg_file = os.path.join(user_home,
".codechecker.passwords.json")
LOG.debug(session_cfg_file)
if not os.path.exists(session_cfg_file):
# Filenames were different pre-5.8.
old_file = os.path.join(user_home, ".codechecker_passwords.json")
if os.path.exists(old_file):
shutil.move(old_file, session_cfg_file)
scfg_dict = load_session_cfg(session_cfg_file)
if not scfg_dict.get("credentials"):
scfg_dict["credentials"] = {}
self.__save = scfg_dict
self.__autologin = scfg_dict.get("client_autologin") \
if "client_autologin" in scfg_dict else True
# Check and load token storage for user
self.token_file = os.path.join(user_home, ".codechecker.session.json")
LOG.debug(self.token_file)
if not os.path.exists(self.token_file):
# Filenames were different pre-5.8.
old_file = os.path.join(user_home,
".codechecker_{0}.session.json".format(
getpass.getuser()
))
if os.path.exists(old_file):
shutil.move(old_file, self.token_file)
if os.path.exists(self.token_file):
with open(self.token_file, 'r') as f:
input = json.loads(f.read())
self.__tokens = input.get("tokens")
check_file_owner_rw(self.token_file)
else:
with open(self.token_file, 'w') as f:
json.dump({'tokens': {}}, f)
os.chmod(self.token_file, stat.S_IRUSR | stat.S_IWUSR)
self.__tokens = {}
def is_autologin_enabled(self):
return self.__autologin
def getToken(self, host, port):
return self.__tokens.get("{0}:{1}".format(host, port))
def getAuthString(self, host, port):
ret = self.__save["credentials"].get("{0}:{1}".format(host, port))
if not ret:
ret = self.__save["credentials"].get(host)
if not ret:
ret = self.__save["credentials"].get("*:{0}".format(port))
if not ret:
ret = self.__save["credentials"].get("*")
return ret
def saveToken(self, host, port, token, destroy=False):
if destroy:
del self.__tokens["{0}:{1}".format(host, port)]
else:
self.__tokens["{0}:{1}".format(host, port)] = token
with open(self.token_file, 'w') as scfg:
portalocker.lock(scfg, portalocker.LOCK_EX)
json.dump({'tokens': self.__tokens}, scfg,
indent=2, sort_keys=True)
portalocker.unlock(scfg)
| 1 | 7,066 | Why do we have both a list of valid sessions and an instance variable if a session is destroyed? | Ericsson-codechecker | c |
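A minimal sketch of one way to keep session validity in a single place, as the review question above suggests — a dict keyed by token instead of a class-level list that is scanned and copied (a hypothetical refactor, not CodeChecker's actual code; still_valid() and still_reusable() are assumed to behave as in the _Session class above):

class TokenKeyedSessionStore:
    """Hypothetical store: a single token -> session mapping, so that
    validation, invalidation, and cleanup all consult one structure."""
    def __init__(self):
        self.__sessions = {}  # token -> _Session
    def add(self, session):
        self.__sessions[session.token] = session
    def is_valid(self, token, access=False):
        session = self.__sessions.get(token)
        return session is not None and session.still_valid(access)
    def invalidate(self, token):
        # O(1) removal; no list copy or linear scan needed.
        return self.__sessions.pop(token, None) is not None
    def cleanup(self):
        self.__sessions = {t: s for t, s in self.__sessions.items()
                           if s.still_reusable()}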
@@ -258,10 +258,11 @@ func (au *accountUpdates) committedUpTo(rnd basics.Round) basics.Round {
// Keep track of how many changes to each account we flush to the
// account DB, so that we can drop the corresponding refcounts in
// au.accounts.
- flushcount := make(map[basics.Address]int)
+ var flushcount map[basics.Address]int
offset := uint64(newBase - au.dbRound)
err := au.dbs.wdb.Atomic(func(tx *sql.Tx) error {
+ flushcount = make(map[basics.Address]int)
for i := uint64(0); i < offset; i++ {
rnd := au.dbRound + basics.Round(i) + 1
err := accountsNewRound(tx, rnd, au.deltas[i], au.roundTotals[i+1].RewardsLevel, au.protos[i+1]) | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"database/sql"
"fmt"
"time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/logging"
)
// A modifiedAccount represents an account that has been modified since
// the persistent state stored in the account DB (i.e., in the range of
// rounds covered by the accountUpdates tracker).
type modifiedAccount struct {
// data stores the most recent AccountData for this modified
// account.
data basics.AccountData
	// ndeltas keeps track of how many times this account appears in
// accountUpdates.deltas. This is used to evict modifiedAccount
// entries when all changes to an account have been reflected in
// the account DB, and no outstanding modifications remain.
ndeltas int
}
type accountUpdates struct {
// Connection to the database.
dbs dbPair
// Prepared SQL statements for fast accounts DB lookups.
accountsq *accountsDbQueries
// dbRound is always exactly accountsRound(),
// cached to avoid SQL queries.
dbRound basics.Round
// deltas stores updates for every round after dbRound.
deltas []map[basics.Address]accountDelta
// accounts stores the most recent account state for every
// address that appears in deltas.
accounts map[basics.Address]modifiedAccount
	// protos stores consensus parameters for dbRound and every
// round after it; i.e., protos is one longer than deltas.
protos []config.ConsensusParams
// totals stores the totals for dbRound and every round after it;
// i.e., totals is one longer than deltas.
roundTotals []AccountTotals
// initAccounts specifies initial account values for database.
initAccounts map[basics.Address]basics.AccountData
// initProto specifies the initial consensus parameters.
initProto config.ConsensusParams
// log copied from ledger
log logging.Logger
// lastFlushTime is the time we last flushed updates to
// the accounts DB (bumping dbRound).
lastFlushTime time.Time
}
func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error {
au.dbs = l.trackerDB()
au.log = l.trackerLog()
if au.initAccounts == nil {
return fmt.Errorf("accountUpdates.loadFromDisk: initAccounts not set")
}
err := au.dbs.wdb.Atomic(func(tx *sql.Tx) error {
var err0 error
err0 = accountsInit(tx, au.initAccounts, au.initProto)
if err0 != nil {
return err0
}
au.dbRound, err0 = accountsRound(tx)
if err0 != nil {
return err0
}
totals, err0 := accountsTotals(tx)
if err0 != nil {
return err0
}
au.roundTotals = []AccountTotals{totals}
return nil
})
if err != nil {
return err
}
au.accountsq, err = accountsDbInit(au.dbs.rdb.Handle)
if err != nil {
return err
}
hdr, err := l.BlockHdr(au.dbRound)
if err != nil {
return err
}
au.protos = []config.ConsensusParams{config.Consensus[hdr.CurrentProtocol]}
latest := l.Latest()
au.deltas = nil
au.accounts = make(map[basics.Address]modifiedAccount)
loaded := au.dbRound
for loaded < latest {
next := loaded + 1
blk, aux, err := l.blockAux(next)
if err != nil {
return err
}
delta, err := l.trackerEvalVerified(blk, aux)
if err != nil {
return err
}
au.newBlock(blk, delta)
loaded = next
}
return nil
}
func (au *accountUpdates) close() {
}
func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err error) {
if rnd < au.dbRound {
err = fmt.Errorf("round %d before dbRound %d", rnd, au.dbRound)
return
}
off := uint64(rnd - au.dbRound)
if off > uint64(len(au.deltas)) {
err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
return
}
return off, nil
}
func (au *accountUpdates) lookup(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return
}
offsetForRewards := offset
defer func() {
if withRewards {
totals := au.roundTotals[offsetForRewards]
proto := au.protos[offsetForRewards]
data = data.WithUpdatedRewards(proto, totals.RewardsLevel)
}
}()
// Check if this is the most recent round, in which case, we can
// use a cache of the most recent account state.
if offset == uint64(len(au.deltas)) {
macct, ok := au.accounts[addr]
if ok {
return macct.data, nil
}
} else {
// Check if the account has been updated recently. Traverse the deltas
// backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
d, ok := au.deltas[offset][addr]
if ok {
return d.new, nil
}
}
}
// No updates of this account in the in-memory deltas; use on-disk DB.
// The check in roundOffset() made sure the round is exactly the one
// present in the on-disk DB. As an optimization, we avoid creating
// a separate transaction here, and directly use a prepared SQL query
// against the database.
return au.accountsq.lookup(addr)
}
func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address]basics.AccountData, err error) {
offsetLimit, err := au.roundOffset(rnd)
if err != nil {
return
}
err = au.dbs.rdb.Atomic(func(tx *sql.Tx) error {
var err0 error
bals, err0 = accountsAll(tx)
return err0
})
if err != nil {
return
}
for offset := uint64(0); offset < offsetLimit; offset++ {
for addr, delta := range au.deltas[offset] {
bals[addr] = delta.new
}
}
return
}
func (au *accountUpdates) committedUpTo(rnd basics.Round) basics.Round {
lookback := basics.Round(au.protos[len(au.protos)-1].MaxBalLookback)
if rnd < lookback {
return 0
}
newBase := rnd - lookback
if newBase <= au.dbRound {
// Already forgotten
return au.dbRound
}
if newBase > au.dbRound+basics.Round(len(au.deltas)) {
au.log.Panicf("committedUpTo: block %d too far in the future, lookback %d, dbRound %d, deltas %d", rnd, lookback, au.dbRound, len(au.deltas))
}
// If we recently flushed, wait to aggregate some more blocks.
flushTime := time.Now()
if !flushTime.After(au.lastFlushTime.Add(5 * time.Second)) {
return au.dbRound
}
// Keep track of how many changes to each account we flush to the
// account DB, so that we can drop the corresponding refcounts in
// au.accounts.
flushcount := make(map[basics.Address]int)
offset := uint64(newBase - au.dbRound)
err := au.dbs.wdb.Atomic(func(tx *sql.Tx) error {
for i := uint64(0); i < offset; i++ {
rnd := au.dbRound + basics.Round(i) + 1
err := accountsNewRound(tx, rnd, au.deltas[i], au.roundTotals[i+1].RewardsLevel, au.protos[i+1])
if err != nil {
return err
}
for addr := range au.deltas[i] {
flushcount[addr] = flushcount[addr] + 1
}
}
return nil
})
if err != nil {
au.log.Warnf("unable to advance account snapshot: %v", err)
return au.dbRound
}
// Drop reference counts to modified accounts, and evict them
// from in-memory cache when no references remain.
for addr, cnt := range flushcount {
macct, ok := au.accounts[addr]
if !ok {
au.log.Panicf("inconsistency: flushed %d changes to %s, but not in au.accounts", cnt, addr)
}
if cnt > macct.ndeltas {
au.log.Panicf("inconsistency: flushed %d changes to %s, but au.accounts had %d", cnt, addr, macct.ndeltas)
}
macct.ndeltas -= cnt
if macct.ndeltas == 0 {
delete(au.accounts, addr)
} else {
au.accounts[addr] = macct
}
}
au.deltas = au.deltas[offset:]
au.protos = au.protos[offset:]
au.roundTotals = au.roundTotals[offset:]
au.dbRound = newBase
au.lastFlushTime = flushTime
return au.dbRound
}
func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta stateDelta) {
proto := config.Consensus[blk.CurrentProtocol]
rnd := blk.Round()
if rnd <= au.latest() {
// Duplicate, ignore.
return
}
if rnd != au.latest()+1 {
au.log.Panicf("accountUpdates: newBlock %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
}
au.deltas = append(au.deltas, delta.accts)
au.protos = append(au.protos, proto)
var ot basics.OverflowTracker
newTotals := au.roundTotals[len(au.roundTotals)-1]
allBefore := newTotals.All()
newTotals.applyRewards(delta.hdr.RewardsLevel, &ot)
for addr, data := range delta.accts {
newTotals.delAccount(proto, data.old, &ot)
newTotals.addAccount(proto, data.new, &ot)
macct := au.accounts[addr]
macct.ndeltas++
macct.data = data.new
au.accounts[addr] = macct
}
if ot.Overflowed {
au.log.Panicf("accountUpdates: newBlock %d overflowed totals", rnd)
}
allAfter := newTotals.All()
if allBefore != allAfter {
au.log.Panicf("accountUpdates: sum of money changed from %d to %d", allBefore.Raw, allAfter.Raw)
}
au.roundTotals = append(au.roundTotals, newTotals)
}
func (au *accountUpdates) latest() basics.Round {
return au.dbRound + basics.Round(len(au.deltas))
}
func (au *accountUpdates) totals(rnd basics.Round) (totals AccountTotals, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return
}
totals = au.roundTotals[offset]
return
}
 | 1 | 36,265 | nit: I think it would be "cleaner" to set the `flushcount` to nil in case of an error. Nice catch ;-) | algorand-go-algorand | go |
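A minimal, self-contained sketch of the nit above — explicitly resetting `flushcount` to nil when the transaction fails, so partially populated counts can never reach the refcount-decrement loop (the `atomic` helper is a stand-in for `au.dbs.wdb.Atomic`, not go-algorand's API):

package main

import (
	"errors"
	"fmt"
)

// atomic stands in for the DB transaction wrapper: when fn returns an
// error, the caller must treat fn's side effects as void.
func atomic(fn func() error) error { return fn() }

func main() {
	var flushcount map[string]int
	err := atomic(func() error {
		flushcount = make(map[string]int)
		flushcount["some-address"]++ // populated mid-transaction
		return errors.New("accountsNewRound failed")
	})
	if err != nil {
		// The reviewer's suggestion: drop the partial counts explicitly.
		flushcount = nil
	}
	fmt.Println("flushcount is nil after failure:", flushcount == nil)
}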
@@ -19,13 +19,12 @@
package org.phoenicis.repository.repositoryTypes;
import org.junit.Test;
-import org.phoenicis.repository.repositoryTypes.NullRepository;
import static org.junit.Assert.assertEquals;
public class NullRepositoryTest {
@Test
public void testNullRepositoryTest() {
- assertEquals(0, new NullRepository().fetchInstallableApplications().size());
+ assertEquals(null, new NullRepository().fetchInstallableApplications());
}
} | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.repository.repositoryTypes;
import org.junit.Test;
import org.phoenicis.repository.repositoryTypes.NullRepository;
import static org.junit.Assert.assertEquals;
public class NullRepositoryTest {
@Test
public void testNullRepositoryTest() {
assertEquals(0, new NullRepository().fetchInstallableApplications().size());
}
} | 1 | 10,215 | You may want to use assertNull | PhoenicisOrg-phoenicis | java |
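A minimal sketch of the suggestion above — `assertNull` instead of `assertEquals(null, …)` — assuming the same JUnit 4 setup as the test in this record:

import static org.junit.Assert.assertNull;

import org.junit.Test;

public class NullRepositoryTest {
    @Test
    public void testNullRepositoryTest() {
        // assertNull states the intent directly and produces a clearer
        // failure message than assertEquals(null, ...).
        assertNull(new NullRepository().fetchInstallableApplications());
    }
}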
@@ -58,9 +58,12 @@ public class TracerTest {
public void shouldBeAbleToCreateATracer() {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
+ long timeStamp = 1593493828L;
try (Span span = tracer.getCurrentContext().createSpan("parent")) {
span.setAttribute("cheese", "gouda");
+ span.addEvent("Grating cheese");
+ span.addEvent("Melting cheese", timeStamp);
span.setStatus(Status.NOT_FOUND);
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.tracing.opentelemetry;
import io.opentelemetry.OpenTelemetry;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.trace.TracerSdkProvider;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.export.SimpleSpansProcessor;
import io.opentelemetry.sdk.trace.export.SpanExporter;
import org.junit.Test;
import org.openqa.selenium.grid.web.CombinedHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.Routable;
import org.openqa.selenium.remote.http.Route;
import org.openqa.selenium.remote.tracing.HttpTracing;
import org.openqa.selenium.remote.tracing.Span;
import org.openqa.selenium.remote.tracing.Status;
import org.openqa.selenium.remote.tracing.Tracer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.tracing.HttpTracing.newSpanAsChildOf;
public class TracerTest {
@Test
public void shouldBeAbleToCreateATracer() {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
try (Span span = tracer.getCurrentContext().createSpan("parent")) {
span.setAttribute("cheese", "gouda");
span.setStatus(Status.NOT_FOUND);
}
Set<SpanData> values = allSpans.stream()
.filter(data -> data.getAttributes().containsKey("cheese"))
.collect(Collectors.toSet());
assertThat(values).hasSize(1);
assertThat(values).element(0)
.extracting(SpanData::getStatus).isEqualTo(io.opentelemetry.trace.Status.NOT_FOUND);
assertThat(values).element(0)
.extracting(el -> el.getAttributes().get("cheese").getStringValue()).isEqualTo("gouda");
}
@Test
public void nestingSpansInTheSameThreadShouldWork() {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
try (Span parent = tracer.getCurrentContext().createSpan("parent")) {
try (Span child = parent.createSpan("child")) {
child.setAttribute("cheese", "camembert");
}
}
SpanData parent = allSpans.stream().filter(data -> data.getName().equals("parent"))
.findFirst().orElseThrow(NoSuchElementException::new);
SpanData child = allSpans.stream().filter(data -> data.getName().equals("child"))
.findFirst().orElseThrow(NoSuchElementException::new);
assertThat(child.getParentSpanId()).isEqualTo(parent.getSpanId());
}
@Test
public void nestingSpansFromDifferentThreadsIsFineToo() throws ExecutionException, InterruptedException {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
try (Span parent = tracer.getCurrentContext().createSpan("parent")) {
Future<?> future = Executors.newSingleThreadExecutor().submit(() -> {
try (Span child = parent.createSpan("child")) {
child.setAttribute("cheese", "gruyere");
}
});
future.get();
}
SpanData parent = allSpans.stream().filter(data -> data.getName().equals("parent"))
.findFirst().orElseThrow(NoSuchElementException::new);
SpanData child = allSpans.stream().filter(data -> data.getName().equals("child"))
.findFirst().orElseThrow(NoSuchElementException::new);
assertThat(child.getParentSpanId()).isEqualTo(parent.getSpanId());
}
@Test
public void currentSpanIsKeptOnTracerCorrectlyWithinSameThread() {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
try (Span parent = tracer.getCurrentContext().createSpan("parent")) {
assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId());
try (Span child = parent.createSpan("child")) {
assertThat(child.getId()).isEqualTo(tracer.getCurrentContext().getId());
}
assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId());
}
}
@Test
public void currentSpanIsKeptOnTracerCorrectlyBetweenThreads() throws ExecutionException, InterruptedException {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
try (Span parent = tracer.getCurrentContext().createSpan("parent")) {
assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId());
Future<?> future = Executors.newSingleThreadExecutor().submit(() -> {
Span child = null;
try {
child = parent.createSpan("child");
assertThat(child.getId()).isEqualTo(tracer.getCurrentContext().getId());
} finally {
assert child != null;
child.close();
}
        // At this point, the parent span is undefined, but shouldn't be null
assertThat(parent.getId()).isNotEqualTo(tracer.getCurrentContext().getId());
assertThat(child.getId()).isNotEqualTo(tracer.getCurrentContext().getId());
assertThat(tracer.getCurrentContext().getId()).isNotNull();
});
future.get();
assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId());
}
}
@Test
public void cleverShenanigansRepresentingWhatWeSeeInTheRouter() {
List<SpanData> allSpans = new ArrayList<>();
Tracer tracer = createTracer(allSpans);
CombinedHandler handler = new CombinedHandler();
ExecutorService executors = Executors.newCachedThreadPool();
handler.addHandler(Route.get("/status").to(() -> req -> {
try (Span span = HttpTracing.newSpanAsChildOf(tracer, req, "status")) {
executors.submit(span.wrap(() -> new HashSet<>(Arrays.asList("cheese", "peas")))).get();
CompletableFuture<String> toReturn = new CompletableFuture<>();
executors.submit(() -> {
try {
HttpRequest cheeseReq = new HttpRequest(GET, "/cheeses");
HttpTracing.inject(tracer, span, cheeseReq);
handler.execute(cheeseReq);
toReturn.complete("nom, nom, nom");
} catch (RuntimeException e) {
toReturn.completeExceptionally(e);
}
});
toReturn.get();
} catch (Exception e) {
throw new RuntimeException(e);
}
return new HttpResponse();
}));
handler.addHandler(Route.get("/cheeses").to(() -> req -> new HttpResponse()));
Routable routable = handler.with(delegate -> req -> {
try (Span span = newSpanAsChildOf(tracer, req, "httpclient.execute")) {
return delegate.execute(req);
}
});
routable.execute(new HttpRequest(GET, "/"));
}
private Tracer createTracer(List<SpanData> exportTo) {
TracerSdkProvider provider = OpenTelemetrySdk.getTracerProvider();
provider.addSpanProcessor(SimpleSpansProcessor.create(new SpanExporter() {
@Override
public ResultCode export(Collection<SpanData> spans) {
exportTo.addAll(spans);
return ResultCode.SUCCESS;
}
@Override public ResultCode flush() {
return ResultCode.SUCCESS;
}
@Override
public void shutdown() {
}
}));
io.opentelemetry.trace.Tracer otTracer = provider.get("get");
return new OpenTelemetryTracer(
otTracer,
OpenTelemetry.getPropagators().getHttpTextFormat());
}
}
| 1 | 17,762 | Break out tests for events into their own tests rather than placing them in other ones. That makes it easier for us to figure out where problems lie and to do a TDD-driven implementation over new APIs. | SeleniumHQ-selenium | py |
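A minimal sketch of the review ask above — the event calls pulled out of shouldBeAbleToCreateATracer into their own test method inside TracerTest (the test name is hypothetical; createTracer and the two addEvent overloads come from the diff in this record):

@Test
public void shouldBeAbleToAddEventsToASpan() {
  List<SpanData> allSpans = new ArrayList<>();
  Tracer tracer = createTracer(allSpans);

  long timeStamp = 1593493828L;
  try (Span span = tracer.getCurrentContext().createSpan("parent")) {
    // Exercised in isolation, so an event regression cannot be confused
    // with the attribute/status assertions in other tests.
    span.addEvent("Grating cheese");
    span.addEvent("Melting cheese", timeStamp);
  }

  assertThat(allSpans).hasSize(1);
  // Assertions on the recorded events' names and timestamps would go here,
  // via whichever accessor the SpanData version in use exposes.
}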
@@ -224,7 +224,6 @@ struct wlr_output *wlr_wl_output_create(struct wlr_backend *_backend) {
wlr_output->width = 640;
wlr_output->height = 480;
- wlr_output->scale = 1;
strncpy(wlr_output->make, "wayland", sizeof(wlr_output->make));
strncpy(wlr_output->model, "wayland", sizeof(wlr_output->model));
snprintf(wlr_output->name, sizeof(wlr_output->name), "WL-%zd", | 1 | #include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <wayland-client.h>
#include <GLES3/gl3.h>
#include <wlr/interfaces/wlr_output.h>
#include <wlr/util/log.h>
#include "backend/wayland.h"
#include "xdg-shell-unstable-v6-client-protocol.h"
int os_create_anonymous_file(off_t size);
static struct wl_callback_listener frame_listener;
static void surface_frame_callback(void *data, struct wl_callback *cb, uint32_t time) {
struct wlr_output *wlr_output = data;
assert(wlr_output);
wl_signal_emit(&wlr_output->events.frame, wlr_output);
wl_callback_destroy(cb);
}
static struct wl_callback_listener frame_listener = {
.done = surface_frame_callback
};
static void wlr_wl_output_make_current(struct wlr_output *_output) {
struct wlr_wl_backend_output *output = (struct wlr_wl_backend_output *)_output;
if (!eglMakeCurrent(output->backend->egl.display,
output->egl_surface, output->egl_surface,
output->backend->egl.context)) {
wlr_log(L_ERROR, "eglMakeCurrent failed: %s", egl_error());
}
}
static void wlr_wl_output_swap_buffers(struct wlr_output *_output) {
struct wlr_wl_backend_output *output = (struct wlr_wl_backend_output *)_output;
output->frame_callback = wl_surface_frame(output->surface);
wl_callback_add_listener(output->frame_callback, &frame_listener, output);
if (!eglSwapBuffers(output->backend->egl.display, output->egl_surface)) {
wlr_log(L_ERROR, "eglSwapBuffers failed: %s", egl_error());
}
}
static void wlr_wl_output_transform(struct wlr_output *_output,
enum wl_output_transform transform) {
struct wlr_wl_backend_output *output = (struct wlr_wl_backend_output *)_output;
output->wlr_output.transform = transform;
}
static bool wlr_wl_output_set_cursor(struct wlr_output *_output,
const uint8_t *buf, int32_t stride, uint32_t width, uint32_t height) {
struct wlr_wl_backend_output *output = (struct wlr_wl_backend_output *)_output;
struct wlr_wl_backend *backend = output->backend;
stride *= 4; // stride is given in pixels, we need it in bytes
if (!backend->shm || !backend->pointer) {
wlr_log(L_INFO, "cannot set cursor, no shm or pointer");
return false;
}
if (!output->cursor_surface) {
output->cursor_surface = wl_compositor_create_surface(output->backend->compositor);
}
uint32_t size = stride * height;
if (output->cursor_buf_size != size) {
if (output->cursor_buffer) {
wl_buffer_destroy(output->cursor_buffer);
}
if (size > output->cursor_buf_size) {
if (output->cursor_pool) {
wl_shm_pool_destroy(output->cursor_pool);
output->cursor_pool = NULL;
munmap(output->cursor_data, output->cursor_buf_size);
}
}
if (!output->cursor_pool) {
int fd = os_create_anonymous_file(size);
if (fd < 0) {
wlr_log_errno(L_INFO, "creating anonymous file for cursor buffer failed");
return false;
}
output->cursor_data = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (output->cursor_data == MAP_FAILED) {
close(fd);
wlr_log_errno(L_INFO, "mmap failed");
return false;
}
output->cursor_pool = wl_shm_create_pool(backend->shm, fd, size);
close(fd);
}
output->cursor_buffer = wl_shm_pool_create_buffer(output->cursor_pool,
0, width, height, stride, WL_SHM_FORMAT_ARGB8888);
output->cursor_buf_size = size;
}
memcpy(output->cursor_data, buf, size);
wl_surface_attach(output->cursor_surface, output->cursor_buffer, 0, 0);
wl_surface_damage(output->cursor_surface, 0, 0, width, height);
wl_surface_commit(output->cursor_surface);
wlr_wl_output_update_cursor(output, output->enter_serial);
return true;
}
static void wlr_wl_output_destroy(struct wlr_output *_output) {
struct wlr_wl_backend_output *output = (struct wlr_wl_backend_output *)_output;
wl_signal_emit(&output->backend->backend.events.output_remove, &output->wlr_output);
if (output->cursor_buf_size != 0) {
assert(output->cursor_data);
assert(output->cursor_buffer);
assert(output->cursor_pool);
wl_buffer_destroy(output->cursor_buffer);
munmap(output->cursor_data, output->cursor_buf_size);
wl_shm_pool_destroy(output->cursor_pool);
}
if (output->cursor_surface) {
wl_surface_destroy(output->cursor_surface);
}
if (output->frame_callback) {
wl_callback_destroy(output->frame_callback);
}
eglDestroySurface(output->backend->egl.display, output->surface);
wl_egl_window_destroy(output->egl_window);
zxdg_toplevel_v6_destroy(output->xdg_toplevel);
zxdg_surface_v6_destroy(output->xdg_surface);
wl_surface_destroy(output->surface);
free(output);
}
void wlr_wl_output_update_cursor(struct wlr_wl_backend_output *output, uint32_t serial) {
if (output->cursor_surface && output->backend->pointer && serial) {
wl_pointer_set_cursor(output->backend->pointer, serial,
output->cursor_surface, 0, 0);
}
}
bool wlr_wl_output_move_cursor(struct wlr_output *_output, int x, int y) {
// TODO: only return true if x == current x and y == current y
return true;
}
static struct wlr_output_impl output_impl = {
.transform = wlr_wl_output_transform,
.destroy = wlr_wl_output_destroy,
.make_current = wlr_wl_output_make_current,
.swap_buffers = wlr_wl_output_swap_buffers,
.set_cursor = wlr_wl_output_set_cursor,
.move_cursor = wlr_wl_output_move_cursor
};
static void xdg_surface_handle_configure(void *data, struct zxdg_surface_v6 *xdg_surface,
uint32_t serial) {
struct wlr_wl_backend_output *output = data;
assert(output && output->xdg_surface == xdg_surface);
zxdg_surface_v6_ack_configure(xdg_surface, serial);
// nothing else?
}
static struct zxdg_surface_v6_listener xdg_surface_listener = {
.configure = xdg_surface_handle_configure,
};
static void xdg_toplevel_handle_configure(void *data, struct zxdg_toplevel_v6 *xdg_toplevel,
int32_t width, int32_t height, struct wl_array *states) {
struct wlr_wl_backend_output *output = data;
assert(output && output->xdg_toplevel == xdg_toplevel);
if (width == 0 && height == 0) {
return;
}
// loop over states for maximized etc?
wl_egl_window_resize(output->egl_window, width, height, 0, 0);
output->wlr_output.width = width;
output->wlr_output.height = height;
wlr_output_update_matrix(&output->wlr_output);
wl_signal_emit(&output->wlr_output.events.resolution, output);
}
static void xdg_toplevel_handle_close(void *data, struct zxdg_toplevel_v6 *xdg_toplevel) {
struct wlr_wl_backend_output *output = data;
assert(output && output->xdg_toplevel == xdg_toplevel);
wl_display_terminate(output->backend->local_display);
}
static struct zxdg_toplevel_v6_listener xdg_toplevel_listener = {
.configure = xdg_toplevel_handle_configure,
.close = xdg_toplevel_handle_close,
};
struct wlr_output *wlr_wl_output_create(struct wlr_backend *_backend) {
assert(wlr_backend_is_wl(_backend));
struct wlr_wl_backend *backend = (struct wlr_wl_backend *)_backend;
if (!backend->remote_display) {
++backend->requested_outputs;
return NULL;
}
struct wlr_wl_backend_output *output;
if (!(output = calloc(sizeof(struct wlr_wl_backend_output), 1))) {
wlr_log(L_ERROR, "Failed to allocate wlr_wl_backend_output");
return NULL;
}
wlr_output_init(&output->wlr_output, &output_impl);
struct wlr_output *wlr_output = &output->wlr_output;
wlr_output->width = 640;
wlr_output->height = 480;
wlr_output->scale = 1;
strncpy(wlr_output->make, "wayland", sizeof(wlr_output->make));
strncpy(wlr_output->model, "wayland", sizeof(wlr_output->model));
snprintf(wlr_output->name, sizeof(wlr_output->name), "WL-%zd",
backend->outputs->length + 1);
wlr_output_update_matrix(wlr_output);
output->backend = backend;
output->surface = wl_compositor_create_surface(backend->compositor);
if (!output->surface) {
wlr_log_errno(L_ERROR, "Could not create output surface");
goto error;
}
output->xdg_surface =
zxdg_shell_v6_get_xdg_surface(backend->shell, output->surface);
if (!output->xdg_surface) {
wlr_log_errno(L_ERROR, "Could not get xdg surface");
goto error;
}
output->xdg_toplevel =
zxdg_surface_v6_get_toplevel(output->xdg_surface);
if (!output->xdg_toplevel) {
wlr_log_errno(L_ERROR, "Could not get xdg toplevel");
goto error;
}
zxdg_toplevel_v6_set_app_id(output->xdg_toplevel, "wlroots");
zxdg_toplevel_v6_set_title(output->xdg_toplevel, "wlroots");
zxdg_surface_v6_add_listener(output->xdg_surface,
&xdg_surface_listener, output);
zxdg_toplevel_v6_add_listener(output->xdg_toplevel,
&xdg_toplevel_listener, output);
wl_surface_commit(output->surface);
output->egl_window = wl_egl_window_create(output->surface,
wlr_output->width, wlr_output->height);
output->egl_surface = wlr_egl_create_surface(&backend->egl, output->egl_window);
wl_display_roundtrip(output->backend->remote_display);
	// start the frame-callback rendering loop by rendering the first frame
if (!eglMakeCurrent(output->backend->egl.display,
output->egl_surface, output->egl_surface,
output->backend->egl.context)) {
wlr_log(L_ERROR, "eglMakeCurrent failed: %s", egl_error());
goto error;
}
glViewport(0, 0, wlr_output->width, wlr_output->height);
glClearColor(1.0, 1.0, 1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
output->frame_callback = wl_surface_frame(output->surface);
wl_callback_add_listener(output->frame_callback, &frame_listener, output);
if (!eglSwapBuffers(output->backend->egl.display, output->egl_surface)) {
wlr_log(L_ERROR, "eglSwapBuffers failed: %s", egl_error());
goto error;
}
if (list_add(backend->outputs, wlr_output) == -1) {
wlr_log(L_ERROR, "Allocation failed");
goto error;
}
wlr_output_create_global(wlr_output, backend->local_display);
wl_signal_emit(&backend->backend.events.output_add, wlr_output);
return wlr_output;
error:
wlr_output_destroy(&output->wlr_output);
return NULL;
}
| 1 | 7,824 | Why did you remove this? | swaywm-wlroots | c |
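One plausible answer to the question above, sketched: the default could have moved into the shared wlr_output_init() so each backend stops setting it — a hedged guess at the patch's rationale, not wlroots' confirmed code. Self-contained illustration:

#include <stdio.h>

struct wlr_output_impl;

struct wlr_output {
	const struct wlr_output_impl *impl;
	int scale;
};

/* Hypothetical: common defaults live in the shared initializer, so
 * per-backend creation code (like the diff above) can drop `scale = 1`. */
static void wlr_output_init(struct wlr_output *output,
		const struct wlr_output_impl *impl) {
	output->impl = impl;
	output->scale = 1; /* the default the removed line used to set */
}

int main(void) {
	struct wlr_output out;
	wlr_output_init(&out, NULL);
	printf("scale = %d\n", out.scale); /* prints 1 */
	return 0;
}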
@@ -72,7 +72,12 @@ func mapPort(m portmap.Interface, c chan struct{}, protocol string, extPort, int
}
}()
for {
- addMapping(m, protocol, extPort, intPort, name, publisher)
+ err := addMapping(m, protocol, extPort, intPort, name, publisher)
+ if err != nil {
+ log.Infof("%s, Mapping for port %d failed: %s", logPrefix, extPort, err)
+ } else {
+ log.Infof("%s, Mapped network port: %d", logPrefix, extPort)
+ }
select {
case <-c:
return | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mapping
import (
"time"
"github.com/mysteriumnetwork/node/nat/traversal"
log "github.com/cihub/seelog"
portmap "github.com/ethereum/go-ethereum/p2p/nat"
)
const logPrefix = "[port mapping] "
const (
mapTimeout = 20 * time.Minute
mapUpdateInterval = 15 * time.Minute
)
// Publisher is responsible for publishing given events
type Publisher interface {
Publish(topic string, args ...interface{})
}
// GetPortMappingFunc returns PortMapping function if service is behind NAT
func GetPortMappingFunc(pubIP, outIP, protocol string, port int, description string, publisher Publisher) func() {
if pubIP != outIP {
return PortMapping(protocol, port, description, publisher)
}
return func() {}
}
// PortMapping maps given port of given protocol from external IP on a gateway to local machine internal IP
// 'name' denotes rule name added on a gateway.
func PortMapping(protocol string, port int, name string, publisher Publisher) func() {
mapperQuit := make(chan struct{})
go mapPort(portmap.Any(),
mapperQuit,
protocol,
port,
port,
name,
publisher)
return func() { close(mapperQuit) }
}
// mapPort adds a port mapping on m and keeps it alive until c is closed.
// This function is typically invoked in its own goroutine.
func mapPort(m portmap.Interface, c chan struct{}, protocol string, extPort, intPort int, name string, publisher Publisher) {
defer func() {
log.Debug(logPrefix, "Deleting port mapping for port: ", extPort)
if err := m.DeleteMapping(protocol, extPort, intPort); err != nil {
log.Debug(logPrefix, "Couldn't delete port mapping: ", err)
}
}()
for {
addMapping(m, protocol, extPort, intPort, name, publisher)
select {
case <-c:
return
case <-time.After(mapUpdateInterval):
}
}
}
func addMapping(m portmap.Interface, protocol string, extPort, intPort int, name string, publisher Publisher) {
if err := m.AddMapping(protocol, extPort, intPort, name, mapTimeout); err != nil {
log.Debugf("%s, Couldn't add port mapping for port %d: %v, retrying with permanent lease", logPrefix, extPort, err)
if err := m.AddMapping(protocol, extPort, intPort, name, 0); err != nil {
// some gateways support only permanent leases
publisher.Publish(traversal.EventTopic, traversal.EventFailure)
log.Debugf("%s Couldn't add port mapping for port %d: %v", logPrefix, extPort, err)
return
}
}
publisher.Publish(traversal.EventTopic, traversal.EventSuccess)
log.Info(logPrefix, "Mapped network port:", extPort)
}
 | 1 | 13,714 | Why do we need more error printouts? We already have them inside addMapping. | mysteriumnetwork-node | go |
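A minimal, self-contained sketch of the single-log-site alternative the comment above implies — addMapping only returns the outcome and the caller owns the one log line (the mapper interface below is a stand-in for the go-ethereum portmap.Interface used in this record):

package main

import (
	"errors"
	"fmt"
	"time"
)

// mapper stands in for the gateway port-mapping interface.
type mapper interface {
	AddMapping(protocol string, extPort, intPort int, name string, lifetime time.Duration) error
}

// addMapping never logs; it only reports, so callers cannot end up with
// duplicate printouts for the same failure.
func addMapping(m mapper, protocol string, extPort, intPort int, name string) error {
	if err := m.AddMapping(protocol, extPort, intPort, name, 20*time.Minute); err != nil {
		// Some gateways support only permanent leases; retry with one.
		return m.AddMapping(protocol, extPort, intPort, name, 0)
	}
	return nil
}

type failingMapper struct{}

func (failingMapper) AddMapping(string, int, int, string, time.Duration) error {
	return errors.New("gateway refused")
}

func main() {
	if err := addMapping(failingMapper{}, "TCP", 4050, 4050, "myst"); err != nil {
		fmt.Printf("mapping for port %d failed: %v\n", 4050, err) // logged exactly once
	}
}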
@@ -215,7 +215,7 @@ bool CoreChecks::ValidateFsOutputsAgainstDynamicRenderingRenderPass(SHADER_MODUL
const bool alpha_to_coverage_enabled = pipeline->create_info.graphics.pMultisampleState != NULL &&
pipeline->create_info.graphics.pMultisampleState->alphaToCoverageEnable == VK_TRUE;
- for (uint32_t location = 0; location < pipeline->rp_state->dynamic_rendering_pipeline_create_info.colorAttachmentCount; ++location) {
+ for (uint32_t location = 0; location < location_map.size(); ++location) {
const auto output = location_map[location].output;
if (!output && pipeline->attachments[location].colorWriteMask != 0) { | 1 | /* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (C) 2015-2021 Google Inc.
* Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Chris Forbes <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Tobias Hector <[email protected]>
*/
#include "shader_validation.h"
#include <cassert>
#include <cinttypes>
#include <cmath>
#include <sstream>
#include <string>
#include <vector>
#include <spirv/unified1/spirv.hpp>
#include "vk_enum_string_helper.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "chassis.h"
#include "core_validation.h"
#include "spirv_grammar_helper.h"
#include "xxhash.h"
static shader_stage_attributes shader_stage_attribs[] = {
{"vertex shader", false, false, VK_SHADER_STAGE_VERTEX_BIT},
{"tessellation control shader", true, true, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT},
{"tessellation evaluation shader", true, false, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT},
{"geometry shader", true, false, VK_SHADER_STAGE_GEOMETRY_BIT},
{"fragment shader", false, false, VK_SHADER_STAGE_FRAGMENT_BIT},
};
static const spirv_inst_iter GetBaseTypeIter(SHADER_MODULE_STATE const *src, uint32_t type) {
const auto &insn = src->get_def(type);
const uint32_t base_insn_id = src->GetBaseType(insn);
return src->get_def(base_insn_id);
}
static bool BaseTypesMatch(SHADER_MODULE_STATE const *a, SHADER_MODULE_STATE const *b, const spirv_inst_iter &a_base_insn,
const spirv_inst_iter &b_base_insn) {
const uint32_t a_opcode = a_base_insn.opcode();
const uint32_t b_opcode = b_base_insn.opcode();
if (a_opcode == b_opcode) {
if (a_opcode == spv::OpTypeInt) {
// Match width and signedness
return a_base_insn.word(2) == b_base_insn.word(2) && a_base_insn.word(3) == b_base_insn.word(3);
} else if (a_opcode == spv::OpTypeFloat) {
// Match width
return a_base_insn.word(2) == b_base_insn.word(2);
} else if (a_opcode == spv::OpTypeStruct) {
// Match on all element types
if (a_base_insn.len() != b_base_insn.len()) {
return false; // Structs cannot match if member counts differ
}
for (uint32_t i = 2; i < a_base_insn.len(); i++) {
const auto &c_base_insn = GetBaseTypeIter(a, a_base_insn.word(i));
const auto &d_base_insn = GetBaseTypeIter(b, b_base_insn.word(i));
if (!BaseTypesMatch(a, b, c_base_insn, d_base_insn)) {
return false;
}
}
return true;
}
}
return false;
}
static bool TypesMatch(SHADER_MODULE_STATE const *a, SHADER_MODULE_STATE const *b, uint32_t a_type, uint32_t b_type) {
const auto &a_base_insn = GetBaseTypeIter(a, a_type);
const auto &b_base_insn = GetBaseTypeIter(b, b_type);
return BaseTypesMatch(a, b, a_base_insn, b_base_insn);
}
static unsigned GetLocationsConsumedByFormat(VkFormat format) {
switch (format) {
case VK_FORMAT_R64G64B64A64_SFLOAT:
case VK_FORMAT_R64G64B64A64_SINT:
case VK_FORMAT_R64G64B64A64_UINT:
case VK_FORMAT_R64G64B64_SFLOAT:
case VK_FORMAT_R64G64B64_SINT:
case VK_FORMAT_R64G64B64_UINT:
return 2;
default:
return 1;
}
}
static unsigned GetFormatType(VkFormat fmt) {
if (FormatIsSINT(fmt)) return FORMAT_TYPE_SINT;
if (FormatIsUINT(fmt)) return FORMAT_TYPE_UINT;
// Formats such as VK_FORMAT_D16_UNORM_S8_UINT are both
if (FormatIsDepthAndStencil(fmt)) return FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT;
if (fmt == VK_FORMAT_UNDEFINED) return 0;
// everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader.
return FORMAT_TYPE_FLOAT;
}
static uint32_t GetShaderStageId(VkShaderStageFlagBits stage) {
uint32_t bit_pos = uint32_t(u_ffs(stage));
return bit_pos - 1;
}
bool CoreChecks::ValidateViConsistency(VkPipelineVertexInputStateCreateInfo const *vi) const {
// Walk the binding descriptions, which describe the step rate and stride of each vertex buffer. Each binding should
// be specified only once.
layer_data::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
bool skip = false;
for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
auto desc = &vi->pVertexBindingDescriptions[i];
auto &binding = bindings[desc->binding];
if (binding) {
// TODO: "VUID-VkGraphicsPipelineCreateInfo-pStages-00742" perhaps?
skip |= LogError(device, kVUID_Core_Shader_InconsistentVi, "Duplicate vertex input binding descriptions for binding %d",
desc->binding);
} else {
binding = desc;
}
}
return skip;
}
bool CoreChecks::ValidateViAgainstVsInputs(VkPipelineVertexInputStateCreateInfo const *vi, SHADER_MODULE_STATE const *vs,
spirv_inst_iter entrypoint) const {
bool skip = false;
const auto inputs = vs->CollectInterfaceByLocation(entrypoint, spv::StorageClassInput, false);
// Build index by location
std::map<uint32_t, const VkVertexInputAttributeDescription *> attribs;
if (vi) {
for (uint32_t i = 0; i < vi->vertexAttributeDescriptionCount; ++i) {
const auto num_locations = GetLocationsConsumedByFormat(vi->pVertexAttributeDescriptions[i].format);
for (uint32_t j = 0; j < num_locations; ++j) {
attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
}
}
}
struct AttribInputPair {
const VkVertexInputAttributeDescription *attrib = nullptr;
const interface_var *input = nullptr;
};
std::map<uint32_t, AttribInputPair> location_map;
for (const auto &attrib_it : attribs) location_map[attrib_it.first].attrib = attrib_it.second;
for (const auto &input_it : inputs) location_map[input_it.first.first].input = &input_it.second;
for (const auto &location_it : location_map) {
const auto location = location_it.first;
const auto attrib = location_it.second.attrib;
const auto input = location_it.second.input;
if (attrib && !input) {
skip |= LogPerformanceWarning(vs->vk_shader_module(), kVUID_Core_Shader_OutputNotConsumed,
"Vertex attribute at location %" PRIu32 " not consumed by vertex shader", location);
} else if (!attrib && input) {
skip |= LogError(vs->vk_shader_module(), kVUID_Core_Shader_InputNotProduced,
"Vertex shader consumes input at location %" PRIu32 " but not provided", location);
} else if (attrib && input) {
const auto attrib_type = GetFormatType(attrib->format);
const auto input_type = vs->GetFundamentalType(input->type_id);
// Type checking
if (!(attrib_type & input_type)) {
skip |= LogError(vs->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Attribute type of `%s` at location %" PRIu32 " does not match vertex shader input type of `%s`",
string_VkFormat(attrib->format), location, vs->DescribeType(input->type_id).c_str());
}
} else { // !attrib && !input
assert(false); // at least one exists in the map
}
}
return skip;
}
bool CoreChecks::ValidateFsOutputsAgainstDynamicRenderingRenderPass(SHADER_MODULE_STATE const* fs, spirv_inst_iter entrypoint,
PIPELINE_STATE const* pipeline) const {
bool skip = false;
struct Attachment {
const interface_var* output = nullptr;
};
std::map<uint32_t, Attachment> location_map;
// TODO: dual source blend index (spv::DecIndex, zero if not provided)
const auto outputs = fs->CollectInterfaceByLocation(entrypoint, spv::StorageClassOutput, false);
for (const auto& output_it : outputs) {
auto const location = output_it.first.first;
location_map[location].output = &output_it.second;
}
const bool alpha_to_coverage_enabled = pipeline->create_info.graphics.pMultisampleState != NULL &&
pipeline->create_info.graphics.pMultisampleState->alphaToCoverageEnable == VK_TRUE;
for (uint32_t location = 0; location < pipeline->rp_state->dynamic_rendering_pipeline_create_info.colorAttachmentCount; ++location) {
const auto output = location_map[location].output;
if (!output && pipeline->attachments[location].colorWriteMask != 0) {
skip |= LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_InputNotProduced,
"Attachment %" PRIu32
" not written by fragment shader; undefined values will be written to attachment",
location);
} else if (output) {
auto format = pipeline->rp_state->dynamic_rendering_pipeline_create_info.pColorAttachmentFormats[location];
const auto attachment_type = GetFormatType(format);
const auto output_type = fs->GetFundamentalType(output->type_id);
// Type checking
if (!(output_type & attachment_type)) {
skip |=
LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Attachment %" PRIu32
" of type `%s` does not match fragment shader output type of `%s`; resulting values are undefined",
location, string_VkFormat(format), fs->DescribeType(output->type_id).c_str());
}
}
}
const auto output_zero = location_map.count(0) ? location_map[0].output : nullptr;
bool location_zero_has_alpha = output_zero && fs->get_def(output_zero->type_id) != fs->end() &&
fs->GetComponentsConsumedByType(output_zero->type_id, false) == 4;
if (alpha_to_coverage_enabled && !location_zero_has_alpha) {
skip |= LogError(fs->vk_shader_module(), kVUID_Core_Shader_NoAlphaAtLocation0WithAlphaToCoverage,
"fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled.");
}
return skip;
}
bool CoreChecks::ValidateFsOutputsAgainstRenderPass(SHADER_MODULE_STATE const *fs, spirv_inst_iter entrypoint,
PIPELINE_STATE const *pipeline, uint32_t subpass_index) const {
bool skip = false;
struct Attachment {
const VkAttachmentReference2 *reference = nullptr;
const VkAttachmentDescription2 *attachment = nullptr;
const interface_var *output = nullptr;
};
std::map<uint32_t, Attachment> location_map;
if (pipeline->rp_state && !pipeline->rp_state->use_dynamic_rendering) {
const auto rpci = pipeline->rp_state->createInfo.ptr();
const auto subpass = rpci->pSubpasses[subpass_index];
for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
auto const &reference = subpass.pColorAttachments[i];
location_map[i].reference = &reference;
if (reference.attachment != VK_ATTACHMENT_UNUSED &&
rpci->pAttachments[reference.attachment].format != VK_FORMAT_UNDEFINED) {
location_map[i].attachment = &rpci->pAttachments[reference.attachment];
}
}
}
// TODO: dual source blend index (spv::DecIndex, zero if not provided)
const auto outputs = fs->CollectInterfaceByLocation(entrypoint, spv::StorageClassOutput, false);
for (const auto &output_it : outputs) {
auto const location = output_it.first.first;
location_map[location].output = &output_it.second;
}
const bool alpha_to_coverage_enabled = pipeline->create_info.graphics.pMultisampleState != NULL &&
pipeline->create_info.graphics.pMultisampleState->alphaToCoverageEnable == VK_TRUE;
for (const auto &location_it : location_map) {
const auto reference = location_it.second.reference;
if (reference != nullptr && reference->attachment == VK_ATTACHMENT_UNUSED) {
continue;
}
const auto location = location_it.first;
const auto attachment = location_it.second.attachment;
const auto output = location_it.second.output;
if (attachment && !output) {
if (pipeline->attachments[location].colorWriteMask != 0) {
skip |= LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_InputNotProduced,
"Attachment %" PRIu32
" not written by fragment shader; undefined values will be written to attachment",
location);
}
} else if (!attachment && output) {
if (!(alpha_to_coverage_enabled && location == 0)) {
skip |= LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_OutputNotConsumed,
"fragment shader writes to output location %" PRIu32 " with no matching attachment", location);
}
} else if (attachment && output) {
const auto attachment_type = GetFormatType(attachment->format);
const auto output_type = fs->GetFundamentalType(output->type_id);
// Type checking
if (!(output_type & attachment_type)) {
skip |=
LogWarning(fs->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Attachment %" PRIu32
" of type `%s` does not match fragment shader output type of `%s`; resulting values are undefined",
location, string_VkFormat(attachment->format), fs->DescribeType(output->type_id).c_str());
}
} else { // !attachment && !output
assert(false); // at least one exists in the map
}
}
const auto output_zero = location_map.count(0) ? location_map[0].output : nullptr;
bool location_zero_has_alpha = output_zero && fs->get_def(output_zero->type_id) != fs->end() &&
fs->GetComponentsConsumedByType(output_zero->type_id, false) == 4;
if (alpha_to_coverage_enabled && !location_zero_has_alpha) {
skip |= LogError(fs->vk_shader_module(), kVUID_Core_Shader_NoAlphaAtLocation0WithAlphaToCoverage,
"fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled.");
}
return skip;
}
PushConstantByteState CoreChecks::ValidatePushConstantSetUpdate(const std::vector<uint8_t> &push_constant_data_update,
const shader_struct_member &push_constant_used_in_shader,
uint32_t &out_issue_index) const {
const auto *used_bytes = push_constant_used_in_shader.GetUsedbytes();
const auto used_bytes_size = used_bytes->size();
if (used_bytes_size == 0) return PC_Byte_Updated;
const auto push_constant_data_update_size = push_constant_data_update.size();
const auto *data = push_constant_data_update.data();
if ((*data == PC_Byte_Updated) && std::memcmp(data, data + 1, push_constant_data_update_size - 1) == 0) {
if (used_bytes_size <= push_constant_data_update_size) {
return PC_Byte_Updated;
}
const auto used_bytes_size1 = used_bytes_size - push_constant_data_update_size;
const auto *used_bytes_data1 = used_bytes->data() + push_constant_data_update_size;
if ((*used_bytes_data1 == 0) && std::memcmp(used_bytes_data1, used_bytes_data1 + 1, used_bytes_size1 - 1) == 0) {
return PC_Byte_Updated;
}
}
uint32_t i = 0;
for (const auto used : *used_bytes) {
if (used) {
if (i >= push_constant_data_update.size() || push_constant_data_update[i] == PC_Byte_Not_Set) {
out_issue_index = i;
return PC_Byte_Not_Set;
} else if (push_constant_data_update[i] == PC_Byte_Not_Updated) {
out_issue_index = i;
return PC_Byte_Not_Updated;
}
}
++i;
}
return PC_Byte_Updated;
}
bool CoreChecks::ValidatePushConstantUsage(const PIPELINE_STATE &pipeline, SHADER_MODULE_STATE const *src,
VkPipelineShaderStageCreateInfo const *pStage, const std::string &vuid) const {
bool skip = false;
// Temp workaround to prevent false positive errors
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2450
if (src->HasMultipleEntryPoints()) {
return skip;
}
// Validate directly off the offsets. this isn't quite correct for arrays and matrices, but is a good first step.
const auto *entrypoint = src->FindEntrypointStruct(pStage->pName, pStage->stage);
if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) {
return skip;
}
std::vector<VkPushConstantRange> const *push_constant_ranges = pipeline.pipeline_layout->push_constant_ranges.get();
bool found_stage = false;
for (auto const &range : *push_constant_ranges) {
if (range.stageFlags & pStage->stage) {
found_stage = true;
std::string location_desc;
std::vector<uint8_t> push_constant_bytes_set;
if (range.offset > 0) {
push_constant_bytes_set.resize(range.offset, PC_Byte_Not_Set);
}
push_constant_bytes_set.resize(range.offset + range.size, PC_Byte_Updated);
uint32_t issue_index = 0;
const auto ret =
ValidatePushConstantSetUpdate(push_constant_bytes_set, entrypoint->push_constant_used_in_shader, issue_index);
if (ret == PC_Byte_Not_Set) {
const auto loc_descr = entrypoint->push_constant_used_in_shader.GetLocationDesc(issue_index);
LogObjectList objlist(src->vk_shader_module());
objlist.add(pipeline.pipeline_layout->layout());
skip |= LogError(objlist, vuid, "Push constant buffer:%s in %s is out of range in %s.", loc_descr.c_str(),
string_VkShaderStageFlags(pStage->stage).c_str(),
report_data->FormatHandle(pipeline.pipeline_layout->layout()).c_str());
break;
}
}
}
if (!found_stage) {
LogObjectList objlist(src->vk_shader_module());
objlist.add(pipeline.pipeline_layout->layout());
skip |= LogError(objlist, vuid, "Push constant is used in %s of %s. But %s doesn't set %s.",
string_VkShaderStageFlags(pStage->stage).c_str(), report_data->FormatHandle(src->vk_shader_module()).c_str(),
report_data->FormatHandle(pipeline.pipeline_layout->layout()).c_str(),
string_VkShaderStageFlags(pStage->stage).c_str());
}
return skip;
}
bool CoreChecks::ValidateBuiltinLimits(SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint) const {
bool skip = false;
// Currently all builtin tested are only found in fragment shaders
if (entrypoint.word(1) != spv::ExecutionModelFragment) {
return skip;
}
// Find all builtin from just the interface variables
for (uint32_t id : FindEntrypointInterfaces(entrypoint)) {
auto insn = src->get_def(id);
assert(insn.opcode() == spv::OpVariable);
const decoration_set decorations = src->get_decorations(insn.word(2));
// Currently don't need to search in structs
if (((decorations.flags & decoration_set::builtin_bit) != 0) && (decorations.builtin == spv::BuiltInSampleMask)) {
auto type_pointer = src->get_def(insn.word(1));
assert(type_pointer.opcode() == spv::OpTypePointer);
auto type = src->get_def(type_pointer.word(3));
if (type.opcode() == spv::OpTypeArray) {
uint32_t length = static_cast<uint32_t>(src->GetConstantValueById(type.word(3)));
// Handles both the input and output sampleMask
if (length > phys_dev_props.limits.maxSampleMaskWords) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-maxSampleMaskWords-00711",
"vkCreateGraphicsPipelines(): The BuiltIns SampleMask array sizes is %u which exceeds "
"maxSampleMaskWords of %u in %s.",
length, phys_dev_props.limits.maxSampleMaskWords,
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
break;
}
}
}
return skip;
}
// Validate that data for each specialization entry is fully contained within the buffer.
bool CoreChecks::ValidateSpecializations(VkPipelineShaderStageCreateInfo const *info) const {
bool skip = false;
VkSpecializationInfo const *spec = info->pSpecializationInfo;
if (spec) {
for (auto i = 0u; i < spec->mapEntryCount; i++) {
if (spec->pMapEntries[i].offset >= spec->dataSize) {
skip |= LogError(device, "VUID-VkSpecializationInfo-offset-00773",
"Specialization entry %u (for constant id %u) references memory outside provided specialization "
"data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided).",
i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
spec->pMapEntries[i].offset + spec->dataSize - 1, spec->dataSize);
continue;
}
if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
skip |= LogError(device, "VUID-VkSpecializationInfo-pMapEntries-00774",
"Specialization entry %u (for constant id %u) references memory outside provided specialization "
"data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided).",
i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize);
}
for (uint32_t j = i + 1; j < spec->mapEntryCount; ++j) {
if (spec->pMapEntries[i].constantID == spec->pMapEntries[j].constantID) {
skip |= LogError(device, "VUID-VkSpecializationInfo-constantID-04911",
"Specialization entry %" PRIu32 " and %" PRIu32 " have the same constantID (%" PRIu32 ").", i,
j, spec->pMapEntries[i].constantID);
}
}
}
}
return skip;
}
// TODO (jbolz): Can this return a const reference?
static std::set<uint32_t> TypeToDescriptorTypeSet(SHADER_MODULE_STATE const *module, uint32_t type_id, unsigned &descriptor_count,
bool is_khr) {
auto type = module->get_def(type_id);
bool is_storage_buffer = false;
descriptor_count = 1;
std::set<uint32_t> ret;
// Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer || type.opcode() == spv::OpTypeRuntimeArray) {
if (type.opcode() == spv::OpTypeRuntimeArray) {
descriptor_count = 0;
type = module->get_def(type.word(2));
} else if (type.opcode() == spv::OpTypeArray) {
descriptor_count *= module->GetConstantValueById(type.word(3));
type = module->get_def(type.word(2));
} else {
if (type.word(2) == spv::StorageClassStorageBuffer) {
is_storage_buffer = true;
}
type = module->get_def(type.word(3));
}
}
switch (type.opcode()) {
case spv::OpTypeStruct: {
for (const auto insn : module->GetDecorationInstructions()) {
if (insn.word(1) == type.word(1)) {
if (insn.word(2) == spv::DecorationBlock) {
if (is_storage_buffer) {
ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
return ret;
} else {
ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
ret.insert(VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
return ret;
}
} else if (insn.word(2) == spv::DecorationBufferBlock) {
ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
return ret;
}
}
}
// Invalid
return ret;
}
case spv::OpTypeSampler:
ret.insert(VK_DESCRIPTOR_TYPE_SAMPLER);
ret.insert(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
return ret;
case spv::OpTypeSampledImage: {
// Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
// buffer descriptor doesn't really provide one. Allow this slight mismatch.
auto image_type = module->get_def(type.word(2));
auto dim = image_type.word(3);
auto sampled = image_type.word(7);
if (dim == spv::DimBuffer && sampled == 1) {
ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
return ret;
}
}
ret.insert(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
return ret;
case spv::OpTypeImage: {
// Many descriptor types can back an image type; it depends on the dimension and whether the
// image will be used with a sampler.
// SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
auto dim = type.word(3);
auto sampled = type.word(7);
if (dim == spv::DimSubpassData) {
ret.insert(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
return ret;
} else if (dim == spv::DimBuffer) {
if (sampled == 1) {
ret.insert(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
return ret;
} else {
ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
return ret;
}
} else if (sampled == 1) {
ret.insert(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
ret.insert(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
return ret;
} else {
ret.insert(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
return ret;
}
}
case spv::OpTypeAccelerationStructureNV:
is_khr ? ret.insert(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
: ret.insert(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV);
return ret;
// We shouldn't really see any other junk types -- but if we do, they're a mismatch.
default:
return ret; // Matches nothing
}
}
static std::string string_descriptorTypes(const std::set<uint32_t> &descriptor_types) {
std::stringstream ss;
for (auto it = descriptor_types.begin(); it != descriptor_types.end(); ++it) {
if (ss.tellp()) ss << ", ";
ss << string_VkDescriptorType(VkDescriptorType(*it));
}
return ss.str();
}
bool CoreChecks::RequirePropertyFlag(VkBool32 check, char const *flag, char const *structure, const char *vuid) const {
if (!check) {
if (LogError(device, vuid, "Shader requires flag %s set in %s but it is not set on the device", flag, structure)) {
return true;
}
}
return false;
}
bool CoreChecks::RequireFeature(VkBool32 feature, char const *feature_name, const char *vuid) const {
if (!feature) {
if (LogError(device, vuid, "Shader requires %s but is not enabled on the device", feature_name)) {
return true;
}
}
return false;
}
bool CoreChecks::ValidateShaderStageWritableOrAtomicDescriptor(VkShaderStageFlagBits stage, bool has_writable_descriptor,
bool has_atomic_descriptor) const {
bool skip = false;
if (has_writable_descriptor || has_atomic_descriptor) {
switch (stage) {
case VK_SHADER_STAGE_COMPUTE_BIT:
case VK_SHADER_STAGE_RAYGEN_BIT_NV:
case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
case VK_SHADER_STAGE_MISS_BIT_NV:
case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
case VK_SHADER_STAGE_CALLABLE_BIT_NV:
case VK_SHADER_STAGE_TASK_BIT_NV:
case VK_SHADER_STAGE_MESH_BIT_NV:
/* No feature requirements for writes and atomics from compute,
 * ray tracing, or mesh stages */
break;
case VK_SHADER_STAGE_FRAGMENT_BIT:
skip |= RequireFeature(enabled_features.core.fragmentStoresAndAtomics, "fragmentStoresAndAtomics",
"VUID-RuntimeSpirv-NonWritable-06340");
break;
default:
skip |= RequireFeature(enabled_features.core.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics",
"VUID-RuntimeSpirv-NonWritable-06341");
break;
}
}
return skip;
}
bool CoreChecks::ValidateShaderStageGroupNonUniform(SHADER_MODULE_STATE const *module, VkShaderStageFlagBits stage,
spirv_inst_iter &insn) const {
bool skip = false;
// Check anything using a group operation (which currently is only OpGroupNonUniform* operations)
if (GroupOperation(insn.opcode()) == true) {
// Check the quad operations.
if ((insn.opcode() == spv::OpGroupNonUniformQuadBroadcast) || (insn.opcode() == spv::OpGroupNonUniformQuadSwap)) {
if ((stage != VK_SHADER_STAGE_FRAGMENT_BIT) && (stage != VK_SHADER_STAGE_COMPUTE_BIT)) {
skip |=
RequireFeature(phys_dev_props_core11.subgroupQuadOperationsInAllStages,
"VkPhysicalDeviceSubgroupProperties::quadOperationsInAllStages", "VUID-RuntimeSpirv-None-06342");
}
}
uint32_t scope_type = spv::ScopeMax;
if (insn.opcode() == spv::OpGroupNonUniformPartitionNV) {
// OpGroupNonUniformPartitionNV has no Scope operand; it is always implicitly Subgroup scope
scope_type = spv::ScopeSubgroup;
} else {
// "All <id> used for Scope <id> must be of an OpConstant"
auto scope_id = module->get_def(insn.word(3));
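// For an OpConstant, the literal value lives in word 3.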
scope_type = scope_id.word(3);
}
if (scope_type == spv::ScopeSubgroup) {
// "Group operations with subgroup scope" must have stage support
const VkSubgroupFeatureFlags supported_stages = phys_dev_props_core11.subgroupSupportedStages;
skip |= RequirePropertyFlag(supported_stages & stage, string_VkShaderStageFlagBits(stage),
"VkPhysicalDeviceSubgroupProperties::supportedStages", "VUID-RuntimeSpirv-None-06343");
}
if (!enabled_features.core12.shaderSubgroupExtendedTypes) {
auto type = module->get_def(insn.word(1));
if (type.opcode() == spv::OpTypeVector) {
// Get the element type
type = module->get_def(type.word(2));
}
if (type.opcode() != spv::OpTypeBool) {
// For both OpTypeInt and OpTypeFloat, the width is in the 2nd word.
const uint32_t width = type.word(2);
if ((type.opcode() == spv::OpTypeFloat && width == 16) ||
(type.opcode() == spv::OpTypeInt && (width == 8 || width == 16 || width == 64))) {
skip |= RequireFeature(enabled_features.core12.shaderSubgroupExtendedTypes,
"VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures::shaderSubgroupExtendedTypes",
"VUID-RuntimeSpirv-None-06275");
}
}
}
}
return skip;
}
bool CoreChecks::ValidateMemoryScope(SHADER_MODULE_STATE const *src, const spirv_inst_iter &insn) const {
bool skip = false;
const auto &entry = MemoryScopeParamPosition(insn.opcode());
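// MemoryScopeParamPosition returns the operand index of the instruction's Memory Scope <id>,
// or 0 if the opcode has no memory scope operand.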
if (entry > 0) {
const uint32_t scope_id = insn.word(entry);
if (enabled_features.core12.vulkanMemoryModel && !enabled_features.core12.vulkanMemoryModelDeviceScope) {
const auto &iter = src->GetConstantDef(scope_id);
if (iter != src->end()) {
if (GetConstantValue(iter) == spv::Scope::ScopeDevice) {
skip |= LogError(device, "VUID-RuntimeSpirv-vulkanMemoryModel-06265",
"VkPhysicalDeviceVulkan12Features::vulkanMemoryModel is enabled and "
"VkPhysicalDeviceVulkan12Features::vulkanMemoryModelDeviceScope is disabled, but Device "
"memory scope is used.");
}
}
} else if (!enabled_features.core12.vulkanMemoryModel) {
const auto &iter = src->GetConstantDef(scope_id);
if (iter != src->end()) {
if (GetConstantValue(iter) == spv::Scope::ScopeQueueFamily) {
skip |= LogError(device, "VUID-RuntimeSpirv-vulkanMemoryModel-06266",
"VkPhysicalDeviceVulkan12Features::vulkanMemoryModel is not enabled, but QueueFamily "
"memory scope is used.");
}
}
}
}
return skip;
}
bool CoreChecks::ValidateWorkgroupSize(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
const std::unordered_map<uint32_t, std::vector<uint32_t>>& id_value_map) const {
bool skip = false;
std::array<uint32_t, 3> work_group_size = src->GetWorkgroupSize(pStage, id_value_map);
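// GetWorkgroupSize folds in any specialization constants supplied via id_value_map, so this
// also catches WorkGroupSize values that only exceed the limit after specialization.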
for (uint32_t i = 0; i < 3; ++i) {
if (work_group_size[i] > phys_dev_props.limits.maxComputeWorkGroupSize[i]) {
const char member = 'x' + static_cast<int8_t>(i);
skip |= LogError(device, kVUID_Core_Shader_MaxComputeWorkGroupSize,
"Specialization constant is being used to specialize WorkGroupSize.%c, but value (%" PRIu32
") is greater than VkPhysicalDeviceLimits::maxComputeWorkGroupSize[%" PRIu32 "] = %" PRIu32 ".",
member, work_group_size[i], i, phys_dev_props.limits.maxComputeWorkGroupSize[i]);
}
}
return skip;
}
bool CoreChecks::ValidateShaderStageInputOutputLimits(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
const PIPELINE_STATE *pipeline, spirv_inst_iter entrypoint) const {
if (pStage->stage == VK_SHADER_STAGE_COMPUTE_BIT || pStage->stage == VK_SHADER_STAGE_ALL_GRAPHICS ||
pStage->stage == VK_SHADER_STAGE_ALL) {
return false;
}
bool skip = false;
auto const &limits = phys_dev_props.limits;
std::set<uint32_t> patch_i_ds;
struct Variable {
uint32_t baseTypePtrID;
uint32_t ID;
uint32_t storageClass;
};
std::vector<Variable> variables;
uint32_t num_vertices = 0;
bool is_iso_lines = false;
bool is_point_mode = false;
auto entrypoint_variables = FindEntrypointInterfaces(entrypoint);
for (auto insn : *src) {
switch (insn.opcode()) {
// Find all Patch decorations
case spv::OpDecorate:
switch (insn.word(2)) {
case spv::DecorationPatch: {
patch_i_ds.insert(insn.word(1));
break;
}
default:
break;
}
break;
// Find all input and output variables
case spv::OpVariable: {
Variable var = {};
var.storageClass = insn.word(3);
if ((var.storageClass == spv::StorageClassInput || var.storageClass == spv::StorageClassOutput) &&
// Only include variables in the entrypoint's interface
find(entrypoint_variables.begin(), entrypoint_variables.end(), insn.word(2)) != entrypoint_variables.end()) {
var.baseTypePtrID = insn.word(1);
var.ID = insn.word(2);
variables.push_back(var);
}
break;
}
case spv::OpExecutionMode:
if (insn.word(1) == entrypoint.word(2)) {
switch (insn.word(2)) {
default:
break;
case spv::ExecutionModeOutputVertices:
num_vertices = insn.word(3);
break;
case spv::ExecutionModeIsolines:
is_iso_lines = true;
break;
case spv::ExecutionModePointMode:
is_point_mode = true;
break;
}
}
break;
default:
break;
}
}
bool strip_output_array_level =
(pStage->stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || pStage->stage == VK_SHADER_STAGE_MESH_BIT_NV);
bool strip_input_array_level =
(pStage->stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
pStage->stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT || pStage->stage == VK_SHADER_STAGE_GEOMETRY_BIT);
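// Tessellation control/evaluation and geometry inputs (and tess control / mesh outputs) are
// declared as per-vertex arrays; the outer array level does not count against the component
// limits, so strip it before counting.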
uint32_t num_comp_in = 0, num_comp_out = 0;
int max_comp_in = 0, max_comp_out = 0;
auto inputs = src->CollectInterfaceByLocation(entrypoint, spv::StorageClassInput, strip_input_array_level);
auto outputs = src->CollectInterfaceByLocation(entrypoint, spv::StorageClassOutput, strip_output_array_level);
// Find max component location used for input variables.
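// Each location spans 4 components, so location * 4 + component + num_components is one past
// the highest component index the variable consumes.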
for (auto &var : inputs) {
int location = var.first.first;
int component = var.first.second;
interface_var &iv = var.second;
// Only need to look at the first location, since we use the type's whole size
if (iv.offset != 0) {
continue;
}
if (iv.is_patch) {
continue;
}
int num_components = src->GetComponentsConsumedByType(iv.type_id, strip_input_array_level);
max_comp_in = std::max(max_comp_in, location * 4 + component + num_components);
}
// Find max component location used for output variables.
for (auto &var : outputs) {
int location = var.first.first;
int component = var.first.second;
interface_var &iv = var.second;
// Only need to look at the first location, since we use the type's whole size
if (iv.offset != 0) {
continue;
}
if (iv.is_patch) {
continue;
}
int num_components = src->GetComponentsConsumedByType(iv.type_id, strip_output_array_level);
max_comp_out = std::max(max_comp_out, location * 4 + component + num_components);
}
// XXX TODO: Would be nice to rewrite this to use CollectInterfaceByLocation (or something
// similar), but that doesn't include builtins. When rewritten, it would also be nice to let the
// user know how many components came from builtins (see the
// CreatePipelineExceedVertexMaxComponentsWithBuiltins test), as that might not be obvious.
for (auto &var : variables) {
// Check if the variable is a patch. Patches can also be members of blocks,
// but if they are then the top-level arrayness has already been stripped
// by the time GetComponentsConsumedByType gets to it.
bool is_patch = patch_i_ds.find(var.ID) != patch_i_ds.end();
if (var.storageClass == spv::StorageClassInput) {
num_comp_in += src->GetComponentsConsumedByType(var.baseTypePtrID, strip_input_array_level && !is_patch);
} else { // var.storageClass == spv::StorageClassOutput
num_comp_out += src->GetComponentsConsumedByType(var.baseTypePtrID, strip_output_array_level && !is_patch);
}
}
switch (pStage->stage) {
case VK_SHADER_STAGE_VERTEX_BIT:
if (num_comp_out > limits.maxVertexOutputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Vertex shader exceeds "
"VkPhysicalDeviceLimits::maxVertexOutputComponents of %u "
"components by %u components",
limits.maxVertexOutputComponents, num_comp_out - limits.maxVertexOutputComponents);
}
if (max_comp_out > static_cast<int>(limits.maxVertexOutputComponents)) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Vertex shader output variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxVertexOutputComponents (%u)",
limits.maxVertexOutputComponents);
}
break;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
if (num_comp_in > limits.maxTessellationControlPerVertexInputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation control shader exceeds "
"VkPhysicalDeviceLimits::maxTessellationControlPerVertexInputComponents of %u "
"components by %u components",
limits.maxTessellationControlPerVertexInputComponents,
num_comp_in - limits.maxTessellationControlPerVertexInputComponents);
}
if (max_comp_in > static_cast<int>(limits.maxTessellationControlPerVertexInputComponents)) {
skip |=
LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation control shader input variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxTessellationControlPerVertexInputComponents (%u)",
limits.maxTessellationControlPerVertexInputComponents);
}
if (num_comp_out > limits.maxTessellationControlPerVertexOutputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation control shader exceeds "
"VkPhysicalDeviceLimits::maxTessellationControlPerVertexOutputComponents of %u "
"components by %u components",
limits.maxTessellationControlPerVertexOutputComponents,
num_comp_out - limits.maxTessellationControlPerVertexOutputComponents);
}
if (max_comp_out > static_cast<int>(limits.maxTessellationControlPerVertexOutputComponents)) {
skip |=
LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation control shader output variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxTessellationControlPerVertexOutputComponents (%u)",
limits.maxTessellationControlPerVertexOutputComponents);
}
break;
case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
if (num_comp_in > limits.maxTessellationEvaluationInputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation evaluation shader exceeds "
"VkPhysicalDeviceLimits::maxTessellationEvaluationInputComponents of %u "
"components by %u components",
limits.maxTessellationEvaluationInputComponents,
num_comp_in - limits.maxTessellationEvaluationInputComponents);
}
if (max_comp_in > static_cast<int>(limits.maxTessellationEvaluationInputComponents)) {
skip |=
LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation evaluation shader input variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxTessellationEvaluationInputComponents (%u)",
limits.maxTessellationEvaluationInputComponents);
}
if (num_comp_out > limits.maxTessellationEvaluationOutputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation evaluation shader exceeds "
"VkPhysicalDeviceLimits::maxTessellationEvaluationOutputComponents of %u "
"components by %u components",
limits.maxTessellationEvaluationOutputComponents,
num_comp_out - limits.maxTessellationEvaluationOutputComponents);
}
if (max_comp_out > static_cast<int>(limits.maxTessellationEvaluationOutputComponents)) {
skip |=
LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Tessellation evaluation shader output variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxTessellationEvaluationOutputComponents (%u)",
limits.maxTessellationEvaluationOutputComponents);
}
// Portability validation
if (IsExtEnabled(device_extensions.vk_khr_portability_subset)) {
if (is_iso_lines && (VK_FALSE == enabled_features.portability_subset_features.tessellationIsolines)) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-tessellationShader-06326",
"Invalid Pipeline CreateInfo state (portability error): Tessellation evaluation shader"
" is using abstract patch type IsoLines, but this is not supported on this platform");
}
if (is_point_mode && (VK_FALSE == enabled_features.portability_subset_features.tessellationPointMode)) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-tessellationShader-06327",
"Invalid Pipeline CreateInfo state (portability error): Tessellation evaluation shader"
" is using abstract patch type PointMode, but this is not supported on this platform");
}
}
break;
case VK_SHADER_STAGE_GEOMETRY_BIT:
if (num_comp_in > limits.maxGeometryInputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Geometry shader exceeds "
"VkPhysicalDeviceLimits::maxGeometryInputComponents of %u "
"components by %u components",
limits.maxGeometryInputComponents, num_comp_in - limits.maxGeometryInputComponents);
}
if (max_comp_in > static_cast<int>(limits.maxGeometryInputComponents)) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Geometry shader input variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxGeometryInputComponents (%u)",
limits.maxGeometryInputComponents);
}
if (num_comp_out > limits.maxGeometryOutputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Geometry shader exceeds "
"VkPhysicalDeviceLimits::maxGeometryOutputComponents of %u "
"components by %u components",
limits.maxGeometryOutputComponents, num_comp_out - limits.maxGeometryOutputComponents);
}
if (max_comp_out > static_cast<int>(limits.maxGeometryOutputComponents)) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Geometry shader output variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxGeometryOutputComponents (%u)",
limits.maxGeometryOutputComponents);
}
if (num_comp_out * num_vertices > limits.maxGeometryTotalOutputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Geometry shader exceeds "
"VkPhysicalDeviceLimits::maxGeometryTotalOutputComponents of %u "
"components by %u components",
limits.maxGeometryTotalOutputComponents,
num_comp_out * num_vertices - limits.maxGeometryTotalOutputComponents);
}
break;
case VK_SHADER_STAGE_FRAGMENT_BIT:
if (num_comp_in > limits.maxFragmentInputComponents) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Fragment shader exceeds "
"VkPhysicalDeviceLimits::maxFragmentInputComponents of %u "
"components by %u components",
limits.maxFragmentInputComponents, num_comp_in - limits.maxFragmentInputComponents);
}
if (max_comp_in > static_cast<int>(limits.maxFragmentInputComponents)) {
skip |= LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-Location-06272",
"Invalid Pipeline CreateInfo State: Fragment shader input variable uses location that "
"exceeds component limit VkPhysicalDeviceLimits::maxFragmentInputComponents (%u)",
limits.maxFragmentInputComponents);
}
break;
case VK_SHADER_STAGE_RAYGEN_BIT_NV:
case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
case VK_SHADER_STAGE_MISS_BIT_NV:
case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
case VK_SHADER_STAGE_CALLABLE_BIT_NV:
case VK_SHADER_STAGE_TASK_BIT_NV:
case VK_SHADER_STAGE_MESH_BIT_NV:
break;
default:
assert(false); // This should never happen
}
return skip;
}
bool CoreChecks::ValidateShaderStorageImageFormats(SHADER_MODULE_STATE const *src) const {
bool skip = false;
// Go through all ImageRead/Write instructions
for (auto insn : *src) {
switch (insn.opcode()) {
case spv::OpImageSparseRead:
case spv::OpImageRead: {
spirv_inst_iter type_def = src->GetImageFormatInst(insn.word(3));
if (type_def != src->end()) {
const auto dim = type_def.word(3);
// If the Image Dim operand is not SubpassData, the Image Format must not be Unknown, unless the
// StorageImageReadWithoutFormat Capability was declared.
if (dim != spv::DimSubpassData && type_def.word(8) == spv::ImageFormatUnknown) {
skip |= RequireFeature(enabled_features.core.shaderStorageImageReadWithoutFormat,
"shaderStorageImageReadWithoutFormat",
kVUID_Features_shaderStorageImageReadWithoutFormat);
}
}
break;
}
case spv::OpImageWrite: {
spirv_inst_iter type_def = src->GetImageFormatInst(insn.word(1));
if (type_def != src->end()) {
if (type_def.word(8) == spv::ImageFormatUnknown) {
skip |= RequireFeature(enabled_features.core.shaderStorageImageWriteWithoutFormat,
"shaderStorageImageWriteWithoutFormat",
kVUID_Features_shaderStorageImageWriteWithoutFormat);
}
}
break;
}
}
}
// Go through all variables for images and check decorations
for (auto insn : *src) {
if (insn.opcode() != spv::OpVariable)
continue;
uint32_t var = insn.word(2);
spirv_inst_iter type_def = src->GetImageFormatInst(insn.word(1));
if (type_def == src->end())
continue;
// Only check if the Image Dim operand is not SubpassData
const auto dim = type_def.word(3);
if (dim == spv::DimSubpassData) continue;
// Only check storage images (OpTypeImage Sampled operand == 2)
if (type_def.word(7) != 2) continue;
if (type_def.word(8) != spv::ImageFormatUnknown) continue;
decoration_set img_decorations = src->get_decorations(var);
if (!enabled_features.core.shaderStorageImageReadWithoutFormat &&
!(img_decorations.flags & decoration_set::nonreadable_bit)) {
skip |= LogError(device, "VUID-RuntimeSpirv-OpTypeImage-06270",
"shaderStorageImageReadWithoutFormat not supported but variable %" PRIu32
" "
" without format not marked a NonReadable",
var);
}
if (!enabled_features.core.shaderStorageImageWriteWithoutFormat &&
!(img_decorations.flags & decoration_set::nonwritable_bit)) {
skip |= LogError(device, "VUID-RuntimeSpirv-OpTypeImage-06269",
"shaderStorageImageWriteWithoutFormat not supported but variable %" PRIu32
" "
"without format not marked a NonWritable",
var);
}
}
return skip;
}
bool CoreChecks::ValidateShaderStageMaxResources(VkShaderStageFlagBits stage, const PIPELINE_STATE *pipeline) const {
bool skip = false;
uint32_t total_resources = 0;
// Only currently testing for graphics and compute pipelines
// TODO: Add check and support for Ray Tracing pipeline VUID 03428
if ((stage & (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT)) == 0) {
return false;
}
if (stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
if (pipeline->rp_state->use_dynamic_rendering) {
total_resources += pipeline->rp_state->dynamic_rendering_pipeline_create_info.colorAttachmentCount;
} else {
// "For the fragment shader stage the framebuffer color attachments also count against this limit"
total_resources +=
pipeline->rp_state->createInfo.pSubpasses[pipeline->create_info.graphics.subpass].colorAttachmentCount;
}
}
// TODO: This reuses a lot of GetDescriptorCountMaxPerStage but currently would need to make it agnostic in a way to handle
// input from CreatePipeline and CreatePipelineLayout level
for (auto set_layout : pipeline->pipeline_layout->set_layouts) {
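// Descriptors in UPDATE_AFTER_BIND_POOL set layouts count against
// maxPerStageUpdateAfterBindResources instead of maxPerStageResources, so skip them here.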
if ((set_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT) != 0) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < set_layout->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = set_layout->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (((stage & binding->stageFlags) != 0) && (binding->descriptorCount > 0)) {
// Check only descriptor types listed in maxPerStageResources description in spec
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
total_resources += binding->descriptorCount;
break;
default:
break;
}
}
}
}
if (total_resources > phys_dev_props.limits.maxPerStageResources) {
const char *vuid = (stage == VK_SHADER_STAGE_COMPUTE_BIT) ? "VUID-VkComputePipelineCreateInfo-layout-01687"
: "VUID-VkGraphicsPipelineCreateInfo-layout-01688";
skip |= LogError(pipeline->pipeline(), vuid,
"Invalid Pipeline CreateInfo State: Shader Stage %s exceeds component limit "
"VkPhysicalDeviceLimits::maxPerStageResources (%u)",
string_VkShaderStageFlagBits(stage), phys_dev_props.limits.maxPerStageResources);
}
return skip;
}
// copy the specialization constant value into buf, if it is present
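// Note: pMapEntries is indexed directly by spec_id, which assumes entry i carries the data for
// SpecId i; entries are not searched by constantID.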
void GetSpecConstantValue(VkPipelineShaderStageCreateInfo const *pStage, uint32_t spec_id, void *buf) {
VkSpecializationInfo const *spec = pStage->pSpecializationInfo;
if (spec && spec_id < spec->mapEntryCount) {
memcpy(buf, (uint8_t *)spec->pData + spec->pMapEntries[spec_id].offset, spec->pMapEntries[spec_id].size);
}
}
// Fill in value with the constant or specialization constant value, if available.
// Returns true if the value has been accurately filled out.
static bool GetIntConstantValue(spirv_inst_iter insn, SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
const layer_data::unordered_map<uint32_t, uint32_t> &id_to_spec_id, uint32_t *value) {
auto type_id = src->get_def(insn.word(1));
if (type_id.opcode() != spv::OpTypeInt || type_id.word(2) != 32) {
return false;
}
switch (insn.opcode()) {
case spv::OpSpecConstant:
*value = insn.word(3);
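// word 3 holds the default literal; it is overridden below if the pipeline supplies
// specialization data for this SpecId.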
GetSpecConstantValue(pStage, id_to_spec_id.at(insn.word(2)), value);
return true;
case spv::OpConstant:
*value = insn.word(3);
return true;
default:
return false;
}
}
// Map SPIR-V type to VK_COMPONENT_TYPE enum
VkComponentTypeNV GetComponentType(spirv_inst_iter insn, SHADER_MODULE_STATE const *src) {
switch (insn.opcode()) {
case spv::OpTypeInt:
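// For OpTypeInt, word 2 is the bit width and word 3 the signedness (1 = signed).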
switch (insn.word(2)) {
case 8:
return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT8_NV : VK_COMPONENT_TYPE_UINT8_NV;
case 16:
return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT16_NV : VK_COMPONENT_TYPE_UINT16_NV;
case 32:
return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT32_NV : VK_COMPONENT_TYPE_UINT32_NV;
case 64:
return insn.word(3) != 0 ? VK_COMPONENT_TYPE_SINT64_NV : VK_COMPONENT_TYPE_UINT64_NV;
default:
return VK_COMPONENT_TYPE_MAX_ENUM_NV;
}
case spv::OpTypeFloat:
switch (insn.word(2)) {
case 16:
return VK_COMPONENT_TYPE_FLOAT16_NV;
case 32:
return VK_COMPONENT_TYPE_FLOAT32_NV;
case 64:
return VK_COMPONENT_TYPE_FLOAT64_NV;
default:
return VK_COMPONENT_TYPE_MAX_ENUM_NV;
}
default:
return VK_COMPONENT_TYPE_MAX_ENUM_NV;
}
}
// Validate SPV_NV_cooperative_matrix behavior that can't be statically validated
// in SPIRV-Tools (e.g. due to specialization constant usage).
bool CoreChecks::ValidateCooperativeMatrix(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
const PIPELINE_STATE *pipeline) const {
bool skip = false;
// Map SPIR-V result ID to specialization constant id (SpecId decoration value)
layer_data::unordered_map<uint32_t, uint32_t> id_to_spec_id;
// Map SPIR-V result ID to the ID of its type.
layer_data::unordered_map<uint32_t, uint32_t> id_to_type_id;
struct CoopMatType {
uint32_t scope, rows, cols;
VkComponentTypeNV component_type;
bool all_constant;
CoopMatType() : scope(0), rows(0), cols(0), component_type(VK_COMPONENT_TYPE_MAX_ENUM_NV), all_constant(false) {}
void Init(uint32_t id, SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
const layer_data::unordered_map<uint32_t, uint32_t> &id_to_spec_id) {
spirv_inst_iter insn = src->get_def(id);
uint32_t component_type_id = insn.word(2);
uint32_t scope_id = insn.word(3);
uint32_t rows_id = insn.word(4);
uint32_t cols_id = insn.word(5);
auto component_type_iter = src->get_def(component_type_id);
auto scope_iter = src->get_def(scope_id);
auto rows_iter = src->get_def(rows_id);
auto cols_iter = src->get_def(cols_id);
all_constant = true;
if (!GetIntConstantValue(scope_iter, src, pStage, id_to_spec_id, &scope)) {
all_constant = false;
}
if (!GetIntConstantValue(rows_iter, src, pStage, id_to_spec_id, &rows)) {
all_constant = false;
}
if (!GetIntConstantValue(cols_iter, src, pStage, id_to_spec_id, &cols)) {
all_constant = false;
}
component_type = GetComponentType(component_type_iter, src);
}
};
bool seen_coopmat_capability = false;
for (auto insn : *src) {
// Whitelist instructions whose result can be a cooperative matrix type, and
// keep track of their types. It would be nice if SPIRV-Headers generated code
// to identify which instructions have a result type and result id. Lacking that,
// this whitelist is based on the set of instructions that
// SPV_NV_cooperative_matrix says can be used with cooperative matrix types.
switch (insn.opcode()) {
case spv::OpLoad:
case spv::OpCooperativeMatrixLoadNV:
case spv::OpCooperativeMatrixMulAddNV:
case spv::OpSNegate:
case spv::OpFNegate:
case spv::OpIAdd:
case spv::OpFAdd:
case spv::OpISub:
case spv::OpFSub:
case spv::OpFDiv:
case spv::OpSDiv:
case spv::OpUDiv:
case spv::OpMatrixTimesScalar:
case spv::OpConstantComposite:
case spv::OpCompositeConstruct:
case spv::OpConvertFToU:
case spv::OpConvertFToS:
case spv::OpConvertSToF:
case spv::OpConvertUToF:
case spv::OpUConvert:
case spv::OpSConvert:
case spv::OpFConvert:
id_to_type_id[insn.word(2)] = insn.word(1);
break;
default:
break;
}
switch (insn.opcode()) {
case spv::OpDecorate:
if (insn.word(2) == spv::DecorationSpecId) {
id_to_spec_id[insn.word(1)] = insn.word(3);
}
break;
case spv::OpCapability:
if (insn.word(1) == spv::CapabilityCooperativeMatrixNV) {
seen_coopmat_capability = true;
if (!(pStage->stage & phys_dev_ext_props.cooperative_matrix_props.cooperativeMatrixSupportedStages)) {
skip |= LogError(
pipeline->pipeline(), "VUID-RuntimeSpirv-OpTypeCooperativeMatrixNV-06322",
"OpTypeCooperativeMatrixNV used in shader stage not in cooperativeMatrixSupportedStages (= %u)",
phys_dev_ext_props.cooperative_matrix_props.cooperativeMatrixSupportedStages);
}
}
break;
case spv::OpMemoryModel:
// If the capability isn't enabled, don't bother with the rest of this function.
// OpMemoryModel is the first required instruction after all OpCapability instructions.
if (!seen_coopmat_capability) {
return skip;
}
break;
case spv::OpTypeCooperativeMatrixNV: {
CoopMatType m;
m.Init(insn.word(1), src, pStage, id_to_spec_id);
if (m.all_constant) {
// Validate that the type parameters are all supported for one of the
// operands of a cooperative matrix property.
bool valid = false;
for (unsigned i = 0; i < cooperative_matrix_properties.size(); ++i) {
if (cooperative_matrix_properties[i].AType == m.component_type &&
cooperative_matrix_properties[i].MSize == m.rows && cooperative_matrix_properties[i].KSize == m.cols &&
cooperative_matrix_properties[i].scope == m.scope) {
valid = true;
break;
}
if (cooperative_matrix_properties[i].BType == m.component_type &&
cooperative_matrix_properties[i].KSize == m.rows && cooperative_matrix_properties[i].NSize == m.cols &&
cooperative_matrix_properties[i].scope == m.scope) {
valid = true;
break;
}
if (cooperative_matrix_properties[i].CType == m.component_type &&
cooperative_matrix_properties[i].MSize == m.rows && cooperative_matrix_properties[i].NSize == m.cols &&
cooperative_matrix_properties[i].scope == m.scope) {
valid = true;
break;
}
if (cooperative_matrix_properties[i].DType == m.component_type &&
cooperative_matrix_properties[i].MSize == m.rows && cooperative_matrix_properties[i].NSize == m.cols &&
cooperative_matrix_properties[i].scope == m.scope) {
valid = true;
break;
}
}
if (!valid) {
skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_CooperativeMatrixType,
"OpTypeCooperativeMatrixNV (result id = %u) operands don't match a supported matrix type",
insn.word(1));
}
}
break;
}
case spv::OpCooperativeMatrixMulAddNV: {
CoopMatType a, b, c, d;
if (id_to_type_id.find(insn.word(2)) == id_to_type_id.end() ||
id_to_type_id.find(insn.word(3)) == id_to_type_id.end() ||
id_to_type_id.find(insn.word(4)) == id_to_type_id.end() ||
id_to_type_id.find(insn.word(5)) == id_to_type_id.end()) {
// Couldn't find type of matrix
assert(false);
break;
}
d.Init(id_to_type_id[insn.word(2)], src, pStage, id_to_spec_id);
a.Init(id_to_type_id[insn.word(3)], src, pStage, id_to_spec_id);
b.Init(id_to_type_id[insn.word(4)], src, pStage, id_to_spec_id);
c.Init(id_to_type_id[insn.word(5)], src, pStage, id_to_spec_id);
if (a.all_constant && b.all_constant && c.all_constant && d.all_constant) {
// Validate that the type parameters are all supported for the same
// cooperative matrix property.
bool valid = false;
for (unsigned i = 0; i < cooperative_matrix_properties.size(); ++i) {
if (cooperative_matrix_properties[i].AType == a.component_type &&
cooperative_matrix_properties[i].MSize == a.rows && cooperative_matrix_properties[i].KSize == a.cols &&
cooperative_matrix_properties[i].scope == a.scope &&
cooperative_matrix_properties[i].BType == b.component_type &&
cooperative_matrix_properties[i].KSize == b.rows && cooperative_matrix_properties[i].NSize == b.cols &&
cooperative_matrix_properties[i].scope == b.scope &&
cooperative_matrix_properties[i].CType == c.component_type &&
cooperative_matrix_properties[i].MSize == c.rows && cooperative_matrix_properties[i].NSize == c.cols &&
cooperative_matrix_properties[i].scope == c.scope &&
cooperative_matrix_properties[i].DType == d.component_type &&
cooperative_matrix_properties[i].MSize == d.rows && cooperative_matrix_properties[i].NSize == d.cols &&
cooperative_matrix_properties[i].scope == d.scope) {
valid = true;
break;
}
}
if (!valid) {
skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_CooperativeMatrixMulAdd,
"OpCooperativeMatrixMulAddNV (result id = %u) operands don't match a supported matrix "
"VkCooperativeMatrixPropertiesNV",
insn.word(2));
}
}
break;
}
default:
break;
}
}
return skip;
}
bool CoreChecks::ValidateShaderResolveQCOM(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
const PIPELINE_STATE *pipeline) const {
bool skip = false;
// If the pipeline's subpass description contains flag VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM,
// then the fragment shader must not enable the SPIRV SampleRateShading capability.
if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
for (auto insn : *src) {
switch (insn.opcode()) {
case spv::OpCapability:
if (insn.word(1) == spv::CapabilitySampleRateShading) {
auto subpass_flags =
(pipeline->rp_state == nullptr)
? 0
: pipeline->rp_state->createInfo.pSubpasses[pipeline->create_info.graphics.subpass].flags;
if ((subpass_flags & VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM) != 0) {
skip |=
LogError(pipeline->pipeline(), "VUID-RuntimeSpirv-SampleRateShading-06378",
"Invalid Pipeline CreateInfo State: fragment shader enables SampleRateShading capability "
"and the subpass flags includes VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM.");
}
}
break;
default:
break;
}
}
}
return skip;
}
bool CoreChecks::ValidateShaderSubgroupSizeControl(VkPipelineShaderStageCreateInfo const *pStage) const {
bool skip = false;
if ((pStage->flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) != 0 &&
!enabled_features.subgroup_size_control_features.subgroupSizeControl) {
skip |= LogError(
device, "VUID-VkPipelineShaderStageCreateInfo-flags-02784",
"VkPipelineShaderStageCreateInfo flags contain VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT, "
"but the VkPhysicalDeviceSubgroupSizeControlFeaturesEXT::subgroupSizeControl feature is not enabled.");
}
if ((pStage->flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) != 0 &&
!enabled_features.subgroup_size_control_features.computeFullSubgroups) {
skip |= LogError(
device, "VUID-VkPipelineShaderStageCreateInfo-flags-02785",
"VkPipelineShaderStageCreateInfo flags contain VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT, but the "
"VkPhysicalDeviceSubgroupSizeControlFeaturesEXT::computeFullSubgroups feature is not enabled");
}
return skip;
}
bool CoreChecks::ValidateAtomicsTypes(SHADER_MODULE_STATE const *src) const {
bool skip = false;
// "If sparseImageInt64Atomics is enabled, shaderImageInt64Atomics must be enabled"
const bool valid_image_64_int = enabled_features.shader_image_atomic_int64_features.shaderImageInt64Atomics == VK_TRUE;
const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT &float_features = enabled_features.shader_atomic_float_features;
const VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT &float2_features = enabled_features.shader_atomic_float2_features;
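// Each valid_* flag below is true when at least one feature that could permit that class of
// float atomic is enabled; the coarse per-class check fires first, and the per-opcode /
// per-width checks then pinpoint the exact missing feature.
// clang-format off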
const bool valid_storage_buffer_float = (
(float_features.shaderBufferFloat32Atomics == VK_TRUE) ||
(float_features.shaderBufferFloat32AtomicAdd == VK_TRUE) ||
(float_features.shaderBufferFloat64Atomics == VK_TRUE) ||
(float_features.shaderBufferFloat64AtomicAdd == VK_TRUE) ||
(float2_features.shaderBufferFloat16Atomics == VK_TRUE) ||
(float2_features.shaderBufferFloat16AtomicAdd == VK_TRUE) ||
(float2_features.shaderBufferFloat16AtomicMinMax == VK_TRUE) ||
(float2_features.shaderBufferFloat32AtomicMinMax == VK_TRUE) ||
(float2_features.shaderBufferFloat64AtomicMinMax == VK_TRUE));
const bool valid_workgroup_float = (
(float_features.shaderSharedFloat32Atomics == VK_TRUE) ||
(float_features.shaderSharedFloat32AtomicAdd == VK_TRUE) ||
(float_features.shaderSharedFloat64Atomics == VK_TRUE) ||
(float_features.shaderSharedFloat64AtomicAdd == VK_TRUE) ||
(float2_features.shaderSharedFloat16Atomics == VK_TRUE) ||
(float2_features.shaderSharedFloat16AtomicAdd == VK_TRUE) ||
(float2_features.shaderSharedFloat16AtomicMinMax == VK_TRUE) ||
(float2_features.shaderSharedFloat32AtomicMinMax == VK_TRUE) ||
(float2_features.shaderSharedFloat64AtomicMinMax == VK_TRUE));
const bool valid_image_float = (
(float_features.shaderImageFloat32Atomics == VK_TRUE) ||
(float_features.shaderImageFloat32AtomicAdd == VK_TRUE) ||
(float2_features.shaderImageFloat32AtomicMinMax == VK_TRUE));
const bool valid_16_float = (
(float2_features.shaderBufferFloat16Atomics == VK_TRUE) ||
(float2_features.shaderBufferFloat16AtomicAdd == VK_TRUE) ||
(float2_features.shaderBufferFloat16AtomicMinMax == VK_TRUE) ||
(float2_features.shaderSharedFloat16Atomics == VK_TRUE) ||
(float2_features.shaderSharedFloat16AtomicAdd == VK_TRUE) ||
(float2_features.shaderSharedFloat16AtomicMinMax == VK_TRUE));
const bool valid_32_float = (
(float_features.shaderBufferFloat32Atomics == VK_TRUE) ||
(float_features.shaderBufferFloat32AtomicAdd == VK_TRUE) ||
(float_features.shaderSharedFloat32Atomics == VK_TRUE) ||
(float_features.shaderSharedFloat32AtomicAdd == VK_TRUE) ||
(float_features.shaderImageFloat32Atomics == VK_TRUE) ||
(float_features.shaderImageFloat32AtomicAdd == VK_TRUE) ||
(float2_features.shaderBufferFloat32AtomicMinMax == VK_TRUE) ||
(float2_features.shaderSharedFloat32AtomicMinMax == VK_TRUE) ||
(float2_features.shaderImageFloat32AtomicMinMax == VK_TRUE));
const bool valid_64_float = (
(float_features.shaderBufferFloat64Atomics == VK_TRUE) ||
(float_features.shaderBufferFloat64AtomicAdd == VK_TRUE) ||
(float_features.shaderSharedFloat64Atomics == VK_TRUE) ||
(float_features.shaderSharedFloat64AtomicAdd == VK_TRUE) ||
(float2_features.shaderBufferFloat64AtomicMinMax == VK_TRUE) ||
(float2_features.shaderSharedFloat64AtomicMinMax == VK_TRUE));
// clang-format on
for (const auto &atomic_inst : src->GetAtomicInstructions()) {
const atomic_instruction &atomic = atomic_inst.second;
const uint32_t opcode = src->at(atomic_inst.first).opcode();
if ((atomic.bit_width == 64) && (atomic.type == spv::OpTypeInt)) {
// Validate 64-bit int atomics
if (((atomic.storage_class == spv::StorageClassStorageBuffer) || (atomic.storage_class == spv::StorageClassUniform)) &&
(enabled_features.core12.shaderBufferInt64Atomics == VK_FALSE)) {
skip |= LogError(device, "VUID-RuntimeSpirv-None-06278",
"%s: Can't use 64-bit int atomics operations (%s) with %s storage class without "
"shaderBufferInt64Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode),
StorageClassName(atomic.storage_class));
} else if ((atomic.storage_class == spv::StorageClassWorkgroup) &&
(enabled_features.core12.shaderSharedInt64Atomics == VK_FALSE)) {
skip |= LogError(device, "VUID-RuntimeSpirv-None-06279",
"%s: Can't use 64-bit int atomics operations (%s) with Workgroup storage class without "
"shaderSharedInt64Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
} else if ((atomic.storage_class == spv::StorageClassImage) && (valid_image_64_int == false)) {
skip |= LogError(device, "VUID-RuntimeSpirv-None-06288",
"%s: Can't use 64-bit int atomics operations (%s) with Image storage class without "
"shaderImageInt64Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
}
} else if (atomic.type == spv::OpTypeFloat) {
// Validate Floats
if (atomic.storage_class == spv::StorageClassStorageBuffer) {
if (valid_storage_buffer_float == false) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_shader_atomic_float2) ? "VUID-RuntimeSpirv-None-06284"
: "VUID-RuntimeSpirv-None-06280";
skip |= LogError(device, vuid,
"%s: Can't use float atomics operations (%s) with StorageBuffer storage class without "
"shaderBufferFloat32Atomics or shaderBufferFloat32AtomicAdd or shaderBufferFloat64Atomics or "
"shaderBufferFloat64AtomicAdd or shaderBufferFloat16Atomics or shaderBufferFloat16AtomicAdd "
"or shaderBufferFloat16AtomicMinMax or shaderBufferFloat32AtomicMinMax or "
"shaderBufferFloat64AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
} else if (opcode == spv::OpAtomicFAddEXT) {
if ((atomic.bit_width == 16) && (float2_features.shaderBufferFloat16AtomicAdd == VK_FALSE)) {
skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 16-bit float atomics for add operations (OpAtomicFAddEXT) with "
"StorageBuffer storage class without shaderBufferFloat16AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 32) && (float_features.shaderBufferFloat32AtomicAdd == VK_FALSE)) {
skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 32-bit float atomics for add operations (OpAtomicFAddEXT) with "
"StorageBuffer storage class without shaderBufferFloat32AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 64) && (float_features.shaderBufferFloat64AtomicAdd == VK_FALSE)) {
skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 64-bit float atomics for add operations (OpAtomicFAddEXT) with "
"StorageBuffer storage class without shaderBufferFloat64AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
} else if (opcode == spv::OpAtomicFMinEXT || opcode == spv::OpAtomicFMaxEXT) {
if ((atomic.bit_width == 16) && (float2_features.shaderBufferFloat16AtomicMinMax == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 16-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
"StorageBuffer storage class without shaderBufferFloat16AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 32) && (float2_features.shaderBufferFloat32AtomicMinMax == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 32-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
"StorageBuffer storage class without shaderBufferFloat32AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 64) && (float2_features.shaderBufferFloat64AtomicMinMax == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 64-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
"StorageBuffer storage class without shaderBufferFloat64AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
} else {
// Assume is valid load/store/exchange (rest of supported atomic operations) or else spirv-val will catch
if ((atomic.bit_width == 16) && (float2_features.shaderBufferFloat16Atomics == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 16-bit float atomics for load/store/exhange operations (OpAtomicLoad, OpAtomicStore, "
"OpAtomicExchange) with StorageBuffer storage class without shaderBufferFloat16Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 32) && (float_features.shaderBufferFloat32Atomics == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 32-bit float atomics for load/store/exhange operations (OpAtomicLoad, OpAtomicStore, "
"OpAtomicExchange) with StorageBuffer storage class without shaderBufferFloat32Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 64) && (float_features.shaderBufferFloat64Atomics == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 64-bit float atomics for load/store/exhange operations (OpAtomicLoad, OpAtomicStore, "
"OpAtomicExchange) with StorageBuffer storage class without shaderBufferFloat64Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
}
} else if (atomic.storage_class == spv::StorageClassWorkgroup) {
if (valid_workgroup_float == false) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_shader_atomic_float2) ? "VUID-RuntimeSpirv-None-06285"
: "VUID-RuntimeSpirv-None-06281";
skip |=
LogError(device, vuid,
"%s: Can't use float atomics operations (%s) with Workgroup storage class without "
"shaderSharedFloat32Atomics or "
"shaderSharedFloat32AtomicAdd or shaderSharedFloat64Atomics or shaderSharedFloat64AtomicAdd or "
"shaderSharedFloat16Atomics or shaderSharedFloat16AtomicAdd or shaderSharedFloat16AtomicMinMax or "
"shaderSharedFloat32AtomicMinMax or shaderSharedFloat64AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
} else if (opcode == spv::OpAtomicFAddEXT) {
if ((atomic.bit_width == 16) && (float2_features.shaderSharedFloat16AtomicAdd == VK_FALSE)) {
skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 16-bit float atomics for add operations (OpAtomicFAddEXT) with Workgroup "
"storage class without shaderSharedFloat16AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 32) && (float_features.shaderSharedFloat32AtomicAdd == VK_FALSE)) {
skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 32-bit float atomics for add operations (OpAtomicFAddEXT) with Workgroup "
"storage class without shaderSharedFloat32AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 64) && (float_features.shaderSharedFloat64AtomicAdd == VK_FALSE)) {
skip |= LogError(device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 64-bit float atomics for add operations (OpAtomicFAddEXT) with Workgroup "
"storage class without shaderSharedFloat64AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
} else if (opcode == spv::OpAtomicFMinEXT || opcode == spv::OpAtomicFMaxEXT) {
if ((atomic.bit_width == 16) && (float2_features.shaderSharedFloat16AtomicMinMax == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 16-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
"Workgroup storage class without shaderSharedFloat16AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 32) && (float2_features.shaderSharedFloat32AtomicMinMax == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 32-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
"Workgroup storage class without shaderSharedFloat32AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 64) && (float2_features.shaderSharedFloat64AtomicMinMax == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 64-bit float atomics for min/max operations (OpAtomicFMinEXT or OpAtomicFMaxEXT) with "
"Workgroup storage class without shaderSharedFloat64AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
} else {
// Assume is valid load/store/exchange (rest of supported atomic operations) or else spirv-val will catch
if ((atomic.bit_width == 16) && (float2_features.shaderSharedFloat16Atomics == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 16-bit float atomics for load/store/exhange operations (OpAtomicLoad, OpAtomicStore, "
"OpAtomicExchange) with Workgroup storage class without shaderSharedFloat16Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 32) && (float_features.shaderSharedFloat32Atomics == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 32-bit float atomics for load/store/exhange operations (OpAtomicLoad, OpAtomicStore, "
"OpAtomicExchange) with Workgroup storage class without shaderSharedFloat32Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
} else if ((atomic.bit_width == 64) && (float_features.shaderSharedFloat64Atomics == VK_FALSE)) {
skip |= LogError(
device, kVUID_Core_Shader_AtomicFeature,
"%s: Can't use 64-bit float atomics for load/store/exhange operations (OpAtomicLoad, OpAtomicStore, "
"OpAtomicExchange) with Workgroup storage class without shaderSharedFloat64Atomics enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str());
}
}
} else if ((atomic.storage_class == spv::StorageClassImage) && (valid_image_float == false)) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_shader_atomic_float2) ? "VUID-RuntimeSpirv-None-06286"
: "VUID-RuntimeSpirv-None-06282";
skip |= LogError(
device, vuid,
"%s: Can't use float atomics operations (%s) with Image storage class without shaderImageFloat32Atomics or "
"shaderImageFloat32AtomicAdd or shaderImageFloat32AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
} else if ((atomic.bit_width == 16) && (valid_16_float == false)) {
skip |= LogError(device, "VUID-RuntimeSpirv-None-06337",
"%s: Can't use 16-bit float atomics operations (%s) without shaderBufferFloat16Atomics, "
"shaderBufferFloat16AtomicAdd, shaderBufferFloat16AtomicMinMax, shaderSharedFloat16Atomics, "
"shaderSharedFloat16AtomicAdd or shaderSharedFloat16AtomicMinMax enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
} else if ((atomic.bit_width == 32) && (valid_32_float == false)) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_shader_atomic_float2) ? "VUID-RuntimeSpirv-None-06338"
: "VUID-RuntimeSpirv-None-06335";
skip |= LogError(device, vuid,
"%s: Can't use 32-bit float atomics operations (%s) without shaderBufferFloat32AtomicMinMax, "
"shaderSharedFloat32AtomicMinMax, shaderImageFloat32AtomicMinMax, sparseImageFloat32AtomicMinMax, "
"shaderBufferFloat32Atomics, shaderBufferFloat32AtomicAdd, shaderSharedFloat32Atomics, "
"shaderSharedFloat32AtomicAdd, shaderImageFloat32Atomics, shaderImageFloat32AtomicAdd, "
"sparseImageFloat32Atomics or sparseImageFloat32AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
} else if ((atomic.bit_width == 64) && (valid_64_float == false)) {
const char *vuid = IsExtEnabled(device_extensions.vk_ext_shader_atomic_float2) ? "VUID-RuntimeSpirv-None-06339"
: "VUID-RuntimeSpirv-None-06336";
skip |= LogError(device, vuid,
"%s: Can't use 64-bit float atomics operations (%s) without shaderBufferFloat64AtomicMinMax, "
"shaderSharedFloat64AtomicMinMax, shaderBufferFloat64Atomics, shaderBufferFloat64AtomicAdd, "
"shaderSharedFloat64Atomics or shaderSharedFloat64AtomicAdd enabled.",
report_data->FormatHandle(src->vk_shader_module()).c_str(), string_SpvOpcode(opcode));
}
}
}
return skip;
}
bool CoreChecks::ValidateExecutionModes(SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint, VkShaderStageFlagBits stage,
const PIPELINE_STATE *pipeline) const {
auto entrypoint_id = entrypoint.word(2);
// The first denorm execution mode encountered, along with its bit width.
// Used to check that denormBehaviorIndependence is respected.
std::pair<spv::ExecutionMode, uint32_t> first_denorm_execution_mode = std::make_pair(spv::ExecutionModeMax, 0);
// The first rounding mode encountered, along with its bit width.
// Used to check that roundingModeIndependence is respected.
std::pair<spv::ExecutionMode, uint32_t> first_rounding_mode = std::make_pair(spv::ExecutionModeMax, 0);
bool skip = false;
uint32_t vertices_out = 0;
uint32_t invocations = 0;
const auto &execution_mode_inst = src->GetExecutionModeInstructions();
auto it = execution_mode_inst.find(entrypoint_id);
if (it != execution_mode_inst.end()) {
for (auto insn : it->second) {
auto mode = insn.word(2);
switch (mode) {
case spv::ExecutionModeSignedZeroInfNanPreserve: {
auto bit_width = insn.word(3);
if (bit_width == 16 && !phys_dev_props_core12.shaderSignedZeroInfNanPreserveFloat16) {
skip |= LogError(
device, "VUID-RuntimeSpirv-shaderSignedZeroInfNanPreserveFloat16-06293",
"Shader requires SignedZeroInfNanPreserve for bit width 16 but it is not enabled on the device");
} else if (bit_width == 32 && !phys_dev_props_core12.shaderSignedZeroInfNanPreserveFloat32) {
skip |= LogError(
device, "VUID-RuntimeSpirv-shaderSignedZeroInfNanPreserveFloat32-06294",
"Shader requires SignedZeroInfNanPreserve for bit width 32 but it is not enabled on the device");
} else if (bit_width == 64 && !phys_dev_props_core12.shaderSignedZeroInfNanPreserveFloat64) {
skip |= LogError(
device, "VUID-RuntimeSpirv-shaderSignedZeroInfNanPreserveFloat64-06295",
"Shader requires SignedZeroInfNanPreserve for bit width 64 but it is not enabled on the device");
}
break;
}
case spv::ExecutionModeDenormPreserve: {
auto bit_width = insn.word(3);
if (bit_width == 16 && !phys_dev_props_core12.shaderDenormPreserveFloat16) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDenormPreserveFloat16-06296",
"Shader requires DenormPreserve for bit width 16 but it is not enabled on the device");
} else if (bit_width == 32 && !phys_dev_props_core12.shaderDenormPreserveFloat32) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDenormPreserveFloat32-06297",
"Shader requires DenormPreserve for bit width 32 but it is not enabled on the device");
} else if (bit_width == 64 && !phys_dev_props_core12.shaderDenormPreserveFloat64) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDenormPreserveFloat64-06298",
"Shader requires DenormPreserve for bit width 64 but it is not enabled on the device");
}
if (first_denorm_execution_mode.first == spv::ExecutionModeMax) {
// Register the first denorm execution mode found
first_denorm_execution_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width);
} else if (first_denorm_execution_mode.first != mode && first_denorm_execution_mode.second != bit_width) {
switch (phys_dev_props_core12.denormBehaviorIndependence) {
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY:
if (first_denorm_execution_mode.second != 32 && bit_width != 32) {
skip |= LogError(device, "VUID-RuntimeSpirv-denormBehaviorIndependence-06289",
"Shader uses different denorm execution modes for 16 and 64-bit but "
"denormBehaviorIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device");
}
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL:
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE:
skip |= LogError(device, "VUID-RuntimeSpirv-denormBehaviorIndependence-06290",
"Shader uses different denorm execution modes for different bit widths but "
"denormBehaviorIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device");
break;
default:
break;
}
}
break;
}
case spv::ExecutionModeDenormFlushToZero: {
auto bit_width = insn.word(3);
if (bit_width == 16 && !phys_dev_props_core12.shaderDenormFlushToZeroFloat16) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDenormFlushToZeroFloat16-06299",
"Shader requires DenormFlushToZero for bit width 16 but it is not enabled on the device");
} else if (bit_width == 32 && !phys_dev_props_core12.shaderDenormFlushToZeroFloat32) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDenormFlushToZeroFloat32-06300",
"Shader requires DenormFlushToZero for bit width 32 but it is not enabled on the device");
} else if (bit_width == 64 && !phys_dev_props_core12.shaderDenormFlushToZeroFloat64) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDenormFlushToZeroFloat64-06301",
"Shader requires DenormFlushToZero for bit width 64 but it is not enabled on the device");
}
if (first_denorm_execution_mode.first == spv::ExecutionModeMax) {
// Register the first denorm execution mode found
first_denorm_execution_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width);
} else if (first_denorm_execution_mode.first != mode && first_denorm_execution_mode.second != bit_width) {
switch (phys_dev_props_core12.denormBehaviorIndependence) {
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY:
if (first_denorm_execution_mode.second != 32 && bit_width != 32) {
skip |= LogError(device, "VUID-RuntimeSpirv-denormBehaviorIndependence-06289",
"Shader uses different denorm execution modes for 16 and 64-bit but "
"denormBehaviorIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device");
}
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL:
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE:
skip |= LogError(device, "VUID-RuntimeSpirv-denormBehaviorIndependence-06290",
"Shader uses different denorm execution modes for different bit widths but "
"denormBehaviorIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device");
break;
default:
break;
}
}
break;
}
case spv::ExecutionModeRoundingModeRTE: {
auto bit_width = insn.word(3);
if (bit_width == 16 && !phys_dev_props_core12.shaderRoundingModeRTEFloat16) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderRoundingModeRTEFloat16-06302",
"Shader requires RoundingModeRTE for bit width 16 but it is not enabled on the device");
} else if (bit_width == 32 && !phys_dev_props_core12.shaderRoundingModeRTEFloat32) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderRoundingModeRTEFloat32-06303",
"Shader requires RoundingModeRTE for bit width 32 but it is not enabled on the device");
} else if (bit_width == 64 && !phys_dev_props_core12.shaderRoundingModeRTEFloat64) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderRoundingModeRTEFloat64-06304",
"Shader requires RoundingModeRTE for bit width 64 but it is not enabled on the device");
}
if (first_rounding_mode.first == spv::ExecutionModeMax) {
// Register the first rounding mode found
first_rounding_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width);
} else if (first_rounding_mode.first != mode && first_rounding_mode.second != bit_width) {
switch (phys_dev_props_core12.roundingModeIndependence) {
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY:
if (first_rounding_mode.second != 32 && bit_width != 32) {
skip |= LogError(device, "VUID-RuntimeSpirv-roundingModeIndependence-06291",
"Shader uses different rounding modes for 16 and 64-bit but "
"roundingModeIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device");
}
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL:
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE:
skip |= LogError(device, "VUID-RuntimeSpirv-roundingModeIndependence-06292",
"Shader uses different rounding modes for different bit widths but "
"roundingModeIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device");
break;
default:
break;
}
}
break;
}
case spv::ExecutionModeRoundingModeRTZ: {
auto bit_width = insn.word(3);
if (bit_width == 16 && !phys_dev_props_core12.shaderRoundingModeRTZFloat16) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderRoundingModeRTZFloat16-06305",
"Shader requires RoundingModeRTZ for bit width 16 but it is not enabled on the device");
} else if (bit_width == 32 && !phys_dev_props_core12.shaderRoundingModeRTZFloat32) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderRoundingModeRTZFloat32-06306",
"Shader requires RoundingModeRTZ for bit width 32 but it is not enabled on the device");
} else if (bit_width == 64 && !phys_dev_props_core12.shaderRoundingModeRTZFloat64) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderRoundingModeRTZFloat64-06307",
"Shader requires RoundingModeRTZ for bit width 64 but it is not enabled on the device");
}
if (first_rounding_mode.first == spv::ExecutionModeMax) {
// Register the first rounding mode found
first_rounding_mode = std::make_pair(static_cast<spv::ExecutionMode>(mode), bit_width);
} else if (first_rounding_mode.first != mode && first_rounding_mode.second != bit_width) {
switch (phys_dev_props_core12.roundingModeIndependence) {
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY:
if (first_rounding_mode.second != 32 && bit_width != 32) {
skip |= LogError(device, "VUID-RuntimeSpirv-roundingModeIndependence-06291",
"Shader uses different rounding modes for 16 and 64-bit but "
"roundingModeIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY on the device");
}
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL:
break;
case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE:
skip |= LogError(device, "VUID-RuntimeSpirv-roundingModeIndependence-06292",
"Shader uses different rounding modes for different bit widths but "
"roundingModeIndependence is "
"VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE on the device");
break;
default:
break;
}
}
break;
}
case spv::ExecutionModeOutputVertices: {
vertices_out = insn.word(3);
break;
}
case spv::ExecutionModeInvocations: {
invocations = insn.word(3);
break;
}
case spv::ExecutionModeLocalSizeId: {
if (!enabled_features.maintenance4_features.maintenance4) {
skip |= LogError(device, "VUID-RuntimeSpirv-LocalSizeId-06434",
"LocalSizeId execution mode used but maintenance4 feature not enabled");
}
break;
}
case spv::ExecutionModeEarlyFragmentTests: {
if ((stage == VK_SHADER_STAGE_FRAGMENT_BIT) &&
(pipeline && pipeline->create_info.graphics.pDepthStencilState &&
(pipeline->create_info.graphics.pDepthStencilState->flags &
(VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM |
VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM)) != 0)) {
skip |= LogError(
device, " VUID-VkGraphicsPipelineCreateInfo-pStages-06466",
"The fragment shader enables early fragment tests, but VkPipelineDepthStencilStateCreateInfo::flags == "
"%s",
string_VkPipelineDepthStencilStateCreateFlags(pipeline->create_info.graphics.pDepthStencilState->flags)
.c_str());
}
break;
}
}
}
}
if (entrypoint.word(1) == spv::ExecutionModelGeometry) {
if (vertices_out == 0 || vertices_out > phys_dev_props.limits.maxGeometryOutputVertices) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00714",
"Geometry shader entry point must have an OpExecutionMode instruction that "
"specifies a maximum output vertex count that is greater than 0 and less "
"than or equal to maxGeometryOutputVertices. "
"OutputVertices=%d, maxGeometryOutputVertices=%d",
vertices_out, phys_dev_props.limits.maxGeometryOutputVertices);
}
if (invocations == 0 || invocations > phys_dev_props.limits.maxGeometryShaderInvocations) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00715",
"Geometry shader entry point must have an OpExecutionMode instruction that "
"specifies an invocation count that is greater than 0 and less "
"than or equal to maxGeometryShaderInvocations. "
"Invocations=%d, maxGeometryShaderInvocations=%d",
invocations, phys_dev_props.limits.maxGeometryShaderInvocations);
}
}
return skip;
}
// For given pipelineLayout verify that the set_layout_node at slot.first
// has the requested binding at slot.second and return ptr to that binding
static VkDescriptorSetLayoutBinding const *GetDescriptorBinding(PIPELINE_LAYOUT_STATE const *pipelineLayout,
DescriptorSlot slot) {
if (!pipelineLayout) return nullptr;
if (slot.set >= pipelineLayout->set_layouts.size()) return nullptr;
return pipelineLayout->set_layouts[slot.set]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.binding);
}
// If PointList topology is specified in the pipeline, verify that a shader geometry stage writes PointSize
// o If there is only a vertex shader : gl_PointSize must be written when using points
// o If there is a geometry or tessellation shader:
// - If shaderTessellationAndGeometryPointSize feature is enabled:
// * gl_PointSize must be written in the final geometry stage
// - If shaderTessellationAndGeometryPointSize feature is disabled:
// * gl_PointSize must NOT be written and a default of 1.0 is assumed
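// Illustrative GLSL for the simplest case (a hypothetical vertex-only pipeline using points):
//   void main() {
//       gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
//       gl_PointSize = 1.0;  // required write when rasterizing VK_PRIMITIVE_TOPOLOGY_POINT_LIST
//   }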
bool CoreChecks::ValidatePointListShaderState(const PIPELINE_STATE *pipeline, SHADER_MODULE_STATE const *src,
spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) const {
if (pipeline->topology_at_rasterizer != VK_PRIMITIVE_TOPOLOGY_POINT_LIST) {
return false;
}
bool pointsize_written = false;
bool skip = false;
// Search for PointSize built-in decorations
for (const auto &set : src->GetBuiltinDecorationList()) {
auto insn = src->at(set.offset);
if (set.builtin == spv::BuiltInPointSize) {
pointsize_written = src->IsBuiltInWritten(insn, entrypoint);
if (pointsize_written) {
break;
}
}
}
if ((stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT || stage == VK_SHADER_STAGE_GEOMETRY_BIT) &&
!enabled_features.core.shaderTessellationAndGeometryPointSize) {
if (pointsize_written) {
skip |= LogError(pipeline->pipeline(), kVUID_Core_Shader_PointSizeBuiltInOverSpecified,
"Pipeline topology is set to POINT_LIST and geometry or tessellation shaders write PointSize which "
"is prohibited when the shaderTessellationAndGeometryPointSize feature is not enabled.");
}
} else if (!pointsize_written) {
skip |=
LogError(pipeline->pipeline(), kVUID_Core_Shader_MissingPointSizeBuiltIn,
"Pipeline topology is set to POINT_LIST, but PointSize is not written to in the shader corresponding to %s.",
string_VkShaderStageFlagBits(stage));
}
return skip;
}
bool CoreChecks::ValidatePrimitiveRateShaderState(const PIPELINE_STATE *pipeline, SHADER_MODULE_STATE const *src,
spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) const {
bool primitiverate_written = false;
bool viewportindex_written = false;
bool viewportmask_written = false;
bool skip = false;
// Check if the primitive shading rate is written
for (const auto &set : src->GetBuiltinDecorationList()) {
auto insn = src->at(set.offset);
if (set.builtin == spv::BuiltInPrimitiveShadingRateKHR) {
primitiverate_written = src->IsBuiltInWritten(insn, entrypoint);
} else if (set.builtin == spv::BuiltInViewportIndex) {
viewportindex_written = src->IsBuiltInWritten(insn, entrypoint);
} else if (set.builtin == spv::BuiltInViewportMaskNV) {
viewportmask_written = src->IsBuiltInWritten(insn, entrypoint);
}
if (primitiverate_written && viewportindex_written && viewportmask_written) {
break;
}
}
if (!phys_dev_ext_props.fragment_shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports &&
(pipeline->GetPipelineType() == VK_PIPELINE_BIND_POINT_GRAPHICS) && pipeline->create_info.graphics.pViewportState) {
if (!IsDynamic(pipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) &&
pipeline->create_info.graphics.pViewportState->viewportCount > 1 && primitiverate_written) {
skip |= LogError(pipeline->pipeline(),
"VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04503",
"vkCreateGraphicsPipelines: %s shader statically writes to PrimitiveShadingRateKHR built-in, but "
"multiple viewports "
"are used and the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
string_VkShaderStageFlagBits(stage));
}
if (primitiverate_written && viewportindex_written) {
skip |= LogError(pipeline->pipeline(),
"VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04504",
"vkCreateGraphicsPipelines: %s shader statically writes to both PrimitiveShadingRateKHR and "
"ViewportIndex built-ins,"
"but the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
string_VkShaderStageFlagBits(stage));
}
if (primitiverate_written && viewportmask_written) {
skip |= LogError(pipeline->pipeline(),
"VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04505",
"vkCreateGraphicsPipelines: %s shader statically writes to both PrimitiveShadingRateKHR and "
"ViewportMaskNV built-ins,"
"but the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
string_VkShaderStageFlagBits(stage));
}
}
return skip;
}
bool CoreChecks::ValidateDecorations(SHADER_MODULE_STATE const* module) const {
bool skip = false;
std::vector<spirv_inst_iter> xfb_streams;
std::vector<spirv_inst_iter> xfb_buffers;
std::vector<spirv_inst_iter> xfb_offsets;
for (const auto &op_decorate : module->GetDecorationInstructions()) {
uint32_t decoration = op_decorate.word(2);
if (decoration == spv::DecorationXfbStride) {
uint32_t stride = op_decorate.word(3);
if (stride > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataStride) {
skip |= LogError(
device, "VUID-RuntimeSpirv-XfbStride-06313",
"vkCreateGraphicsPipelines(): shader uses transform feedback with xfb_stride (%" PRIu32
") greater than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBufferDataStride (%" PRIu32
").",
stride, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataStride);
}
}
if (decoration == spv::DecorationStream) {
xfb_streams.push_back(op_decorate);
uint32_t stream = op_decorate.word(3);
if (stream >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams) {
skip |= LogError(
device, "VUID-RuntimeSpirv-Stream-06312",
"vkCreateGraphicsPipelines(): shader uses transform feedback with stream (%" PRIu32
") not less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams (%" PRIu32 ").",
stream, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
}
if (decoration == spv::DecorationXfbBuffer) {
xfb_buffers.push_back(op_decorate);
}
if (decoration == spv::DecorationOffset) {
xfb_offsets.push_back(op_decorate);
}
}
// Pairs of (XfbBuffer id, largest required buffer data size seen for that buffer)
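// Illustrative accumulation (hypothetical decorations): members of XfbBuffer 0 at Offset 0
// (vec4, 16 bytes) and Offset 16 (float, 4 bytes) give a required buffer data size of
// max(0 + 16, 16 + 4) = 20 bytes for that buffer.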
std::vector<std::pair<uint32_t, uint32_t>> buffer_data_sizes;
for (const auto &op_decorate : xfb_offsets) {
for (const auto xfb_buffer : xfb_buffers) {
if (xfb_buffer.word(1) == op_decorate.word(1)) {
const auto offset = op_decorate.word(3);
const auto def = module->get_def(xfb_buffer.word(1));
const auto size = module->GetTypeBytesSize(def);
const uint32_t buffer_data_size = offset + size;
if (buffer_data_size > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataSize) {
skip |= LogError(
device, "VUID-RuntimeSpirv-Offset-06308",
"vkCreateGraphicsPipelines(): shader uses transform feedback with xfb_offset (%" PRIu32
") + size of variable (%" PRIu32 ") greater than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBufferDataSize "
"(%" PRIu32 ").",
offset, size, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataSize);
}
bool found = false;
for (auto &bds : buffer_data_sizes) {
if (bds.first == xfb_buffer.word(1)) {
bds.second = std::max(bds.second, buffer_data_size);
found = true;
break;
}
}
if (!found) {
buffer_data_sizes.emplace_back(xfb_buffer.word(1), buffer_data_size);
}
break;
}
}
}
std::unordered_map<uint32_t, uint32_t> stream_data_size;
for (const auto &xfb_stream : xfb_streams) {
for (const auto& bds : buffer_data_sizes) {
if (xfb_stream.word(1) == bds.first) {
uint32_t stream = xfb_stream.word(3);
const auto itr = stream_data_size.find(stream);
if (itr != stream_data_size.end()) {
itr->second += bds.second;
} else {
stream_data_size.insert({stream, bds.second});
}
}
}
}
for (const auto& stream : stream_data_size) {
if (stream.second > phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreamDataSize) {
skip |= LogError(device, "VUID-RuntimeSpirv-XfbBuffer-06309",
"vkCreateGraphicsPipelines(): shader uses transform feedback with stream (%" PRIu32
") having the sum of buffer data sizes (%" PRIu32
") not less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackBufferDataSize "
"(%" PRIu32 ").",
stream.first, stream.second,
phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackBufferDataSize);
}
}
return skip;
}
bool CoreChecks::ValidateTransformFeedback(SHADER_MODULE_STATE const *src) const {
bool skip = false;
// Temp workaround to prevent false positive errors
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2450
if (src->HasMultipleEntryPoints()) {
return skip;
}
layer_data::unordered_set<uint32_t> emitted_streams;
bool output_points = false;
for (const auto& insn : *src) {
const uint32_t opcode = insn.opcode();
if (opcode == spv::OpEmitStreamVertex) {
emitted_streams.emplace(static_cast<uint32_t>(src->GetConstantValueById(insn.word(1))));
}
if (opcode == spv::OpEmitStreamVertex || opcode == spv::OpEndStreamPrimitive) {
uint32_t stream = static_cast<uint32_t>(src->GetConstantValueById(insn.word(1)));
if (stream >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams) {
skip |= LogError(
device, "VUID-RuntimeSpirv-OpEmitStreamVertex-06310",
"vkCreateGraphicsPipelines(): shader uses transform feedback stream (%s) with index %" PRIu32
", which is not less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams (%" PRIu32
").",
string_SpvOpcode(opcode), stream, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
}
if (opcode == spv::OpExecutionMode && insn.word(2) == spv::ExecutionModeOutputPoints) {
output_points = true;
}
}
const uint32_t emitted_streams_size = static_cast<uint32_t>(emitted_streams.size());
if (emitted_streams_size > 1 && !output_points &&
phys_dev_ext_props.transform_feedback_props.transformFeedbackStreamsLinesTriangles == VK_FALSE) {
skip |= LogError(
device, "VUID-RuntimeSpirv-transformFeedbackStreamsLinesTriangles-06311",
"vkCreateGraphicsPipelines(): shader emits to %" PRIu32 " vertex streams and VkPhysicalDeviceTransformFeedbackPropertiesEXT::transformFeedbackStreamsLinesTriangles is VK_FALSE, but execution mode is not OutputPoints.",
emitted_streams_size);
}
return skip;
}
// Checks for both TexelOffset and TexelGatherOffset limits
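// Illustrative GLSL (hypothetical snippet) of offsets this check flags, assuming the device
// reports minTexelOffset = -8 and maxTexelGatherOffset = 31:
//   texelFetchOffset(s, p, 0, ivec2(-9, 0));    // below minTexelOffset
//   textureGatherOffset(s, uv, ivec2(32, 0));   // above maxTexelGatherOffset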
bool CoreChecks::ValidateTexelOffsetLimits(SHADER_MODULE_STATE const *src, spirv_inst_iter &insn) const {
bool skip = false;
const uint32_t opcode = insn.opcode();
if (ImageGatherOperation(opcode) || ImageSampleOperation(opcode) || ImageFetchOperation(opcode)) {
uint32_t image_operand_position = ImageOperandsParamPosition(opcode);
// Image operands can be optional
if (image_operand_position != 0 && insn.len() > image_operand_position) {
auto image_operand = insn.word(image_operand_position);
// Bits we are validating (sample/fetch only check ConstOffset)
uint32_t offset_bits =
ImageGatherOperation(opcode)
? (spv::ImageOperandsOffsetMask | spv::ImageOperandsConstOffsetMask | spv::ImageOperandsConstOffsetsMask)
: (spv::ImageOperandsConstOffsetMask);
if (image_operand & (offset_bits)) {
// Operand values follow
uint32_t index = image_operand_position + 1;
// Each set bit has its own operand; walk from the smallest set bit up to the highest bit among
// ImageOperandsOffsetMask, ImageOperandsConstOffsetMask and ImageOperandsConstOffsetsMask
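// Illustrative word layout (hypothetical instruction): for OpImageSampleExplicitLod with
// ImageOperands = Lod | ConstOffset, the Lod operand (lower bit) is consumed first and
// ConstOffset second, which is why index advances by ImageOperandsParamCount(i) for every
// set bit, whether or not that bit is validated here.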
for (uint32_t i = 1; i < spv::ImageOperandsConstOffsetsMask; i <<= 1) {
if (image_operand & i) { // If the bit is set, consume operand
if (insn.len() > index && (i & offset_bits)) {
uint32_t constant_id = insn.word(index);
const auto &constant = src->get_def(constant_id);
const bool is_dynamic_offset = constant == src->end();
if (!is_dynamic_offset && constant.opcode() == spv::OpConstantComposite) {
for (uint32_t j = 3; j < constant.len(); ++j) {
uint32_t comp_id = constant.word(j);
const auto &comp = src->get_def(comp_id);
const auto &comp_type = src->get_def(comp.word(1));
// Get operand value
const uint32_t offset = comp.word(3);
// The spec requires minTexelGatherOffset/minTexelOffset to be -8 or less, so an unsigned
// offset can never violate the minimum. The spec requires maxTexelGatherOffset/maxTexelOffset
// to be 7 or greater, so a signed offset less than zero can never violate the maximum.
const int32_t signed_offset = static_cast<int32_t>(offset);
const bool use_signed = (comp_type.opcode() == spv::OpTypeInt && comp_type.word(3) != 0);
// There are 2 sets of VU being covered where the only main difference is the opcode
if (ImageGatherOperation(opcode)) {
// min/maxTexelGatherOffset
if (use_signed && (signed_offset < phys_dev_props.limits.minTexelGatherOffset)) {
skip |=
LogError(device, "VUID-RuntimeSpirv-OpImage-06376",
"vkCreateShaderModule(): Shader uses %s with offset (%" PRIi32
") less than VkPhysicalDeviceLimits::minTexelGatherOffset (%" PRIi32 ").",
string_SpvOpcode(opcode), signed_offset,
phys_dev_props.limits.minTexelGatherOffset);
} else if ((offset > phys_dev_props.limits.maxTexelGatherOffset) &&
(!use_signed || signed_offset > 0)) {
skip |= LogError(
device, "VUID-RuntimeSpirv-OpImage-06377",
"vkCreateShaderModule(): Shader uses %s with offset (%" PRIu32
") greater than VkPhysicalDeviceLimits::maxTexelGatherOffset (%" PRIu32 ").",
string_SpvOpcode(opcode), offset, phys_dev_props.limits.maxTexelGatherOffset);
}
} else {
// min/maxTexelOffset
if (use_signed && (signed_offset < phys_dev_props.limits.minTexelOffset)) {
skip |= LogError(device, "VUID-RuntimeSpirv-OpImageSample-06435",
"vkCreateShaderModule(): Shader uses %s with offset (%" PRIi32
") less than VkPhysicalDeviceLimits::minTexelOffset (%" PRIi32 ").",
string_SpvOpcode(opcode), signed_offset,
phys_dev_props.limits.minTexelOffset);
} else if ((offset > phys_dev_props.limits.maxTexelOffset) &&
(!use_signed || signed_offset > 0)) {
skip |=
LogError(device, "VUID-RuntimeSpirv-OpImageSample-06436",
"vkCreateShaderModule(): Shader uses %s with offset (%" PRIu32
") greater than VkPhysicalDeviceLimits::maxTexelOffset (%" PRIu32 ").",
string_SpvOpcode(opcode), offset, phys_dev_props.limits.maxTexelOffset);
}
}
}
}
}
index += ImageOperandsParamCount(i);
}
}
}
}
}
return skip;
}
bool CoreChecks::ValidateShaderClock(SHADER_MODULE_STATE const *module, spirv_inst_iter &insn) const {
bool skip = false;
switch (insn.opcode()) {
case spv::OpReadClockKHR: {
auto scope_id = module->get_def(insn.word(3));
auto scope_type = scope_id.word(3);
// if scope isn't Subgroup or Device, spirv-val will catch
if ((scope_type == spv::ScopeSubgroup) && (enabled_features.shader_clock_features.shaderSubgroupClock == VK_FALSE)) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderSubgroupClock-06267",
"%s: OpReadClockKHR is used with a Subgroup scope but shaderSubgroupClock was not enabled.",
report_data->FormatHandle(module->vk_shader_module()).c_str());
} else if ((scope_type == spv::ScopeDevice) && (enabled_features.shader_clock_features.shaderDeviceClock == VK_FALSE)) {
skip |= LogError(device, "VUID-RuntimeSpirv-shaderDeviceClock-06268",
"%s: OpReadClockKHR is used with a Device scope but shaderDeviceClock was not enabled.",
report_data->FormatHandle(module->vk_shader_module()).c_str());
}
break;
}
}
return skip;
}
bool CoreChecks::ValidatePipelineShaderStage(const PIPELINE_STATE *pipeline, const PipelineStageState &stage_state,
bool check_point_size) const {
bool skip = false;
const auto *pStage = stage_state.create_info;
const auto *module = stage_state.module.get();
const auto &entrypoint = stage_state.entrypoint;
// Check the module
if (!module->has_valid_spirv) {
skip |= LogError(
device, "VUID-VkPipelineShaderStageCreateInfo-module-parameter", "%s does not contain valid spirv for stage %s.",
report_data->FormatHandle(module->vk_shader_module()).c_str(), string_VkShaderStageFlagBits(stage_state.stage_flag));
}
// If specialization-constant values are given and specialization-constant instructions are present in the shader, the
// specializations should be applied and validated.
if (pStage->pSpecializationInfo != nullptr && pStage->pSpecializationInfo->mapEntryCount > 0 &&
pStage->pSpecializationInfo->pMapEntries != nullptr && module->HasSpecConstants()) {
// Gather the specialization-constant values.
auto const &specialization_info = pStage->pSpecializationInfo;
auto const &specialization_data = reinterpret_cast<uint8_t const *>(specialization_info->pData);
std::unordered_map<uint32_t, std::vector<uint32_t>> id_value_map; // note: this must be std:: to work with spvtools
id_value_map.reserve(specialization_info->mapEntryCount);
for (auto i = 0u; i < specialization_info->mapEntryCount; ++i) {
auto const &map_entry = specialization_info->pMapEntries[i];
const auto itr = module->GetSpecConstMap().find(map_entry.constantID);
// "If a constantID value is not a specialization constant ID used in the shader, that map entry does not affect the
// behavior of the pipeline."
if (itr != module->GetSpecConstMap().cend()) {
// Make sure map_entry.size matches the spec constant's size
uint32_t spec_const_size = decoration_set::kInvalidValue;
const auto def_ins = module->get_def(itr->second);
const auto type_ins = module->get_def(def_ins.word(1));
// Specialization constants can only be of type bool, scalar integer, or scalar floating point
switch (type_ins.opcode()) {
case spv::OpTypeBool:
// "If the specialization constant is of type boolean, size must be the byte size of VkBool32"
spec_const_size = sizeof(VkBool32);
break;
case spv::OpTypeInt:
case spv::OpTypeFloat:
spec_const_size = type_ins.word(2) / 8;
break;
default:
// spirv-val should catch if SpecId is not used on a OpSpecConstantTrue/OpSpecConstantFalse/OpSpecConstant
// and OpSpecConstant is validated to be a OpTypeInt or OpTypeFloat
break;
}
if (map_entry.size != spec_const_size) {
skip |=
LogError(device, "VUID-VkSpecializationMapEntry-constantID-00776",
"Specialization constant (ID = %" PRIu32 ", entry = %" PRIu32
") has invalid size %zu in shader module %s. Expected size is %" PRIu32 " from shader definition.",
map_entry.constantID, i, map_entry.size,
report_data->FormatHandle(module->vk_shader_module()).c_str(), spec_const_size);
}
}
if ((map_entry.offset + map_entry.size) <= specialization_info->dataSize) {
// Allocate ceil(map_entry.size / 4) words to hold the entry data
std::vector<uint32_t> entry_data((map_entry.size + 4 - 1) / 4, 0);
uint8_t *out_p = reinterpret_cast<uint8_t *>(entry_data.data());
const uint8_t *const start_in_p = specialization_data + map_entry.offset;
const uint8_t *const end_in_p = start_in_p + map_entry.size;
std::copy(start_in_p, end_in_p, out_p);
id_value_map.emplace(map_entry.constantID, std::move(entry_data));
}
}
// both spirv-opt and spirv-val will use the same flags
spvtools::ValidatorOptions options;
AdjustValidatorOptions(device_extensions, enabled_features, options);
// Apply the specialization-constant values and revalidate the shader module.
spv_target_env spirv_environment = PickSpirvEnv(api_version, IsExtEnabled(device_extensions.vk_khr_spirv_1_4));
spvtools::Optimizer optimizer(spirv_environment);
spvtools::MessageConsumer consumer = [&skip, &module, &stage_state, this](spv_message_level_t level, const char *source,
const spv_position_t &position,
const char *message) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-module-parameter",
"%s does not contain valid spirv for stage %s. %s",
report_data->FormatHandle(module->vk_shader_module()).c_str(),
string_VkShaderStageFlagBits(stage_state.stage_flag), message);
};
optimizer.SetMessageConsumer(consumer);
optimizer.RegisterPass(spvtools::CreateSetSpecConstantDefaultValuePass(id_value_map));
optimizer.RegisterPass(spvtools::CreateFreezeSpecConstantValuePass());
std::vector<uint32_t> specialized_spirv;
auto const optimized = optimizer.Run(module->words.data(), module->words.size(), &specialized_spirv, options, false);
assert(optimized);
if (optimized) {
spv_context ctx = spvContextCreate(spirv_environment);
spv_const_binary_t binary{specialized_spirv.data(), specialized_spirv.size()};
spv_diagnostic diag = nullptr;
auto const spv_valid = spvValidateWithOptions(ctx, options, &binary, &diag);
if (spv_valid != SPV_SUCCESS) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-module-04145",
"After specialization was applied, %s does not contain valid spirv for stage %s.",
report_data->FormatHandle(module->vk_shader_module()).c_str(),
string_VkShaderStageFlagBits(stage_state.stage_flag));
}
spvDiagnosticDestroy(diag);
spvContextDestroy(ctx);
}
skip |= ValidateWorkgroupSize(module, pStage, id_value_map);
}
// Check the entrypoint
if (entrypoint == module->end()) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-pName-00707", "No entrypoint found named `%s` for stage %s.",
pStage->pName, string_VkShaderStageFlagBits(stage_state.stage_flag));
}
if (skip) return true; // no point continuing beyond here, any analysis is just going to be garbage.
// Mark accessible ids
auto &accessible_ids = stage_state.accessible_ids;
// Validate descriptor set layout against what the entrypoint actually uses
// The following tries to limit the number of passes through the shader module. The validation passes in here are
// "stateless", mainly examining a single instruction in detail per operation
uint32_t total_shared_size = 0;
for (auto insn : *module) {
skip |= ValidateTexelOffsetLimits(module, insn);
skip |= ValidateShaderCapabilitiesAndExtensions(module, insn);
skip |= ValidateShaderClock(module, insn);
skip |= ValidateShaderStageGroupNonUniform(module, pStage->stage, insn);
skip |= ValidateMemoryScope(module, insn);
total_shared_size += module->CalcComputeSharedMemory(pStage->stage, insn);
}
if (total_shared_size > phys_dev_props.limits.maxComputeSharedMemorySize) {
skip |= LogError(device, kVUID_Core_Shader_MaxComputeSharedMemorySize,
"Shader uses %" PRIu32 " bytes of shared memory, more than allowed by physicalDeviceLimits::maxComputeSharedMemorySize (%" PRIu32 ")",
total_shared_size, phys_dev_props.limits.maxComputeSharedMemorySize);
}
skip |= ValidateTransformFeedback(module);
skip |= ValidateShaderStageWritableOrAtomicDescriptor(pStage->stage, stage_state.has_writable_descriptor,
stage_state.has_atomic_descriptor);
skip |= ValidateShaderStageInputOutputLimits(module, pStage, pipeline, entrypoint);
skip |= ValidateShaderStorageImageFormats(module);
skip |= ValidateShaderStageMaxResources(pStage->stage, pipeline);
skip |= ValidateAtomicsTypes(module);
skip |= ValidateExecutionModes(module, entrypoint, pStage->stage, pipeline);
skip |= ValidateSpecializations(pStage);
skip |= ValidateDecorations(module);
if (check_point_size && !pipeline->create_info.graphics.pRasterizationState->rasterizerDiscardEnable) {
skip |= ValidatePointListShaderState(pipeline, module, entrypoint, pStage->stage);
}
skip |= ValidateBuiltinLimits(module, entrypoint);
if (enabled_features.cooperative_matrix_features.cooperativeMatrix) {
skip |= ValidateCooperativeMatrix(module, pStage, pipeline);
}
if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
skip |= ValidatePrimitiveRateShaderState(pipeline, module, entrypoint, pStage->stage);
}
if (IsExtEnabled(device_extensions.vk_qcom_render_pass_shader_resolve)) {
skip |= ValidateShaderResolveQCOM(module, pStage, pipeline);
}
if (IsExtEnabled(device_extensions.vk_ext_subgroup_size_control)) {
skip |= ValidateShaderSubgroupSizeControl(pStage);
}
// "layout must be consistent with the layout of the * shader"
// 'consistent' -> #descriptorsets-pipelinelayout-consistency
std::string vuid_layout_mismatch;
switch (pipeline->create_info.graphics.sType) {
case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO:
vuid_layout_mismatch = "VUID-VkGraphicsPipelineCreateInfo-layout-00756";
break;
case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO:
vuid_layout_mismatch = "VUID-VkComputePipelineCreateInfo-layout-00703";
break;
case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR:
vuid_layout_mismatch = "VUID-VkRayTracingPipelineCreateInfoKHR-layout-03427";
break;
case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV:
vuid_layout_mismatch = "VUID-VkRayTracingPipelineCreateInfoNV-layout-03427";
break;
default:
assert(false);
break;
}
// Validate Push Constants use
skip |= ValidatePushConstantUsage(*pipeline, module, pStage, vuid_layout_mismatch);
// Validate descriptor use
for (auto use : stage_state.descriptor_uses) {
// Verify given pipelineLayout has requested setLayout with requested binding
const auto &binding = GetDescriptorBinding(pipeline->pipeline_layout.get(), use.first);
unsigned required_descriptor_count;
bool is_khr = binding && binding->descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
std::set<uint32_t> descriptor_types =
TypeToDescriptorTypeSet(module, use.second.type_id, required_descriptor_count, is_khr);
if (!binding) {
skip |= LogError(device, vuid_layout_mismatch,
"Shader uses descriptor slot %u.%u (expected `%s`) but not declared in pipeline layout",
use.first.set, use.first.binding, string_descriptorTypes(descriptor_types).c_str());
} else if (~binding->stageFlags & pStage->stage) {
skip |= LogError(device, vuid_layout_mismatch,
"Shader uses descriptor slot %u.%u but descriptor not accessible from stage %s", use.first.set,
use.first.binding, string_VkShaderStageFlagBits(pStage->stage));
} else if ((binding->descriptorType != VK_DESCRIPTOR_TYPE_MUTABLE_VALVE) &&
(descriptor_types.find(binding->descriptorType) == descriptor_types.end())) {
skip |= LogError(device, vuid_layout_mismatch,
"Type mismatch on descriptor slot %u.%u (expected `%s`) but descriptor of type %s", use.first.set,
use.first.binding, string_descriptorTypes(descriptor_types).c_str(),
string_VkDescriptorType(binding->descriptorType));
} else if (binding->descriptorCount < required_descriptor_count) {
skip |= LogError(device, vuid_layout_mismatch,
"Shader expects at least %u descriptors for binding %u.%u but only %u provided",
required_descriptor_count, use.first.set, use.first.binding, binding->descriptorCount);
}
}
// Validate use of input attachments against subpass structure
if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
auto input_attachment_uses = module->CollectInterfaceByInputAttachmentIndex(accessible_ids);
if (!pipeline->rp_state->use_dynamic_rendering) {
auto rpci = pipeline->rp_state->createInfo.ptr();
auto subpass = pipeline->create_info.graphics.subpass;
for (auto use : input_attachment_uses) {
auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount)
? input_attachments[use.first].attachment
: VK_ATTACHMENT_UNUSED;
if (index == VK_ATTACHMENT_UNUSED) {
skip |= LogError(device, kVUID_Core_Shader_MissingInputAttachment,
"Shader consumes input attachment index %d but not provided in subpass", use.first);
}
else if (!(GetFormatType(rpci->pAttachments[index].format) & module->GetFundamentalType(use.second.type_id))) {
skip |=
LogError(device, kVUID_Core_Shader_InputAttachmentTypeMismatch,
"Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first,
string_VkFormat(rpci->pAttachments[index].format), module->DescribeType(use.second.type_id).c_str());
}
}
}
}
if (pStage->stage == VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= ValidateComputeWorkGroupSizes(module, entrypoint, stage_state);
}
return skip;
}
bool CoreChecks::ValidateInterfaceBetweenStages(SHADER_MODULE_STATE const *producer, spirv_inst_iter producer_entrypoint,
shader_stage_attributes const *producer_stage, SHADER_MODULE_STATE const *consumer,
spirv_inst_iter consumer_entrypoint,
shader_stage_attributes const *consumer_stage) const {
bool skip = false;
auto outputs =
producer->CollectInterfaceByLocation(producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
auto inputs = consumer->CollectInterfaceByLocation(consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
auto a_it = outputs.begin();
auto b_it = inputs.begin();
uint32_t a_component = 0;
uint32_t b_component = 0;
// Maps sorted by key (location); walk them together to find mismatches
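// Illustrative walk (hypothetical interfaces): producer writes locations {0: vec4, 2: float},
// consumer reads {1: vec2, 2: float}. Location 0 yields the "not consumed" warning, location 1
// the "not produced" error, and location 2 falls through to the type checks below.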
while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
a_first.second += a_component;
b_first.second += b_component;
const auto a_length = a_at_end ? 0 : producer->GetNumComponentsInBaseType(producer->get_def(a_it->second.type_id));
const auto b_length = b_at_end ? 0 : consumer->GetNumComponentsInBaseType(consumer->get_def(b_it->second.type_id));
assert(a_at_end || a_component < a_length);
assert(b_at_end || b_component < b_length);
if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
skip |= LogPerformanceWarning(producer->vk_shader_module(), kVUID_Core_Shader_OutputNotConsumed,
"%s writes to output location %" PRIu32 ".%" PRIu32 " which is not consumed by %s",
producer_stage->name, a_first.first, a_first.second, consumer_stage->name);
if ((b_first.first > a_first.first) || b_at_end || (a_component + 1 == a_length)) {
a_it++;
a_component = 0;
} else {
a_component++;
}
} else if (a_at_end || a_first > b_first) {
skip |= LogError(consumer->vk_shader_module(), kVUID_Core_Shader_InputNotProduced,
"%s consumes input location %" PRIu32 ".%" PRIu32 " which is not written by %s", consumer_stage->name,
b_first.first, b_first.second, producer_stage->name);
if ((a_first.first > b_first.first) || a_at_end || (b_component + 1 == b_length)) {
b_it++;
b_component = 0;
} else {
b_component++;
}
} else {
// subtleties of arrayed interfaces:
// - if is_patch, then the member is not arrayed, even though the interface may be.
// - if is_block_member, then the extra array level of an arrayed interface is not
// expressed in the member type -- it's expressed in the block type.
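// Illustrative case (hypothetical tessellation interface): "layout(location = 0) out float v[];"
// is arrayed per-vertex, while "layout(location = 1) patch out float p;" is not arrayed even
// though both live on the same TCS output interface, hence the is_patch comparison below.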
if (!TypesMatch(producer, consumer, a_it->second.type_id, b_it->second.type_id)) {
skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Type mismatch on location %" PRIu32 ".%" PRIu32 ": '%s' vs '%s'", a_first.first, a_first.second,
producer->DescribeType(a_it->second.type_id).c_str(),
consumer->DescribeType(b_it->second.type_id).c_str());
a_it++;
b_it++;
continue;
}
if (a_it->second.is_patch != b_it->second.is_patch) {
skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Decoration mismatch on location %u.%u: is per-%s in %s stage but per-%s in %s stage",
a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name);
}
if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Decoration mismatch on location %" PRIu32 ".%" PRIu32 ": %s and %s stages differ in precision",
a_first.first, a_first.second, producer_stage->name, consumer_stage->name);
}
uint32_t a_remaining = a_length - a_component;
uint32_t b_remaining = b_length - b_component;
if (a_remaining == b_remaining) { // Sizes match so we can advance both a_it and b_it
a_it++;
b_it++;
a_component = 0;
b_component = 0;
} else if (a_remaining > b_remaining) { // a has more components remaining
a_component += b_remaining;
b_component = 0;
b_it++;
} else if (b_remaining > a_remaining) { // b has more components remaining
b_component += a_remaining;
a_component = 0;
a_it++;
}
}
}
if (consumer_stage->stage != VK_SHADER_STAGE_FRAGMENT_BIT) {
auto builtins_producer = producer->CollectBuiltinBlockMembers(producer_entrypoint, spv::StorageClassOutput);
auto builtins_consumer = consumer->CollectBuiltinBlockMembers(consumer_entrypoint, spv::StorageClassInput);
if (!builtins_producer.empty() && !builtins_consumer.empty()) {
if (builtins_producer.size() != builtins_consumer.size()) {
skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Number of elements inside builtin block differ between stages (%s %d vs %s %d).",
producer_stage->name, static_cast<int>(builtins_producer.size()), consumer_stage->name,
static_cast<int>(builtins_consumer.size()));
} else {
auto it_producer = builtins_producer.begin();
auto it_consumer = builtins_consumer.begin();
while (it_producer != builtins_producer.end() && it_consumer != builtins_consumer.end()) {
if (*it_producer != *it_consumer) {
skip |= LogError(producer->vk_shader_module(), kVUID_Core_Shader_InterfaceTypeMismatch,
"Builtin variable inside block doesn't match between %s and %s.", producer_stage->name,
consumer_stage->name);
break;
}
it_producer++;
it_consumer++;
}
}
}
}
return skip;
}
static inline uint32_t DetermineFinalGeomStage(const PIPELINE_STATE *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
uint32_t stage_mask = 0;
if (pipeline->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_POINT_LIST) {
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
stage_mask |= pCreateInfo->pStages[i].stage;
}
// Determine the final geometry stage, i.e. the shader in which PointSize should be written
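// Illustrative selection (hypothetical pipeline): VS + TCS + TES + GS collapses the mask to
// VK_SHADER_STAGE_GEOMETRY_BIT, so only the geometry shader is checked for a PointSize write.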
if (stage_mask & VK_SHADER_STAGE_MESH_BIT_NV) {
stage_mask = VK_SHADER_STAGE_MESH_BIT_NV;
} else if (stage_mask & VK_SHADER_STAGE_GEOMETRY_BIT) {
stage_mask = VK_SHADER_STAGE_GEOMETRY_BIT;
} else if (stage_mask & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
stage_mask = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
} else if (stage_mask & VK_SHADER_STAGE_VERTEX_BIT) {
stage_mask = VK_SHADER_STAGE_VERTEX_BIT;
}
}
return stage_mask;
}
// Validate the shaders used by the given pipeline, and store the active_slots
// that are actually used by the pipeline into pipeline->active_slots
bool CoreChecks::ValidateGraphicsPipelineShaderState(const PIPELINE_STATE *pipeline) const {
const auto create_info = pipeline->create_info.graphics.ptr();
bool skip = false;
uint32_t pointlist_stage_mask = DetermineFinalGeomStage(pipeline, create_info);
const PipelineStageState *vertex_stage = nullptr, *fragment_stage = nullptr;
for (auto &stage : pipeline->stage_state) {
skip |= ValidatePipelineShaderStage(pipeline, stage, (pointlist_stage_mask == stage.stage_flag));
if (stage.stage_flag == VK_SHADER_STAGE_VERTEX_BIT) {
vertex_stage = &stage;
}
if (stage.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT) {
fragment_stage = &stage;
}
}
// if the shader stages are no good individually, cross-stage validation is pointless.
if (skip) return true;
auto vi = create_info->pVertexInputState;
if (vi) {
skip |= ValidateViConsistency(vi);
}
if (vertex_stage && vertex_stage->module->has_valid_spirv && !IsDynamic(pipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
skip |= ValidateViAgainstVsInputs(vi, vertex_stage->module.get(), vertex_stage->entrypoint);
}
for (size_t i = 1; i < pipeline->stage_state.size(); i++) {
const auto &producer = pipeline->stage_state[i - 1];
const auto &consumer = pipeline->stage_state[i];
assert(producer.module);
if (&producer == fragment_stage) {
break;
}
if (consumer.module) {
if (consumer.module->has_valid_spirv && producer.module->has_valid_spirv) {
auto producer_id = GetShaderStageId(producer.stage_flag);
auto consumer_id = GetShaderStageId(consumer.stage_flag);
skip |=
ValidateInterfaceBetweenStages(producer.module.get(), producer.entrypoint, &shader_stage_attribs[producer_id],
consumer.module.get(), consumer.entrypoint, &shader_stage_attribs[consumer_id]);
}
}
}
if (fragment_stage && fragment_stage->module->has_valid_spirv) {
if (pipeline->rp_state->use_dynamic_rendering) {
skip |= ValidateFsOutputsAgainstDynamicRenderingRenderPass(fragment_stage->module.get(), fragment_stage->entrypoint, pipeline);
} else {
skip |= ValidateFsOutputsAgainstRenderPass(fragment_stage->module.get(), fragment_stage->entrypoint, pipeline,
create_info->subpass);
}
}
return skip;
}
bool CoreChecks::ValidateGraphicsPipelineShaderDynamicState(const PIPELINE_STATE *pipeline, const CMD_BUFFER_STATE *pCB,
const char *caller, const DrawDispatchVuid &vuid) const {
bool skip = false;
for (auto &stage : pipeline->stage_state) {
if (stage.stage_flag == VK_SHADER_STAGE_VERTEX_BIT || stage.stage_flag == VK_SHADER_STAGE_GEOMETRY_BIT ||
stage.stage_flag == VK_SHADER_STAGE_MESH_BIT_NV) {
if (!phys_dev_ext_props.fragment_shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports &&
IsDynamic(pipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) && pCB->viewportWithCountCount != 1) {
if (stage.wrote_primitive_shading_rate) {
skip |=
LogError(pipeline->pipeline(), vuid.viewport_count_primitive_shading_rate,
"%s: %s shader of currently bound pipeline statically writes to PrimitiveShadingRateKHR built-in"
"but multiple viewports are set by the last call to vkCmdSetViewportWithCountEXT,"
"and the primitiveFragmentShadingRateWithMultipleViewports limit is not supported.",
caller, string_VkShaderStageFlagBits(stage.stage_flag));
}
}
}
}
return skip;
}
bool CoreChecks::ValidateComputePipelineShaderState(PIPELINE_STATE *pipeline) const {
return ValidatePipelineShaderStage(pipeline, pipeline->stage_state[0], false);
}
uint32_t CoreChecks::CalcShaderStageCount(const PIPELINE_STATE *pipeline, VkShaderStageFlagBits stageBit) const {
uint32_t total = 0;
const auto &create_info = pipeline->create_info.raytracing;
const auto *stages = create_info.ptr()->pStages;
for (uint32_t stage_index = 0; stage_index < create_info.stageCount; stage_index++) {
if (stages[stage_index].stage == stageBit) {
total++;
}
}
if (create_info.pLibraryInfo) {
for (uint32_t i = 0; i < create_info.pLibraryInfo->libraryCount; ++i) {
const auto library_pipeline = Get<PIPELINE_STATE>(create_info.pLibraryInfo->pLibraries[i]);
total += CalcShaderStageCount(library_pipeline.get(), stageBit);
}
}
return total;
}
bool CoreChecks::GroupHasValidIndex(const PIPELINE_STATE *pipeline, uint32_t group, uint32_t stage) const {
if (group == VK_SHADER_UNUSED_NV) {
return true;
}
const auto &create_info = pipeline->create_info.raytracing;
const auto *stages = create_info.ptr()->pStages;
if (group < create_info.stageCount) {
return (stages[group].stage & stage) != 0;
}
group -= create_info.stageCount;
// Search libraries
if (create_info.pLibraryInfo) {
for (uint32_t i = 0; i < create_info.pLibraryInfo->libraryCount; ++i) {
auto library_pipeline = Get<PIPELINE_STATE>(create_info.pLibraryInfo->pLibraries[i]);
const uint32_t stage_count = library_pipeline->create_info.raytracing.ptr()->stageCount;
if (group < stage_count) {
return (library_pipeline->create_info.raytracing.ptr()->pStages[group].stage & stage) != 0;
}
group -= stage_count;
}
}
// group index too large
return false;
}
bool CoreChecks::ValidateRayTracingPipeline(PIPELINE_STATE *pipeline, VkPipelineCreateFlags flags, bool isKHR) const {
bool skip = false;
const auto &create_info = pipeline->create_info.raytracing;
if (isKHR) {
if (create_info.maxPipelineRayRecursionDepth > phys_dev_ext_props.ray_tracing_propsKHR.maxRayRecursionDepth) {
skip |=
LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-maxPipelineRayRecursionDepth-03589",
"vkCreateRayTracingPipelinesKHR: maxPipelineRayRecursionDepth (%d ) must be less than or equal to "
"VkPhysicalDeviceRayTracingPipelinePropertiesKHR::maxRayRecursionDepth %d",
create_info.maxPipelineRayRecursionDepth, phys_dev_ext_props.ray_tracing_propsKHR.maxRayRecursionDepth);
}
if (create_info.pLibraryInfo) {
for (uint32_t i = 0; i < create_info.pLibraryInfo->libraryCount; ++i) {
const auto library_pipelinestate = Get<PIPELINE_STATE>(create_info.pLibraryInfo->pLibraries[i]);
const auto &library_create_info = library_pipelinestate->create_info.raytracing;
if (library_create_info.maxPipelineRayRecursionDepth != create_info.maxPipelineRayRecursionDepth) {
skip |= LogError(
device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraries-03591",
"vkCreateRayTracingPipelinesKHR: Each element (%d) of the pLibraries member of libraries must have been"
"created with the value of maxPipelineRayRecursionDepth (%d) equal to that in this pipeline (%d) .",
i, library_create_info.maxPipelineRayRecursionDepth, create_info.maxPipelineRayRecursionDepth);
}
if (library_create_info.pLibraryInterface && (library_create_info.pLibraryInterface->maxPipelineRayHitAttributeSize !=
create_info.pLibraryInterface->maxPipelineRayHitAttributeSize ||
library_create_info.pLibraryInterface->maxPipelineRayPayloadSize !=
create_info.pLibraryInterface->maxPipelineRayPayloadSize)) {
skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03593",
"vkCreateRayTracingPipelinesKHR: If pLibraryInfo is not NULL, each element of its pLibraries "
"member must have been created with values of the maxPipelineRayPayloadSize and "
"maxPipelineRayHitAttributeSize members of pLibraryInterface equal to those in this pipeline");
}
if ((flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR) &&
!(library_create_info.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) {
skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03594",
"vkCreateRayTracingPipelinesKHR: If flags includes "
"VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, each element of "
"the pLibraries member of libraries must have been created with the "
"VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR bit set");
}
}
}
} else {
if (create_info.maxRecursionDepth > phys_dev_ext_props.ray_tracing_propsNV.maxRecursionDepth) {
skip |= LogError(device, "VUID-VkRayTracingPipelineCreateInfoNV-maxRecursionDepth-03457",
"vkCreateRayTracingPipelinesNV: maxRecursionDepth (%d) must be less than or equal to "
"VkPhysicalDeviceRayTracingPropertiesNV::maxRecursionDepth (%d)",
create_info.maxRecursionDepth, phys_dev_ext_props.ray_tracing_propsNV.maxRecursionDepth);
}
}
const auto *groups = create_info.ptr()->pGroups;
for (uint32_t stage_index = 0; stage_index < create_info.stageCount; stage_index++) {
skip |= ValidatePipelineShaderStage(pipeline, pipeline->stage_state[stage_index], false);
}
if ((create_info.flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) == 0) {
const uint32_t raygen_stages_count = CalcShaderStageCount(pipeline, VK_SHADER_STAGE_RAYGEN_BIT_KHR);
if (raygen_stages_count == 0) {
skip |= LogError(
device,
isKHR ? "VUID-VkRayTracingPipelineCreateInfoKHR-stage-03425" : "VUID-VkRayTracingPipelineCreateInfoNV-stage-06232",
" : The stage member of at least one element of pStages must be VK_SHADER_STAGE_RAYGEN_BIT_KHR.");
}
}
for (uint32_t group_index = 0; group_index < create_info.groupCount; group_index++) {
const auto &group = groups[group_index];
if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV) {
if (!GroupHasValidIndex(
pipeline, group.generalShader,
VK_SHADER_STAGE_RAYGEN_BIT_NV | VK_SHADER_STAGE_MISS_BIT_NV | VK_SHADER_STAGE_CALLABLE_BIT_NV)) {
skip |= LogError(device,
isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03474"
: "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02413",
": pGroups[%d]", group_index);
}
if (group.anyHitShader != VK_SHADER_UNUSED_NV || group.closestHitShader != VK_SHADER_UNUSED_NV ||
group.intersectionShader != VK_SHADER_UNUSED_NV) {
skip |= LogError(device,
isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03475"
: "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02414",
": pGroups[%d]", group_index);
}
} else if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV) {
if (!GroupHasValidIndex(pipeline, group.intersectionShader, VK_SHADER_STAGE_INTERSECTION_BIT_NV)) {
skip |= LogError(device,
isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03476"
: "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02415",
": pGroups[%d]", group_index);
}
} else if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV) {
if (group.intersectionShader != VK_SHADER_UNUSED_NV) {
skip |= LogError(device,
isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03477"
: "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02416",
": pGroups[%d]", group_index);
}
}
if (group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV ||
group.type == VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV) {
if (!GroupHasValidIndex(pipeline, group.anyHitShader, VK_SHADER_STAGE_ANY_HIT_BIT_NV)) {
skip |= LogError(device,
isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-anyHitShader-03479"
: "VUID-VkRayTracingShaderGroupCreateInfoNV-anyHitShader-02418",
": pGroups[%d]", group_index);
}
if (!GroupHasValidIndex(pipeline, group.closestHitShader, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV)) {
skip |= LogError(device,
isKHR ? "VUID-VkRayTracingShaderGroupCreateInfoKHR-closestHitShader-03478"
: "VUID-VkRayTracingShaderGroupCreateInfoNV-closestHitShader-02417",
": pGroups[%d]", group_index);
}
}
}
return skip;
}
uint32_t ValidationCache::MakeShaderHash(VkShaderModuleCreateInfo const *smci) { return XXH32(smci->pCode, smci->codeSize, 0); }
static ValidationCache *GetValidationCacheInfo(VkShaderModuleCreateInfo const *pCreateInfo) {
const auto validation_cache_ci = LvlFindInChain<VkShaderModuleValidationCacheCreateInfoEXT>(pCreateInfo->pNext);
if (validation_cache_ci) {
return CastFromHandle<ValidationCache *>(validation_cache_ci->validationCache);
}
return nullptr;
}
bool CoreChecks::PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) const {
bool skip = false;
spv_result_t spv_valid = SPV_SUCCESS;
if (disabled[shader_validation]) {
return false;
}
auto have_glsl_shader = IsExtEnabled(device_extensions.vk_nv_glsl_shader);
if (!have_glsl_shader && (pCreateInfo->codeSize % 4)) {
skip |= LogError(device, "VUID-VkShaderModuleCreateInfo-pCode-01376",
"SPIR-V module not valid: Codesize must be a multiple of 4 but is " PRINTF_SIZE_T_SPECIFIER ".",
pCreateInfo->codeSize);
} else {
auto cache = GetValidationCacheInfo(pCreateInfo);
uint32_t hash = 0;
// If app isn't using a shader validation cache, use the default one from CoreChecks
if (!cache) cache = CastFromHandle<ValidationCache *>(core_validation_cache);
if (cache) {
hash = ValidationCache::MakeShaderHash(pCreateInfo);
if (cache->Contains(hash)) return false;
}
// Use SPIRV-Tools validator to try and catch any issues with the module itself. If specialization constants are present,
// the default values will be used during validation.
spv_target_env spirv_environment = PickSpirvEnv(api_version, IsExtEnabled(device_extensions.vk_khr_spirv_1_4));
spv_context ctx = spvContextCreate(spirv_environment);
spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
spv_diagnostic diag = nullptr;
spvtools::ValidatorOptions options;
AdjustValidatorOptions(device_extensions, enabled_features, options);
spv_valid = spvValidateWithOptions(ctx, options, &binary, &diag);
if (spv_valid != SPV_SUCCESS) {
if (!have_glsl_shader || (pCreateInfo->pCode[0] == spv::MagicNumber)) {
if (spv_valid == SPV_WARNING) {
skip |= LogWarning(device, kVUID_Core_Shader_InconsistentSpirv, "SPIR-V module not valid: %s",
diag && diag->error ? diag->error : "(no error text)");
} else {
skip |= LogError(device, kVUID_Core_Shader_InconsistentSpirv, "SPIR-V module not valid: %s",
diag && diag->error ? diag->error : "(no error text)");
}
}
} else {
if (cache) {
cache->Insert(hash);
}
}
spvDiagnosticDestroy(diag);
spvContextDestroy(ctx);
}
return skip;
}
bool CoreChecks::ValidateComputeWorkGroupSizes(const SHADER_MODULE_STATE *shader, const spirv_inst_iter &entrypoint,
const PipelineStageState &stage_state) const {
bool skip = false;
uint32_t local_size_x = 0;
uint32_t local_size_y = 0;
uint32_t local_size_z = 0;
if (shader->FindLocalSize(entrypoint, local_size_x, local_size_y, local_size_z)) {
if (local_size_x > phys_dev_props.limits.maxComputeWorkGroupSize[0]) {
skip |= LogError(shader->vk_shader_module(), "VUID-RuntimeSpirv-x-06429",
"%s local_size_x (%" PRIu32 ") exceeds device limit maxComputeWorkGroupSize[0] (%" PRIu32 ").",
report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x,
phys_dev_props.limits.maxComputeWorkGroupSize[0]);
}
if (local_size_y > phys_dev_props.limits.maxComputeWorkGroupSize[1]) {
skip |= LogError(shader->vk_shader_module(), "VUID-RuntimeSpirv-y-06430",
"%s local_size_y (%" PRIu32 ") exceeds device limit maxComputeWorkGroupSize[1] (%" PRIu32 ").",
report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_y,
phys_dev_props.limits.maxComputeWorkGroupSize[1]);
}
if (local_size_z > phys_dev_props.limits.maxComputeWorkGroupSize[2]) {
skip |= LogError(shader->vk_shader_module(), "VUID-RuntimeSpirv-z-06431",
"%s local_size_z (%" PRIu32 ") exceeds device limit maxComputeWorkGroupSize[2] (%" PRIu32 ").",
                             report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_z,
phys_dev_props.limits.maxComputeWorkGroupSize[2]);
}
uint32_t limit = phys_dev_props.limits.maxComputeWorkGroupInvocations;
        uint64_t invocations = static_cast<uint64_t>(local_size_x) * local_size_y;
        // Prevent overflow: widen to 64 bits before multiplying, then check the
        // running product against UINT32_MAX and the device limit at each step.
bool fail = false;
if (invocations > UINT32_MAX || invocations > limit) {
fail = true;
}
if (!fail) {
invocations *= local_size_z;
if (invocations > UINT32_MAX || invocations > limit) {
fail = true;
}
}
if (fail) {
skip |= LogError(shader->vk_shader_module(), "VUID-RuntimeSpirv-x-06432",
"%s local_size (%" PRIu32 ", %" PRIu32 ", %" PRIu32
") exceeds device limit maxComputeWorkGroupInvocations (%" PRIu32 ").",
report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x, local_size_y,
local_size_z, limit);
}
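        // Worked example (illustrative, not in the original code): a local size of
        // (1024, 1024, 64) can pass every per-dimension check on many devices, yet
        // its product (67,108,864 invocations) far exceeds typical values of
        // maxComputeWorkGroupInvocations, which is exactly what this staged,
        // overflow-safe check catches.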
const auto subgroup_flags = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT |
VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT;
if ((stage_state.create_info->flags & subgroup_flags) == subgroup_flags) {
if (SafeModulo(local_size_x, phys_dev_ext_props.subgroup_size_control_props.maxSubgroupSize) != 0) {
skip |= LogError(
shader->vk_shader_module(), "VUID-VkPipelineShaderStageCreateInfo-flags-02758",
"%s flags contain VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT and "
"VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT bits, but local workgroup size in the X "
"dimension (%" PRIu32
") is not a multiple of VkPhysicalDeviceSubgroupSizeControlPropertiesEXT::maxSubgroupSize (%" PRIu32 ").",
report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x,
phys_dev_ext_props.subgroup_size_control_props.maxSubgroupSize);
}
} else if ((stage_state.create_info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) &&
(stage_state.create_info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT) == 0) {
const auto *required_subgroup_size_features =
LvlFindInChain<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(stage_state.create_info->pNext);
if (!required_subgroup_size_features) {
if (SafeModulo(local_size_x, phys_dev_props_core11.subgroupSize) != 0) {
skip |= LogError(
shader->vk_shader_module(), "VUID-VkPipelineShaderStageCreateInfo-flags-02759",
"%s flags contain VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT bit, and not the"
"VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT bit, but local workgroup size in the "
"X dimension (%" PRIu32 ") is not a multiple of VkPhysicalDeviceVulkan11Properties::subgroupSize (%" PRIu32
").",
report_data->FormatHandle(shader->vk_shader_module()).c_str(), local_size_x,
phys_dev_props_core11.subgroupSize);
}
}
}
}
return skip;
}
spv_target_env PickSpirvEnv(uint32_t api_version, bool spirv_1_4) {
if (api_version >= VK_API_VERSION_1_2) {
return SPV_ENV_VULKAN_1_2;
} else if (api_version >= VK_API_VERSION_1_1) {
if (spirv_1_4) {
return SPV_ENV_VULKAN_1_1_SPIRV_1_4;
} else {
return SPV_ENV_VULKAN_1_1;
}
}
return SPV_ENV_VULKAN_1_0;
}
// Some Vulkan extensions/features are handled entirely inside spirv-val, behind optional settings
void AdjustValidatorOptions(const DeviceExtensions &device_extensions, const DeviceFeatures &enabled_features,
spvtools::ValidatorOptions &options) {
// VK_KHR_relaxed_block_layout never had a feature bit so just enabling the extension allows relaxed layout
    // It was promoted in Vulkan 1.1, so anyone using Vulkan 1.1 also gets this for free
if (IsExtEnabled(device_extensions.vk_khr_relaxed_block_layout)) {
// --relax-block-layout
options.SetRelaxBlockLayout(true);
}
    // The rest of the settings are each controlled by a feature bit, which is set correctly in the state tracking.
    // Regardless of the Vulkan version used, the feature bit is needed (as also described in the spec).
if (enabled_features.core12.uniformBufferStandardLayout == VK_TRUE) {
// --uniform-buffer-standard-layout
options.SetUniformBufferStandardLayout(true);
}
if (enabled_features.core12.scalarBlockLayout == VK_TRUE) {
// --scalar-block-layout
options.SetScalarBlockLayout(true);
}
if (enabled_features.workgroup_memory_explicit_layout_features.workgroupMemoryExplicitLayoutScalarBlockLayout) {
// --workgroup-scalar-block-layout
options.SetWorkgroupScalarBlockLayout(true);
}
if (enabled_features.maintenance4_features.maintenance4) {
// --allow-localsizeid
options.SetAllowLocalSizeId(true);
}
}
| 1 | 23,092 | This LGTM, but I'm curious if this fixed a specific error you were hitting? | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -181,7 +181,9 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
}
for _, subs := range strings.Split(parts[1], ",") {
- cgroups[subs] = parts[2]
+ if subs != "" {
+ cgroups[subs] = parts[2]
+ }
}
}
if err := s.Err(); err != nil { | 1 | // +build linux
package cgroups
import (
"bufio"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const (
CgroupProcesses = "cgroup.procs"
unifiedMountpoint = "/sys/fs/cgroup"
)
var (
isUnifiedOnce sync.Once
isUnified bool
)
// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
func IsCgroup2UnifiedMode() bool {
isUnifiedOnce.Do(func() {
var st unix.Statfs_t
err := unix.Statfs(unifiedMountpoint, &st)
if err != nil {
if os.IsNotExist(err) && userns.RunningInUserNS() {
// ignore the "not found" error if running in userns
logrus.WithError(err).Debugf("%s missing, assuming cgroup v1", unifiedMountpoint)
isUnified = false
return
}
panic(fmt.Sprintf("cannot statfs cgroup root: %s", err))
}
isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC
})
return isUnified
}
type Mount struct {
Mountpoint string
Root string
Subsystems []string
}
// GetCgroupMounts returns the mounts for the cgroup subsystems.
// all indicates whether to return just the first instance or all the mounts.
// This function should not be used from cgroupv2 code, as in this case
// all the controllers are available under the constant unifiedMountpoint.
func GetCgroupMounts(all bool) ([]Mount, error) {
if IsCgroup2UnifiedMode() {
// TODO: remove cgroupv2 case once all external users are converted
availableControllers, err := GetAllSubsystems()
if err != nil {
return nil, err
}
m := Mount{
Mountpoint: unifiedMountpoint,
Root: unifiedMountpoint,
Subsystems: availableControllers,
}
return []Mount{m}, nil
}
return getCgroupMountsV1(all)
}
// GetAllSubsystems returns all the cgroup subsystems supported by the kernel
func GetAllSubsystems() ([]string, error) {
// /proc/cgroups is meaningless for v2
// https://github.com/torvalds/linux/blob/v5.3/Documentation/admin-guide/cgroup-v2.rst#deprecated-v1-core-features
if IsCgroup2UnifiedMode() {
// "pseudo" controllers do not appear in /sys/fs/cgroup/cgroup.controllers.
// - devices: implemented in kernel 4.15
// - freezer: implemented in kernel 5.2
// We assume these are always available, as it is hard to detect availability.
pseudo := []string{"devices", "freezer"}
data, err := fscommon.ReadFile("/sys/fs/cgroup", "cgroup.controllers")
if err != nil {
return nil, err
}
subsystems := append(pseudo, strings.Fields(data)...)
return subsystems, nil
}
f, err := os.Open("/proc/cgroups")
if err != nil {
return nil, err
}
defer f.Close()
subsystems := []string{}
s := bufio.NewScanner(f)
for s.Scan() {
text := s.Text()
if text[0] != '#' {
parts := strings.Fields(text)
if len(parts) >= 4 && parts[3] != "0" {
subsystems = append(subsystems, parts[0])
}
}
}
if err := s.Err(); err != nil {
return nil, err
}
return subsystems, nil
}
func readProcsFile(file string) ([]int, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var (
s = bufio.NewScanner(f)
out = []int{}
)
for s.Scan() {
if t := s.Text(); t != "" {
pid, err := strconv.Atoi(t)
if err != nil {
return nil, err
}
out = append(out, pid)
}
}
return out, s.Err()
}
// ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup
// or /proc/<pid>/cgroup, into a map of subsystems to cgroup paths, e.g.
// "cpu": "/user.slice/user-1000.slice"
// "pids": "/user.slice/user-1000.slice"
// etc.
//
// Note that for cgroup v2 unified hierarchy, there are no per-controller
// cgroup paths, so the resulting map will have a single element where the key
// is empty string ("") and the value is the cgroup path the <pid> is in.
func ParseCgroupFile(path string) (map[string]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return parseCgroupFromReader(f)
}
// helper function for ParseCgroupFile to make testing easier
func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
s := bufio.NewScanner(r)
cgroups := make(map[string]string)
for s.Scan() {
text := s.Text()
// from cgroups(7):
// /proc/[pid]/cgroup
// ...
// For each cgroup hierarchy ... there is one entry
// containing three colon-separated fields of the form:
// hierarchy-ID:subsystem-list:cgroup-path
parts := strings.SplitN(text, ":", 3)
if len(parts) < 3 {
return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
}
for _, subs := range strings.Split(parts[1], ",") {
cgroups[subs] = parts[2]
}
}
if err := s.Err(); err != nil {
return nil, err
}
return cgroups, nil
}
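// Illustrative sketch (not part of the original file; the helper name is
// hypothetical): a cgroup v2 line such as "0::/user.slice/session-3.scope"
// carries an empty subsystem list, so parseCgroupFromReader stores its path
// under the "" key -- exactly the behavior the review note attached to this
// record says cgroup v2 callers rely on, and which an unconditional
// `subs != ""` guard would break.
func exampleParseCgroupV2Line() (map[string]string, error) {
	r := strings.NewReader("0::/user.slice/session-3.scope\n")
	// Expected result: map[""] == "/user.slice/session-3.scope"
	return parseCgroupFromReader(r)
}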
func PathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
return false
}
return true
}
func EnterPid(cgroupPaths map[string]string, pid int) error {
for _, path := range cgroupPaths {
if PathExists(path) {
if err := WriteCgroupProc(path, pid); err != nil {
return err
}
}
}
return nil
}
func rmdir(path string) error {
err := unix.Rmdir(path)
if err == nil || err == unix.ENOENT {
return nil
}
return &os.PathError{Op: "rmdir", Path: path, Err: err}
}
// RemovePath aims to remove cgroup path. It does so recursively,
// by removing any subdirectories (sub-cgroups) first.
func RemovePath(path string) error {
// try the fast path first
if err := rmdir(path); err == nil {
return nil
}
infos, err := ioutil.ReadDir(path)
if err != nil {
if os.IsNotExist(err) {
err = nil
}
return err
}
for _, info := range infos {
if info.IsDir() {
// We should remove subcgroups dir first
if err = RemovePath(filepath.Join(path, info.Name())); err != nil {
break
}
}
}
if err == nil {
err = rmdir(path)
}
return err
}
// RemovePaths iterates over the provided paths removing them.
// We try to remove all paths five times, with an increasing delay between tries.
// If some cgroups are still not removed after all the retries, an appropriate
// error is returned.
func RemovePaths(paths map[string]string) (err error) {
const retries = 5
delay := 10 * time.Millisecond
for i := 0; i < retries; i++ {
if i != 0 {
time.Sleep(delay)
delay *= 2
}
for s, p := range paths {
if err := RemovePath(p); err != nil {
// do not log intermediate iterations
switch i {
case 0:
logrus.WithError(err).Warnf("Failed to remove cgroup (will retry)")
case retries - 1:
logrus.WithError(err).Error("Failed to remove cgroup")
}
}
_, err := os.Stat(p)
			// We need this strange way of checking cgroup existence because
			// RemoveAll almost always returns an error, even on already-removed
			// cgroups.
if os.IsNotExist(err) {
delete(paths, s)
}
}
if len(paths) == 0 {
//nolint:ineffassign,staticcheck // done to help garbage collecting: opencontainers/runc#2506
paths = make(map[string]string)
return nil
}
}
return fmt.Errorf("Failed to remove paths: %v", paths)
}
func GetHugePageSize() ([]string, error) {
dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
return nil, err
}
files, err := dir.Readdirnames(0)
dir.Close()
if err != nil {
return nil, err
}
return getHugePageSizeFromFilenames(files)
}
func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) {
pageSizes := make([]string, 0, len(fileNames))
for _, file := range fileNames {
// example: hugepages-1048576kB
val := strings.TrimPrefix(file, "hugepages-")
if len(val) == len(file) {
// unexpected file name: no prefix found
continue
}
// The suffix is always "kB" (as of Linux 5.9)
eLen := len(val) - 2
val = strings.TrimSuffix(val, "kB")
if len(val) != eLen {
logrus.Warnf("GetHugePageSize: %s: invalid filename suffix (expected \"kB\")", file)
continue
}
size, err := strconv.Atoi(val)
if err != nil {
return nil, err
}
// Model after https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/hugetlb_cgroup.c?id=eff48ddeab782e35e58ccc8853f7386bbae9dec4#n574
// but in our case the size is in KB already.
if size >= (1 << 20) {
val = strconv.Itoa(size>>20) + "GB"
} else if size >= (1 << 10) {
val = strconv.Itoa(size>>10) + "MB"
} else {
val += "KB"
}
pageSizes = append(pageSizes, val)
}
return pageSizes, nil
}
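// Worked examples (illustrative, not in the original file):
//   "hugepages-2048kB"    -> 2048 >> 10 == 2    -> "2MB"
//   "hugepages-1048576kB" -> 1048576 >> 20 == 1 -> "1GB"
//   "hugepages-64kB"      -> below both cutoffs -> "64KB"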
// GetPids returns all pids, that were added to cgroup at path.
func GetPids(dir string) ([]int, error) {
return readProcsFile(filepath.Join(dir, CgroupProcesses))
}
// GetAllPids returns all pids, that were added to cgroup at path and to all its
// subcgroups.
func GetAllPids(path string) ([]int, error) {
var pids []int
// collect pids from all sub-cgroups
err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
if iErr != nil {
return iErr
}
if info.IsDir() || info.Name() != CgroupProcesses {
return nil
}
cPids, err := readProcsFile(p)
if err != nil {
return err
}
pids = append(pids, cPids...)
return nil
})
return pids, err
}
// WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file
func WriteCgroupProc(dir string, pid int) error {
	// Normally dir should not be empty; one case where it is empty is when the
	// cgroup subsystem is not mounted, and in that case we want to fail here.
if dir == "" {
return fmt.Errorf("no such directory for %s", CgroupProcesses)
}
	// Don't attach any pid to the cgroup if -1 is specified as a pid
if pid == -1 {
return nil
}
file, err := fscommon.OpenFile(dir, CgroupProcesses, os.O_WRONLY)
if err != nil {
return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
}
defer file.Close()
for i := 0; i < 5; i++ {
_, err = file.WriteString(strconv.Itoa(pid))
if err == nil {
return nil
}
		// EINVAL might mean that the task being added to cgroup.procs is in state
		// TASK_NEW. We should retry the write in that case.
if errors.Is(err, unix.EINVAL) {
time.Sleep(30 * time.Millisecond)
continue
}
return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
}
return err
}
// Since the OCI spec is designed for cgroup v1, in some cases
// there is need to convert from the cgroup v1 configuration to cgroup v2
// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142)
// convert from [2-262144] to [1-10000]
// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)"
func ConvertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 {
if cpuShares == 0 {
return 0
}
return (1 + ((cpuShares-2)*9999)/262142)
}
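// Worked example (illustrative, not in the original file): the cgroup v1
// default of 1024 shares maps to 1 + (1022*9999)/262142 == 1 + 38 == 39,
// and the range endpoints map as expected: 2 -> 1 and 262144 -> 10000.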
// ConvertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec
// for use by cgroup v2 drivers. A conversion is needed since Resources.MemorySwap
// is defined as memory+swap combined, while in cgroup v2 swap is a separate value.
func ConvertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) {
// for compatibility with cgroup1 controller, set swap to unlimited in
// case the memory is set to unlimited, and swap is not explicitly set,
// treating the request as "set both memory and swap to unlimited".
if memory == -1 && memorySwap == 0 {
return -1, nil
}
if memorySwap == -1 || memorySwap == 0 {
// -1 is "max", 0 is "unset", so treat as is
return memorySwap, nil
}
// sanity checks
if memory == 0 || memory == -1 {
return 0, errors.New("unable to set swap limit without memory limit")
}
if memory < 0 {
return 0, fmt.Errorf("invalid memory value: %d", memory)
}
if memorySwap < memory {
return 0, errors.New("memory+swap limit should be >= memory limit")
}
return memorySwap - memory, nil
}
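// Worked examples (illustrative, not in the original file): memory=100<<20 and
// memorySwap=300<<20 yield a cgroup v2 swap value of 200<<20; memory=-1 with
// memorySwap=0 returns -1 ("max"); and a swap limit without a memory limit
// is rejected as an error.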
// Since the OCI spec is designed for cgroup v1, in some cases
// there is need to convert from the cgroup v1 configuration to cgroup v2
// the formula for BlkIOWeight to IOWeight is y = (1 + (x - 10) * 9999 / 990)
// convert linearly from [10-1000] to [1-10000]
func ConvertBlkIOToIOWeightValue(blkIoWeight uint16) uint64 {
if blkIoWeight == 0 {
return 0
}
return uint64(1 + (uint64(blkIoWeight)-10)*9999/990)
}
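// Worked example (illustrative, not in the original file): the v1 default
// weight of 500 maps to 1 + (490*9999)/990 == 1 + 4949 == 4950, with the
// endpoints mapping as 10 -> 1 and 1000 -> 10000.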
| 1 | 23,720 | Actually we rely on this functionality in cgroup v2, where the subsystem is empty. | opencontainers-runc | go |
@@ -1,10 +1,12 @@
-package core
+package core_test
import (
"testing"
"unsafe"
"github.com/google/go-cmp/cmp"
+
+ "go.opentelemetry.io/api/core"
"go.opentelemetry.io/api/registry"
)
| 1 | package core
import (
"testing"
"unsafe"
"github.com/google/go-cmp/cmp"
"go.opentelemetry.io/api/registry"
)
func TestBool(t *testing.T) {
for _, testcase := range []struct {
name string
v bool
want Value
}{
{
name: "value: true",
v: true,
want: Value{
Type: BOOL,
Bool: true,
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Bool(v bool) KeyValue {}
have := Key{}.Bool(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestInt64(t *testing.T) {
for _, testcase := range []struct {
name string
v int64
want Value
}{
{
name: "value: int64(42)",
v: int64(42),
want: Value{
Type: INT64,
Int64: int64(42),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Int64(v int64) KeyValue {
have := Key{}.Int64(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestUint64(t *testing.T) {
for _, testcase := range []struct {
name string
v uint64
want Value
}{
{
name: "value: uint64(42)",
v: uint64(42),
want: Value{
Type: UINT64,
Uint64: uint64(42),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Uint64(v uint64) KeyValue {
have := Key{}.Uint64(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestFloat64(t *testing.T) {
for _, testcase := range []struct {
name string
v float64
want Value
}{
{
name: "value: float64(42.1)",
v: float64(42.1),
want: Value{
Type: FLOAT64,
Float64: float64(42.1),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Float64(v float64) KeyValue {
have := Key{}.Float64(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestInt32(t *testing.T) {
for _, testcase := range []struct {
name string
v int32
want Value
}{
{
name: "value: int32(42)",
v: int32(42),
want: Value{
Type: INT32,
Int64: int64(42),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Int32(v int32) KeyValue {
have := Key{}.Int32(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestUint32(t *testing.T) {
for _, testcase := range []struct {
name string
v uint32
want Value
}{
{
name: "value: uint32(42)",
v: uint32(42),
want: Value{
Type: UINT32,
Uint64: uint64(42),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Uint32(v uint32) KeyValue {
have := Key{}.Uint32(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestFloat32(t *testing.T) {
for _, testcase := range []struct {
name string
v float32
want Value
}{
{
name: "value: float32(42.0)",
v: float32(42.0),
want: Value{
Type: FLOAT32,
Float64: float64(42.0),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Float32(v float32) KeyValue {
have := Key{}.Float32(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestString(t *testing.T) {
for _, testcase := range []struct {
name string
v string
want Value
}{
{
name: `value: string("foo")`,
v: "foo",
want: Value{
Type: STRING,
String: "foo",
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) String(v string) KeyValue {
have := Key{}.String(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestBytes(t *testing.T) {
for _, testcase := range []struct {
name string
v []byte
want Value
}{
{
name: `value: []byte{'f','o','o'}`,
v: []byte{'f', 'o', 'o'},
want: Value{
Type: BYTES,
Bytes: []byte{'f', 'o', 'o'},
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Bytes(v []byte) KeyValue {
have := Key{}.Bytes(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestInt(t *testing.T) {
WTYPE := INT64
if unsafe.Sizeof(int(42)) == 4 {
// switch the desired value-type depending on system int byte-size
WTYPE = INT32
}
for _, testcase := range []struct {
name string
v int
want Value
}{
{
name: `value: int(42)`,
v: int(42),
want: Value{
Type: WTYPE,
Int64: int64(42),
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Int(v int) KeyValue {
have := Key{}.Int(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestUint(t *testing.T) {
WTYPE := UINT64
if unsafe.Sizeof(uint(42)) == 4 {
// switch the desired value-type depending on system int byte-size
WTYPE = UINT32
}
for _, testcase := range []struct {
name string
v uint
want Value
}{
{
name: `value: uint(42)`,
v: uint(42),
want: Value{
Type: WTYPE,
Uint64: 42,
},
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (k Key) Uint(v uint) KeyValue {
have := Key{}.Uint(testcase.v)
if diff := cmp.Diff(testcase.want, have.Value); diff != "" {
t.Fatal(diff)
}
})
}
}
func TestDefined(t *testing.T) {
for _, testcase := range []struct {
name string
k Key
want bool
}{
{
name: `Key Defined`,
k: Key{
registry.Variable{
Name: "foo",
},
},
want: true,
},
{
name: `Key not Defined`,
k: Key{registry.Variable{}},
want: false,
},
} {
t.Run(testcase.name, func(t *testing.T) {
//func (k Key) Defined() bool {
have := testcase.k.Defined()
if have != testcase.want {
t.Errorf("Want: %v, but have: %v", testcase.want, have)
}
})
}
}
func TestEmit(t *testing.T) {
for _, testcase := range []struct {
name string
v Value
want string
}{
{
name: `bool`,
v: Value{
Type: BOOL,
Bool: true,
},
want: "true",
},
{
name: `int32`,
v: Value{
Type: INT32,
Int64: 42,
},
want: "42",
},
{
name: `int64`,
v: Value{
Type: INT64,
Int64: 42,
},
want: "42",
},
{
name: `uint32`,
v: Value{
Type: UINT32,
Uint64: 42,
},
want: "42",
},
{
name: `uint64`,
v: Value{
Type: UINT64,
Uint64: 42,
},
want: "42",
},
{
name: `float32`,
v: Value{
Type: FLOAT32,
Float64: 42.1,
},
want: "42.1",
},
{
name: `float64`,
v: Value{
Type: FLOAT64,
Float64: 42.1,
},
want: "42.1",
},
{
name: `string`,
v: Value{
Type: STRING,
String: "foo",
},
want: "foo",
},
{
name: `bytes`,
v: Value{
Type: BYTES,
Bytes: []byte{'f', 'o', 'o'},
},
want: "foo",
},
} {
t.Run(testcase.name, func(t *testing.T) {
//proto: func (v Value) Emit() string {
have := testcase.v.Emit()
if have != testcase.want {
t.Errorf("Want: %s, but have: %s", testcase.want, have)
}
})
}
}
| 1 | 9,571 | suggestion: use `core` package name | open-telemetry-opentelemetry-go | go |
@@ -1354,7 +1354,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
- def transpose(self, limit: Optional[int] = 1000):
+ def transpose(self, limit: Optional[int] = get_option("compute.max_rows")):
"""
Transpose index and columns.
| 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similarly to pandas DataFrame.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType, StructType)
from pyspark.sql.utils import AnalysisException
from pyspark.sql.window import Window
from pyspark.sql.functions import pandas_udf
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function, align_diff_frames
from databricks.koalas.generic import _Frame
from databricks.koalas.internal import _InternalFrame, IndexMap
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.utils import column_index_level, scol_for
from databricks.koalas.typedef import as_spark_type
from databricks.koalas.plot import KoalasFramePlotMethods
from databricks.koalas.config import get_option
# These regular expression patterns are compiled and defined here to avoid compiling
# the same pattern every time it is used in __repr__ and _repr_html_ in DataFrame.
# Both patterns seek the footer string in pandas' string representation.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide by constant with reverse version.
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
Multiply by constant.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Divide by constant.
>>> df / 1
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df.div(1)
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df // 2
angles degrees
circle 0 180
triangle 1 90
rectangle 2 180
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
"""
T = TypeVar('T')
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params by a tuple to mimic variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, Tuple[params])
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
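    # Illustrative note (not part of the original file): with this patch in
    # place, writing DataFrame[int, float] on Python 3.5/3.6 is rewritten to
    # DataFrame[Tuple[int, float]], so return-type annotations that mimic a
    # variadic generic, e.g. `def f(pdf) -> ks.DataFrame[int, float]: ...`,
    # keep working on those Python versions as well.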
class DataFrame(_Frame, Generic[T]):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
        Note that if `data` is a Pandas DataFrame, a Spark DataFrame, or a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(_InternalFrame(data))
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_dataframe()
super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=False):
"""
        Applies sfun to each column and returns a pd.Series where the number of rows
        equals the number of columns.
Parameters
----------
        sfun : either a 1-arg function that takes a Column and returns a Column, or
            a 2-arg function that takes a Column and its DataType and returns a Column.
        name : original pandas API name.
        axis : axis to apply. 0 or 1, or 'index' or 'columns'. Series only support the
            index axis, so this is used only for a sanity check.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
from databricks.koalas import Series
if axis in ('index', 0, None):
exprs = []
num_args = len(signature(sfun).parameters)
for col, idx in zip(self._internal.data_columns, self._internal.column_index):
col_sdf = self._internal.scol_for(col)
col_type = self._internal.spark_type_for(col)
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(str(idx) if len(idx) > 1 else idx[0]))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
if self._internal.column_index_level > 1:
pdf.columns = pd.MultiIndex.from_tuples(self._internal.column_index)
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
# TODO: return Koalas series.
return row # Return first row as a Series
elif axis in ('columns', 1):
            # Here we execute with the first 1000 records to infer the return type.
            # If there are fewer than 1000 records, the pandas API is used directly as a shortcut.
limit = 1000
pdf = self.head(limit + 1)._to_internal_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type))
def calculate_columns_axis(*cols):
return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only)
df = self._sdf.select(calculate_columns_axis(*self._internal.data_scols).alias("0"))
return DataFrame(df)["0"]
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
# Arithmetic Operators
def _map_series_op(self, op, other):
if not isinstance(other, DataFrame) and is_sequence(other):
raise ValueError(
"%s with a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
applied = []
if isinstance(other, DataFrame) and self is not other:
# Different DataFrames
def apply_op(kdf, this_columns, that_columns):
for this_column, that_column in zip(this_columns, that_columns):
yield getattr(kdf[this_column], op)(kdf[that_column])
return align_diff_frames(apply_op, self, other, fillna=True, how="full")
        elif isinstance(other, DataFrame):
# Same DataFrames
for column in self._internal.data_columns:
applied.append(getattr(self[column], op)(other[column]))
else:
# DataFrame and Series
for column in self._internal.data_columns:
applied.append(getattr(self[column], op)(other))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def __pow__(self, other):
return self._map_series_op("pow", other)
def __rpow__(self, other):
return self._map_series_op("rpow", other)
def __mod__(self, other):
return self._map_series_op("mod", other)
def __rmod__(self, other):
return self._map_series_op("rmod", other)
def __floordiv__(self, other):
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other):
return self._map_series_op("rfloordiv", other)
def add(self, other):
return self + other
# create accessor for plot
plot = CachedAccessor("plot", KoalasFramePlotMethods)
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def mod(self, other):
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='dataframe % other',
reverse='rmod')
def rmod(self, other):
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='other % dataframe',
reverse='mod')
def pow(self, other):
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power of series',
op_name='**',
equiv='dataframe ** other',
reverse='rpow')
def rpow(self, other):
        return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power',
op_name='**',
equiv='other ** dataframe',
reverse='pow')
def floordiv(self, other):
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='dataframe // other',
reverse='rfloordiv')
def rfloordiv(self, other):
        return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='other // dataframe',
reverse='floordiv')
# Comparison Operators
def __eq__(self, other):
return self._map_series_op("eq", other)
def __ne__(self, other):
return self._map_series_op("ne", other)
def __lt__(self, other):
return self._map_series_op("lt", other)
def __le__(self, other):
return self._map_series_op("le", other)
def __ge__(self, other):
return self._map_series_op("ge", other)
def __gt__(self, other):
return self._map_series_op("gt", other)
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False None
c False True
d False None
"""
return self == other
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False None
c True False
d True None
"""
return self > other
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True None
c True True
d True None
"""
return self >= other
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False None
c False False
d False None
"""
return self < other
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True None
c False True
d False None
"""
return self <= other
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True None
c True False
d True None
"""
return self != other
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
        .. note:: unlike pandas, it is required for `func` to specify a return type hint.
See https://docs.python.org/3/library/typing.html. For instance, as below:
>>> def function() -> int:
... return 1
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].apply(func))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. 'rcl' for 3 columns. By
default, 'l' will be used for all columns except columns of numbers, which default
to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a \usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False,
prevents escaping of LaTeX special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to 'ascii' on
Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to column_format. The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a \usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self, limit: Optional[int] = 1000):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misuse, this method
has a default limit on the input length of 1000 rows and raises a ValueError when it is exceeded.
>>> ks.DataFrame({'a': range(1001)}).transpose() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please use df.transpose(limit=<maximum number of rows>) to retrieve more than
1000 rows. Note that, before changing the given 'limit', this operation is
considerably expensive.
Parameters
----------
limit : int, optional
This parameter sets the limit on the length of the current DataFrame. Set it to `None`
to remove the limit. When the limit is set, the operation takes a shortcut: the data is
collected to the driver side and then the pandas API is used. If the limit is unset,
the operation is executed by PySpark. Default is 1000.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly the same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
if limit is not None:
pdf = self.head(limit + 1)._to_internal_pandas()
if len(pdf) > limit:
raise ValueError(
"Current DataFrame has more then the given limit %s rows. Please use "
"df.transpose(limit=<maximum number of rows>) to retrieve more than %s rows. "
"Note that, before changing the given 'limit', this operation is considerably "
"expensive." % (limit, limit))
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
internal_index_column = "__index_level_{}__".format
pairs = F.explode(F.array(*[
F.struct(
[F.lit(col).alias(internal_index_column(i)) for i, col in enumerate(idx)] +
[self[idx]._scol.alias("value")]
) for idx in self._internal.column_index]))
exploded_df = self._sdf.withColumn("pairs", pairs).select(
[F.to_json(F.struct(F.array([scol.cast('string')
for scol in self._internal.index_scols])
.alias('a'))).alias('index'),
F.col("pairs.*")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [internal_index_column(i)
for i in range(self._internal.column_index_level)]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index')
transposed_df = pivoted_df.agg(F.first(F.col("value")))
new_data_columns = list(filter(lambda x: x not in internal_index_columns,
transposed_df.columns))
internal = self._internal.copy(
sdf=transposed_df,
data_columns=new_data_columns,
index_map=[(col, None) for col in internal_index_columns],
column_index=[tuple(json.loads(col)['a']) for col in new_data_columns],
column_index_names=None)
return DataFrame(internal)
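# A rough plain-PySpark illustration of the explode/pivot mechanics above,
# assuming a hypothetical Spark DataFrame 'sdf' with an index column 'id' and
# two same-typed data columns 'a' and 'b' (a sketch, not executed here):
#
#   from pyspark.sql import functions as F
#   pairs = F.explode(F.array(
#       F.struct(F.lit('a').alias('key'), F.col('a').alias('value')),
#       F.struct(F.lit('b').alias('key'), F.col('b').alias('value'))))
#   exploded = sdf.withColumn('pair', pairs).select('id', 'pair.*')
#   transposed = exploded.groupBy('key').pivot('id').agg(F.first('value'))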
T = property(transpose)
def transform(self, func):
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
.. note:: unlike pandas, ``func`` is required to specify its return type hint.
.. note:: the series within ``func`` is actually a pandas series, and
the length of each series is not guaranteed.
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
Examples
--------
>>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
if return_sig is None:
raise ValueError("Given function must have return type hint; however, not found.")
wrapped = ks.pandas_wraps(func)
applied = []
for column in self._internal.data_columns:
applied.append(wrapped(self[column]).rename(column))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf)
return DataFrame(internal)
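# Conceptually, ks.pandas_wraps turns 'func' into a pandas-UDF-backed function
# and each column is passed through it independently; e.g. the square example
# in the docstring applied to a single column is roughly:
#
#   squared = ks.pandas_wraps(square)(kdf['A'])  # a Koalas Series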
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.data_columns) == 0 or self._sdf.rdd.isEmpty()
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._internal.data_columns if column not in keys]
else:
data_columns = self._internal.data_columns
if append:
index_map = self._internal.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
index_columns = set(column for column, _ in index_map)
columns = [column for column, _ in index_map] + \
[column for column in data_columns if column not in index_columns]
# Sync Spark's columns as well.
sdf = self._sdf.select([self._internal.scol_for(name) for name in columns])
internal = _InternalFrame(sdf=sdf, index_map=index_map, data_columns=data_columns)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ks.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
multi_index = len(self._internal.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._internal.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._internal.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._internal.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._internal.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._internal.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._internal.index_map.copy()
for i in idx:
info = self._internal.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
new_data_columns = [
self._internal.scol_for(column).alias(name) for column, name in new_index_map]
if len(index_map) > 0:
index_columns = [column for column, _ in index_map]
sdf = self._sdf.select(
index_columns + new_data_columns + self._internal.data_columns)
else:
sdf = self._sdf.select(new_data_columns + self._internal.data_columns)
# Now, the new internal Spark columns are named the same as the index names.
new_index_map = [(name, name) for column, name in new_index_map]
index_map = [('__index_level_0__', None)]
sdf = _InternalFrame.attach_default_index(sdf)
if drop:
new_index_map = []
internal = self._internal.copy(
sdf=sdf,
data_columns=[column for column, _ in new_index_map] + self._internal.data_columns,
index_map=index_map,
column_index=None)
if self._internal.column_index_level > 1:
column_depth = len(self._internal.column_index[0])
if col_level >= column_depth:
raise IndexError('Too many levels: Index has only {} levels, not {}'
.format(column_depth, col_level + 1))
columns = pd.MultiIndex.from_tuples(
[tuple(name if i == col_level else col_fill
for i in range(column_depth))
for _, name in new_index_map] + self._internal.column_index)
else:
columns = [name for _, name in new_index_map] + self._internal.data_columns
if inplace:
self._internal = internal
self.columns = columns
else:
kdf = DataFrame(internal)
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current DataFrame.
Return a boolean same-sized DataFrame indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
DataFrame.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current DataFrame.
This function takes a DataFrame and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
DataFrame.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None):
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].shift(periods, fill_value))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
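# Series.shift is backed by an unpartitioned, ordered Spark window; a rough
# single-column sketch in plain PySpark (hypothetical index column 'idx'):
#
#   from pyspark.sql import Window, functions as F
#   w = Window.orderBy('idx')
#   sdf.withColumn('Col1', F.lag('Col1', 3).over(w))  # like shift(periods=3)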
# TODO: add axis parameter
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].diff(periods))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
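# Series.diff follows the same window pattern as shift: the column minus its
# lagged value; roughly (plain PySpark sketch, hypothetical index column 'idx'):
#
#   from pyspark.sql import Window, functions as F
#   w = Window.orderBy('idx')
#   sdf.withColumn('a', F.col('a') - F.lag('a', 1).over(w))  # like diff(periods=1)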
def nunique(self, axis: int = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don't include NaN in the count.
approx : bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd : float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
if axis != 0:
raise ValueError("The 'nunique' method only works with axis=0 at the moment")
res = self._sdf.select([self[column]._nunique(dropna, approx, rsd)
for column in self.columns])
return res.toPandas().T.iloc[:, 0]
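# Per column, Series._nunique presumably maps to Spark's exact or approximate
# distinct-count aggregates; a minimal plain-PySpark sketch of the two modes
# (hypothetical column 'A'):
#
#   from pyspark.sql import functions as F
#   exact = sdf.select(F.countDistinct('A')).head()[0]
#   approx = sdf.select(F.approx_count_distinct('A', rsd=0.05)).head()[0]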
def round(self, decimals=0):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ks.Series):
decimals_list = [kv for kv in decimals._to_internal_pandas().items()]
elif isinstance(decimals, dict):
decimals_list = [(k, v) for k, v in decimals.items()]
elif isinstance(decimals, int):
decimals_list = [(v, decimals) for v in self._internal.data_columns]
else:
raise ValueError("decimals must be an integer, a dict-like or a Series")
sdf = self._sdf
for decimal in decimals_list:
sdf = sdf.withColumn(decimal[0], F.round(scol_for(sdf, decimal[0]), decimal[1]))
return DataFrame(self._internal.copy(sdf=sdf))
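# Each (column, n_decimals) pair above becomes a Spark-side F.round; e.g.
# df.round({'A': 1, 'C': 2}) is roughly equivalent to (plain PySpark sketch):
#
#   sdf.withColumn('A', F.round(F.col('A'), 1)) \
#      .withColumn('C', F.round(F.col('C'), 2))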
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates,
by default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
... columns = ['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 1 1 1
2 1 1 1
3 3 4 5
>>> df.duplicated().sort_index()
0 False
1 True
2 True
3 False
Name: 0, dtype: bool
Mark duplicates as ``True`` except for the last occurrence.
>>> df.duplicated(keep='last').sort_index()
0 True
1 True
2 False
3 False
Name: 0, dtype: bool
Mark all duplicates as ``True``.
>>> df.duplicated(keep=False).sort_index()
0 True
1 True
2 True
3 False
Name: 0, dtype: bool
"""
from databricks.koalas.series import _col
if len(self._internal.index_names) > 1:
raise ValueError("Now we don't support multi-index Now.")
if subset is None:
group_cols = self._internal.data_columns
else:
if isinstance(subset, str):
subset = [subset]
group_cols = subset
diff = set(subset).difference(set(self._internal.data_columns))
if len(diff) > 0:
raise KeyError(', '.join(diff))
sdf = self._sdf
index = self._internal.index_columns[0]
if self._internal.index_names[0] is not None:
name = self._internal.index_names[0]
else:
name = '0'
if keep == 'first' or keep == 'last':
if keep == 'first':
ord_func = spark.functions.asc
else:
ord_func = spark.functions.desc
window = Window.partitionBy(group_cols).orderBy(ord_func(index)).rowsBetween(
Window.unboundedPreceding, Window.currentRow)
sdf = sdf.withColumn(name, F.row_number().over(window) > 1)
elif not keep:
window = Window.partitionBy(group_cols).orderBy(F.col(index).desc())\
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
sdf = sdf.withColumn(name, F.count(F.col(index)).over(window) > 1)
else:
raise ValueError("'keep' only support 'first', 'last' and False")
return _col(DataFrame(_InternalFrame(sdf=sdf.select(index, name), data_columns=[name],
index_map=self._internal.index_map)))
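# A plain-PySpark sketch of the window logic above, assuming a hypothetical
# index column 'idx' and a single group column 'a':
#
#   from pyspark.sql import Window, functions as F
#   w = (Window.partitionBy('a').orderBy(F.asc('idx'))
#        .rowsBetween(Window.unboundedPreceding, Window.currentRow))
#   # keep='first': any row ranked after the first in its group is a duplicate.
#   sdf.withColumn('dup', F.row_number().over(w) > 1)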
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
col1 col2
0 1 3
1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached, which gets uncached once execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use the `unpersist` function.
>>> df.unpersist()
"""
return _CachedDataFrame(self._internal)
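# _CachedDataFrame presumably calls sdf.cache() on construction and acts as a
# context manager whose exit triggers sdf.unpersist(); that is what makes the
# 'with df.cache() as cached_df:' pattern in the docstring work.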
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the table exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self.to_spark().write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, **options)
def to_delta(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2019-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, options=options)
def to_parquet(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self.to_spark().write.parquet(
path=path, mode=mode, partitionBy=partition_cols, compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self.to_spark().write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, **options)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
.. note:: Index information is lost. So, if the index columns are not present among
the actual data columns, they are lost.
See Also
--------
DataFrame.to_koalas
"""
return self._internal.spark_df
def to_pandas(self):
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = set(self._internal.data_columns)
adding_columns = [name for name, _ in pairs if name not in data_columns]
level = self._internal.column_index_level
adding_column_index = [tuple([col, *([''] * (level - 1))]) for col in adding_columns]
internal = self._internal.copy(
sdf=sdf,
data_columns=(self._internal.data_columns + adding_columns),
column_index=(self._internal.column_index + adding_column_index))
return DataFrame(internal)
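# Every assigned kwarg ultimately becomes a Spark withColumn; e.g. the temp_f
# example in the docstring is roughly (plain PySpark sketch):
#
#   sdf.withColumn('temp_f', F.col('temp_c') * 9 / 5 + 32)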
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._internal.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._internal.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
# TODO: add 'limit' when value parameter exists
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values.
.. note:: the current implementation of the 'method' parameter in fillna uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. Alternately, a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
sdf = self._sdf
if value is not None:
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if (value is None) and (method is None):
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
if limit is not None:
raise ValueError('limit parameter for value is not supported now')
sdf = sdf.fillna(value)
internal = self._internal.copy(sdf=sdf)
else:
applied = []
for col in self._internal.data_columns:
applied.append(self[col].fillna(value=value, method=method, axis=axis,
inplace=False, limit=limit))
sdf = self._sdf.select(self._internal.index_columns + [col._scol for col in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[col.name for col in applied])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
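# For the 'value' path the heavy lifting is Spark's own DataFrame.fillna,
# which accepts a scalar or a {column: value} dict; e.g. (plain PySpark):
#
#   sdf.fillna(0)                 # fill every compatible column with 0
#   sdf.fillna({'A': 0, 'B': 1})  # per-column fill values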
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> df.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> df.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
limit=None, regex=False, method='pad'):
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, or list
Value to be replaced. If the value is a dict, then value is ignored and
to_replace must be a mapping from column name (string) to replacement value.
The value to be replaced must be an int, float, or string.
value : int, float, string, or list
Value to use to replace holes. The replacement value must be an int, float,
or string. If value is a list, value should be of the same length as to_replace.
subset : string, list
Optional list of column names to consider. Columns specified in subset that
do not have matching data type are ignored. For example, if value is a string,
and subset contains a non-string column, then the non-string column is simply ignored.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Replacing value by specifying column
>>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict like `to_replace`
>>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']},
... columns=['A', 'B', 'C'])
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
Notes
-----
One difference between this implementation and pandas is that it is necessary
to specify the column name when you are passing a dictionary in the `to_replace`
parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will
throw an error. Instead, specify the column name, like `df.replace({'A': {0: 10, 1: 100}})`.
"""
if method != 'pad':
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
if value is not None and not isinstance(value, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(value)))
if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(to_replace)))
if isinstance(value, list) and isinstance(to_replace, list):
if len(value) != len(to_replace):
raise ValueError('Length of to_replace and value must be same')
sdf = self._sdf.select(self._internal.data_columns)
if isinstance(to_replace, dict) and value is None and \
(not any(isinstance(i, dict) for i in to_replace.values())):
sdf = sdf.replace(to_replace, value, subset)
elif isinstance(to_replace, dict):
for df_column, replacement in to_replace.items():
if isinstance(replacement, dict):
sdf = sdf.replace(replacement, subset=df_column)
else:
sdf = sdf.withColumn(df_column,
F.when(scol_for(sdf, df_column) == replacement, value)
.otherwise(scol_for(sdf, df_column)))
else:
sdf = sdf.replace(to_replace, value, subset)
kdf = DataFrame(sdf)
if inplace:
self._internal = kdf._internal
else:
return kdf
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
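A single bound can also be given; for instance, clipping only from above
(a sketch of the expected output):
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(upper=3)
A
0 0
1 2
2 3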
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
numeric_columns = [(c, self._internal.scol_for(c)) for c in self.columns
if isinstance(self._internal.spark_type_for(c), numeric_types)]
if lower is not None:
numeric_columns = [(c, F.when(scol < lower, lower).otherwise(scol).alias(c))
for c, scol in numeric_columns]
if upper is not None:
numeric_columns = [(c, F.when(scol > upper, upper).otherwise(scol).alias(c))
for c, scol in numeric_columns]
nonnumeric_columns = [self._internal.scol_for(c) for c in self.columns
if not isinstance(self._internal.spark_type_for(c), numeric_types)]
sdf = self._sdf.select([scol for _, scol in numeric_columns] + nonnumeric_columns)
return ks.DataFrame(sdf)[list(self.columns)]
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._internal.copy(sdf=self._sdf.limit(n)))
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
They should be either a string or a list of at most two column names.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
The list should contain string.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If dict is passed, the resulting pivot table will have
columns concatenated by "_" where the first part is the value
of columns and the second part is the column name in values.
If dict is passed, the key is the column to aggregate and the value
is the function or list of functions.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table # doctest: +NORMALIZE_WHITESPACE
C large small
A B
foo one 4.0 1
two NaN 6
bar two 7.0 6
one 4.0 5
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table # doctest: +NORMALIZE_WHITESPACE
C large small
A B
foo one 4 1
two 0 6
bar two 7 6
one 4 5
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values = ['D'], index =['C'],
... columns="A", aggfunc={'D':'mean'})
>>> table # doctest: +NORMALIZE_WHITESPACE
A bar foo
C
small 5.5 2.333333
large 5.5 2.000000
The next example aggregates on multiple values.
>>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
... aggfunc={'D': 'mean', 'E': 'sum'})
>>> table # doctest: +NORMALIZE_WHITESPACE
D E
A bar foo bar foo
C
small 5.5 2.333333 17 13
large 5.5 2.000000 15 9
"""
if not isinstance(columns, str):
raise ValueError("columns should be string.")
if not isinstance(values, str) and not isinstance(values, list):
raise ValueError('values should be a string or a list of column names.')
if not isinstance(aggfunc, str) and (not isinstance(aggfunc, dict) or not all(
isinstance(key, str) and isinstance(value, str) for key, value in aggfunc.items())):
raise ValueError("aggfunc must be a dict mapping from column name (string) "
"to aggregate functions (string).")
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError("pivot_table doesn't support aggfunc"
" as dict and without index.")
if isinstance(values, list) and index is None:
raise NotImplementedError("values can't be a list without index.")
if isinstance(values, list) and len(values) > 2:
raise NotImplementedError("values more than two is not supported yet!")
if columns not in self.columns.values:
raise ValueError("Wrong columns {}.".format(columns))
if isinstance(values, list):
if not all(isinstance(self._internal.spark_type_for(col), NumericType)
for col in values):
raise TypeError('values should be a numeric type.')
elif not isinstance(self._internal.spark_type_for(values), NumericType):
raise TypeError('values should be a numeric type.')
if isinstance(aggfunc, str):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(values, aggfunc))]
elif isinstance(aggfunc, dict):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(key, value))
for key, value in aggfunc.items()]
agg_columns = [key for key, value in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
if index is None:
sdf = self._sdf.groupBy().pivot(pivot_col=columns).agg(*agg_cols)
elif isinstance(index, list):
sdf = self._sdf.groupBy(index).pivot(pivot_col=columns).agg(*agg_cols)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
if isinstance(values, list):
data_columns = [column for column in sdf.columns if column not in index]
if len(values) == 2:
# If we have two values, Spark will return the column names
# in the format "<column value>_<values name>", where the first part
# is a value taken from the `columns` column and the second part is
# a column name from the `values` list passed to pivot_table().
# E.g. if column is b and values is ['b','e'],
# then ['2_b', '2_e', '3_b', '3_e'].
# We sort the columns of the Spark DataFrame by the values part.
data_columns.sort(key=lambda x: x.split('_', 1)[1])
sdf = sdf.select(index + data_columns)
index_map = [(column, column) for column in index]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns,
index_map=index_map)
kdf = DataFrame(internal)
# We build the MultiIndex from the list of columns returned by Spark.
tuples = [(name.split('_')[1], self.dtypes[columns].type(name.split('_')[0]))
for name in kdf._internal.data_columns]
kdf.columns = pd.MultiIndex.from_tuples(tuples, names=[None, columns])
else:
index_map = [(column, column) for column in index]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns,
index_map=index_map, column_index_names=[columns])
kdf = DataFrame(internal)
return kdf
else:
data_columns = [column for column in sdf.columns if column not in index]
index_map = [(column, column) for column in index]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns, index_map=index_map,
column_index_names=[columns])
return DataFrame(internal)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
sdf = sdf.withColumn(columns, F.lit(index_values))
data_columns = [column for column in sdf.columns if column not in [columns]]
index_map = [(column, column) for column in [columns]]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns, index_map=index_map,
column_index_names=[columns])
return DataFrame(internal)
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
Notice that, unlike pandas, which raises a ValueError when duplicated values are found,
Koalas' pivot still works with the first value it meets during the operation, because
pivot is an expensive operation and it is preferred to execute permissively rather
than fail fast when processing large data.
>>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
index = [index]
else:
index = self._internal.index_columns
df = self.pivot_table(
index=index, columns=columns, values=values, aggfunc='first')
if should_use_existing_index:
return df
else:
index_columns = df._internal.index_columns
# Note that the existing indexing column won't exist in the pivoted DataFrame.
internal = df._internal.copy(
index_map=[(index_column, None) for index_column in index_columns])
return DataFrame(internal)
@property
def columns(self):
"""The column labels of the DataFrame."""
if self._internal.column_index_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_index)
else:
columns = pd.Index([idx[0] for idx in self._internal.column_index])
if self._internal.column_index_names is not None:
columns.names = self._internal.column_index_names
return columns
@columns.setter
def columns(self, columns):
if isinstance(columns, pd.MultiIndex):
column_index = columns.tolist()
old_names = self._internal.data_columns
if len(old_names) != len(column_index):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(column_index)))
column_index_names = columns.names
self._internal = self._internal.copy(column_index=column_index,
column_index_names=column_index_names)
else:
old_names = self._internal.data_columns
if len(old_names) != len(columns):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(columns)))
if isinstance(columns, pd.Index):
column_index_names = columns.names
else:
column_index_names = None
sdf = self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(old_name).alias(new_name)
for (old_name, new_name) in zip(old_names, columns)])
self._internal = self._internal.copy(sdf=sdf, data_columns=columns, column_index=None,
column_index_names=column_index_names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._internal.data_columns],
index=self._internal.data_columns)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
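A DDL type string also works for ``include`` (a sketch of the expected output):
>>> df.select_dtypes(include=['string'])
d
0 a
1 b
2 a
3 b
4 a
5 b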
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
# Handle Spark types
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
columns = []
column_index = []
for idx, col in zip(self._internal.column_index, self._internal.data_columns):
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[idx].dtype.name) in include_numpy_type or
self._internal.spark_type_for(col) in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[idx].dtype.name) in exclude_numpy_type or
self._internal.spark_type_for(col) in exclude_spark_type)
if should_include:
columns.append(col)
column_index.append(idx)
return DataFrame(self._internal.copy(
sdf=self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(col) for col in columns]),
data_columns=columns, column_index=column_index))
def count(self, axis=None):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
Name: 0, dtype: int64
"""
return self._reduce_for_stat_function(
_Frame._count_expr, name="count", axis=axis, numeric_only=False)
def drop(self, labels=None, axis=1,
columns: Union[str, Tuple[str], List[str], List[Tuple[str]]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [(columns,)]
elif isinstance(columns, tuple):
columns = [columns]
else:
columns = [col if isinstance(col, tuple) else (col,) for col in columns]
drop_column_index = set(idx for idx in self._internal.column_index
for col in columns
if idx[:len(col)] == col)
if len(drop_column_index) == 0:
raise KeyError(columns)
cols, idx = zip(*((column, idx)
for column, idx
in zip(self._internal.data_columns, self._internal.column_index)
if idx not in drop_column_index))
internal = self._internal.copy(data_columns=list(cols), column_index=list(idx))
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
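If the key is not found, `default` is returned (an illustrative sketch):
>>> df.get('w', default=-1) # doctest: +SKIP
-1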
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def _sort(self, by: List[Column], ascending: Union[bool, List[bool]],
inplace: bool, na_position: str):
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
kdf = DataFrame(self._internal.copy(sdf=self._sdf.sort(*by))) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
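Sort with NaNs first (a sketch, assuming pandas-like `na_position` semantics):
>>> df.sort_values(by=['col1', 'col2'], na_position='first')
col1 col2 col3
3 None 8 4
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2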
"""
if isinstance(by, str):
by = [by]
by = [self[colname]._scol for colname in by]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_scols
elif is_list_like(level):
by = [self._internal.index_scols[l] for l in level] # type: ignore
else:
by = [self._internal.index_scols[level]]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._internal.index_columns.copy()
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self._internal.scol_for(col)
.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self._internal.scol_for(col).isin(list(values)).alias(col)
for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.copy(sdf=self._sdf.select(_select_columns)))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner',
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
5 baz 3 baz 7
1 foo 1 foo 5
2 foo 1 foo 8
3 foo 5 foo 5
4 foo 5 foo 8
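If no join keys are given, the intersection of the columns is used; here both
frames share only the 'value' column (an illustrative sketch):
>>> df1.merge(df2) # doctest: +SKIP
lkey value rkey
0 foo 5 foo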
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda o: o if o is None or is_list_like(o) else [o]
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = self._internal.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._internal.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = common
right_keys = common
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_key_columns = [scol_for(left_table, col) for col in left_keys] # type: ignore
right_key_columns = [scol_for(right_table, col) for col in right_keys] # type: ignore
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(self._internal.data_columns)
& set(right._internal.data_columns))
left_index_columns = set(self._internal.index_columns)
right_index_columns = set(right._internal.index_columns)
exprs = []
for col in left_table.columns:
if col in left_index_columns:
continue
scol = scol_for(left_table, col)
if col in duplicate_columns:
if col in left_keys and col in right_keys:
right_scol = scol_for(right_table, col)
if how == 'right':
scol = right_scol
elif how == 'full':
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
exprs.append(scol)
for col in right_table.columns:
if col in right_index_columns:
continue
scol = scol_for(right_table, col)
if col in duplicate_columns:
if col in left_keys and col in right_keys:
continue
else:
col = col + right_suffix
scol = scol.alias(col)
exprs.append(scol)
# Retain indices if they are used for joining
if left_index:
if right_index:
exprs.extend(['left_table.`{}`'.format(col) for col in left_index_columns])
exprs.extend(['right_table.`{}`'.format(col) for col in right_index_columns])
index_map = self._internal.index_map + [idx for idx in right._internal.index_map
if idx not in self._internal.index_map]
else:
exprs.extend(['right_table.`{}`'.format(col) for col in right_index_columns])
index_map = right._internal.index_map
elif right_index:
exprs.extend(['left_table.`{}`'.format(col) for col in left_index_columns])
index_map = self._internal.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
# Merge left and right indices after the join by replacing missing values in the left
# index with values from the right index and dropping the duplicate index columns
if (how == 'right' or how == 'full') and right_index:
for left_index_col, right_index_col in zip(self._internal.index_columns,
right._internal.index_columns):
selected_columns = selected_columns.withColumn(
'left_table.' + left_index_col,
F.when(F.col('left_table.`{}`'.format(left_index_col)).isNotNull(),
F.col('left_table.`{}`'.format(left_index_col)))
.otherwise(F.col('right_table.`{}`'.format(right_index_col)))
).withColumnRenamed(
'left_table.' + left_index_col, left_index_col
).drop(F.col('left_table.`{}`'.format(left_index_col)))
if not (left_index and not right_index):
for right_index_col in right_index_columns:
if right_index_col in left_index_columns:
selected_columns = \
selected_columns.drop(F.col('right_table.`{}`'.format(right_index_col)))
if index_map:
data_columns = [c for c in selected_columns.columns
if c not in [idx[0] for idx in index_map]]
internal = _InternalFrame(
sdf=selected_columns, data_columns=data_columns, index_map=index_map)
return DataFrame(internal)
else:
return DataFrame(selected_columns)
def join(self, right: 'DataFrame', on: Optional[Union[str, List[str]]] = None,
how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame’s index (or column if on is specified).
* right: use `right`’s index.
* outer: form union of `left` frame’s index (or column if on is specified) with
right’s index, and sort it lexicographically.
* inner: form intersection of `left` frame’s index (or column if on is specified)
with `right`’s index, preserving the order of the `left`’s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right’s index but we can use any column in df. This method preserves the
original DataFrame’s index in the result.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.sort_index()
key A B
0 K3 A3 None
1 K0 A0 B0
2 K1 A1 B1
3 K2 A2 B2
"""
if on:
self = self.set_index(on)
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix)).reset_index()
else:
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix))
return join_kdf
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
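With `verify_integrity=True`, appending rows with overlapping indices raises an error:
>>> df.append(df, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indices have overlapping values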
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise ValueError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_scols
if len(index_scols) != len(other._internal.index_scols):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (self._sdf.select(index_scols)
.intersect(other._sdf.select(other._internal.index_scols))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For a Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
If `other` contains None, the corresponding values are not updated in the original DataFrame.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
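With `overwrite=False`, only NA values in the original are updated
(a sketch of the expected output):
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400.0, None, 600.0]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6]}, columns=['B'])
>>> df.update(new_df, overwrite=False)
>>> df
A B
0 1 400.0
1 2 5.0
2 3 600.0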
"""
if join != 'left':
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = DataFrame(other)
update_columns = list(set(self._internal.data_columns)
.intersection(set(other._internal.data_columns)))
update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
for column_name in update_columns:
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(update_sdf, column_name + '_new')
if overwrite:
update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
.otherwise(new_col))
else:
update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
.otherwise(old_col))
internal = self._internal.copy(sdf=update_sdf.select([scol_for(update_sdf, col)
for col in self._internal.columns]))
self._internal = internal
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function using named arguments, specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(self._internal.copy(sdf=sdf))
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.iteritems():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._internal.index_scols + list(map(lambda ser: ser._scol, results)))
return DataFrame(self._internal.copy(sdf=sdf))
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
column_index = [tuple([prefix + i for i in idx]) for idx in self._internal.column_index]
internal = self._internal.copy(column_index=column_index)
return DataFrame(internal)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
column_index = [tuple([i + suffix for i in idx]) for idx in self._internal.column_index]
internal = self._internal.copy(column_index=column_index)
return DataFrame(internal)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
        DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
internal = _InternalFrame(sdf=sdf.replace("stddev", "std", subset='summary'),
data_columns=data_columns,
index_map=[('summary', None)])
return DataFrame(internal).astype('float64')
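# Editor's sketch (not from the original file): describe() above delegates to
# Spark's DataFrame.summary() and then renames "stddev" to the pandas-style
# "std". A minimal standalone illustration of that call, assuming a local
# pyspark installation; the data and the 0.85/0.15 percentiles are placeholders.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(1, 4.0), (2, 5.0), (3, 6.0)], ["numeric1", "numeric2"])
percentiles = [0.85, 0.15]
if 0.5 not in percentiles:  # the median is always reported
    percentiles = percentiles + [0.5]
formatted = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted, "max"]
# Spark labels the standard deviation "stddev"; pandas calls it "std".
sdf.summary(*stats).replace("stddev", "std", subset="summary").show()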
def _cum(self, func, skipna: bool):
        # This is used for cummin, cummax, cumsum, etc.
if func == F.min:
func = "cummin"
elif func == F.max:
func = "cummax"
elif func == F.sum:
func = "cumsum"
elif func.__name__ == "cumprod":
func = "cumprod"
applied = []
for column in self.columns:
applied.append(getattr(self[column], func)(skipna))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
# FIXME(ueshin): no need to specify `column_index`.
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied],
column_index=self._internal.column_index)
return DataFrame(internal)
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
        DataFrame
        Examples
        --------
        >>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._internal.data_columns
elif not isinstance(subset, list):
subset = [subset]
sdf = self._sdf.drop_duplicates(subset=subset)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reindex(self, labels: Optional[Any] = None, index: Optional[Any] = None,
columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True, fill_value: Optional[Any] = None) -> 'DataFrame':
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ks.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
if axis in ('index', 0, None):
index = labels
elif axis in ('columns', 1):
columns = labels
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
if index is not None and not is_list_like(index):
raise TypeError("Index must be called with a collection of some kind, "
"%s was passed" % type(index))
if columns is not None and not is_list_like(columns):
raise TypeError("Columns must be called with a collection of some kind, "
"%s was passed" % type(columns))
df = self.copy()
if index is not None:
df = DataFrame(df._reindex_index(index))
if columns is not None:
df = DataFrame(df._reindex_columns(columns))
# Process missing values.
if fill_value is not None:
df = df.fillna(fill_value)
# Copy
if copy:
return df.copy()
else:
self._internal = df._internal
return self
def _reindex_index(self, index):
        # When axis is index, we can mimic pandas' behavior with a right outer join.
index_column = self._internal.index_columns
assert len(index_column) <= 1, "Index should be single column or not set."
if len(index_column) == 1:
kser = ks.Series(list(index))
index_column = index_column[0]
labels = kser._kdf._sdf.select(kser._scol.alias(index_column))
else:
index_column = None
labels = ks.Series(index).to_frame()._sdf
joined_df = self._sdf.join(labels, on=index_column, how="right")
new_data_columns = filter(lambda x: x not in index_column, joined_df.columns)
if index_column is not None:
index_map = [(index_column, None)] # type: List[IndexMap]
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns),
index_map=index_map)
else:
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns))
return internal
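# Editor's sketch (not from the original file): the right-outer-join trick in
# _reindex_index above, shown in plain PySpark. Joining the data against the
# requested labels keeps exactly those index values and yields nulls for labels
# with no matching row, mimicking pandas reindex. Assumes a local pyspark
# installation; names and data are placeholders.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
data = spark.createDataFrame([("Chrome", 200), ("Safari", 404)],
                             ["index", "http_status"])
new_labels = spark.createDataFrame([("Safari",), ("Iceweasel",)], ["index"])
# "Iceweasel" has no row in `data`, so it comes back with a null http_status.
data.join(new_labels, on="index", how="right").show()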
def _reindex_columns(self, columns):
level = self._internal.column_index_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError('Expected tuple, got {}'.format(type(col)))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError("shape (1,{}) doesn't match the shape (1,{})"
.format(len(col), level))
index_to_column = dict(zip(self._internal.column_index, self._internal.data_columns))
scols, columns, idx = [], [], []
null_columns = False
for label in label_columns:
if index_to_column.get(label, None) is not None:
scols.append(self._internal.scol_for(index_to_column[label]))
columns.append(index_to_column[label])
else:
scols.append(F.lit(np.nan).alias(str(label)))
columns.append(str(label))
null_columns = True
idx.append(label)
if null_columns:
sdf = self._sdf.select(self._internal.index_scols + list(scols))
return self._internal.copy(sdf=sdf, data_columns=columns, column_index=idx)
def melt(self, id_vars=None, value_vars=None, var_name='variable',
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
if id_vars is None:
id_vars = []
if not isinstance(id_vars, (list, tuple, np.ndarray)):
id_vars = list(id_vars)
data_columns = self._internal.data_columns
if value_vars is None:
value_vars = []
if not isinstance(value_vars, (list, tuple, np.ndarray)):
value_vars = list(value_vars)
if len(value_vars) == 0:
value_vars = data_columns
data_columns = [data_column for data_column in data_columns if data_column not in id_vars]
sdf = self._sdf
pairs = F.explode(F.array(*[
F.struct(*(
[F.lit(column).alias(var_name)] +
[self._internal.scol_for(column).alias(value_name)])
) for column in data_columns if column in value_vars]))
columns = (id_vars +
[F.col("pairs.%s" % var_name), F.col("pairs.%s" % value_name)])
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(exploded_df)
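# Editor's sketch (not from the original file): melt() above unpivots with a
# single explode over an array of (variable, value) structs. The same trick in
# raw PySpark, assuming a local pyspark installation; data and column names are
# placeholders.
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([("a", 1, 2), ("b", 3, 4)], ["A", "B", "C"])
pairs = F.explode(F.array(*[
    F.struct(F.lit(c).alias("variable"), F.col(c).alias("value"))
    for c in ["B", "C"]]))
# One input row becomes one output row per unpivoted column.
sdf.withColumn("pairs", pairs).select(
    "A", F.col("pairs.variable"), F.col("pairs.value")).show()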
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
        False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
Name: all, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
column_index = self._internal.column_index
for idx in column_index:
col = self[idx]._scol
all_col = F.min(F.coalesce(col.cast('boolean'), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
internal_index_column = "__index_level_{}__".format
value_column = "value"
cols = []
for idx, applied_col in zip(column_index, applied):
cols.append(F.struct(
[F.lit(col).alias(internal_index_column(i)) for i, col in enumerate(idx)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_index_names is None
else self._internal.column_index_names[i])
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column(i), index_column_name(i))
for i in range(self._internal.column_index_level)],
column_index=None,
column_index_names=None)
return DataFrame(internal)[value_column].rename("all")
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
Name: any, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
column_index = self._internal.column_index
for idx in column_index:
col = self[idx]._scol
all_col = F.max(F.coalesce(col.cast('boolean'), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
internal_index_column = "__index_level_{}__".format
value_column = "value"
cols = []
for idx, applied_col in zip(column_index, applied):
cols.append(F.struct(
[F.lit(col).alias(internal_index_column(i)) for i, col in enumerate(idx)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_index_names is None
else self._internal.column_index_names[i])
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column(i), index_column_name(i))
for i in range(self._internal.column_index_level)],
column_index=None,
column_index_names=None)
return DataFrame(internal)[value_column].rename("any")
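# Editor's sketch (not from the original file): all() and any() above each
# reduce a column with a single aggregate, since min over booleans behaves like
# ALL and max like ANY, with coalesce() supplying the identity element for
# nulls. Assumes a local pyspark installation; the data is a placeholder.
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(1, 0), (1, 2), (1, None)], ["col1", "col2"])
sdf.select(
    F.min(F.coalesce(F.col("col1").cast("boolean"), F.lit(True))).alias("all_col1"),
    F.max(F.coalesce(F.col("col2").cast("boolean"), F.lit(False))).alias("any_col2"),
).show()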
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
        .. note:: the current implementation of rank uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
        If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
        If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
        If method is set to 'dense', it leaves no gaps between the ranks of groups.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].rank(method=method, ascending=ascending))
sdf = self._sdf.select(self._internal.index_columns + [column._scol for column in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[column.name for column in applied])
return DataFrame(internal)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive")
if axis not in ('index', 0, 'columns', 1, None):
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
index_scols = self._internal.index_scols
sdf = self._sdf
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
sdf = sdf.filter(col)
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
return self[items]
elif like is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].contains(like))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
data_columns = self._internal.data_columns
output_columns = [c for c in data_columns if like in c]
return self[output_columns]
elif regex is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].rlike(regex))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
data_columns = self._internal.data_columns
matcher = re.compile(regex)
output_columns = [c for c in data_columns if matcher.search(c) is not None]
return self[output_columns]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def _get_from_multiindex_column(self, key):
""" Select columns from multi-index columns.
:param key: the multi-index column keys represented by tuple
:return: DataFrame or Series
"""
from databricks.koalas.series import Series
assert isinstance(key, tuple)
indexes = [(idx, idx) for idx in self._internal.column_index]
for k in key:
indexes = [(index, idx[1:]) for index, idx in indexes if idx[0] == k]
if len(indexes) == 0:
raise KeyError(k)
recursive = False
if all(len(idx) > 0 and idx[0] == '' for _, idx in indexes):
# If the head is '', drill down recursively.
recursive = True
for i, (col, idx) in enumerate(indexes):
indexes[i] = (col, tuple([str(key), *idx[1:]]))
column_index_names = None
if self._internal.column_index_names is not None:
# Manage column index names
level = column_index_level([idx for _, idx in indexes])
column_index_names = self._internal.column_index_names[-level:]
if all(len(idx) == 0 for _, idx in indexes):
try:
idxes = set(idx for idx, _ in indexes)
assert len(idxes) == 1
kdf_or_ser = \
Series(self._internal.copy(scol=self._internal.scol_for(list(idxes)[0])),
anchor=self)
except AnalysisException:
raise KeyError(key)
else:
kdf_or_ser = DataFrame(self._internal.copy(
data_columns=[self._internal.column_name_for(idx) for idx, _ in indexes],
column_index=[idx for _, idx in indexes],
column_index_names=column_index_names))
if recursive:
kdf_or_ser = kdf_or_ser._get_from_multiindex_column((str(key),))
if isinstance(kdf_or_ser, Series):
name = str(key) if len(key) > 1 else key[0]
if kdf_or_ser.name != name:
kdf_or_ser.name = name
return kdf_or_ser
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
return self._get_from_multiindex_column((key,))
if isinstance(key, tuple):
return self._get_from_multiindex_column(key)
elif np.isscalar(key):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._internal.copy(scol=self._internal.scol_for(key)), anchor=self)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._internal.copy(sdf=self._sdf.filter(bcol)))
raise NotImplementedError(key)
def _to_internal_pandas(self):
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.pandas_df
def __repr__(self):
max_display_count = get_option("display.max_rows")
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
repr_string = repr(pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return repr_string
def _repr_html_(self):
max_display_count = get_option("display.max_rows")
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
repr_html = pdf[:max_display_count]._repr_html_()
if pdf_length > max_display_count:
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return repr_html
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
if (isinstance(value, Series) and value._kdf is not self) or \
(isinstance(value, DataFrame) and value is not self):
# Different Series or DataFrames
if isinstance(value, Series):
value = value.to_frame()
if not isinstance(key, (tuple, list)):
key = [key]
def assign_columns(kdf, this_columns, that_columns):
assert len(key) == len(that_columns)
                # Note that `zip_longest` is intentionally used here so that keys
                # without a matching column in `this_columns` are still assigned.
for k, this_column, that_column in zip_longest(key, this_columns, that_columns):
yield kdf[that_column].rename(k)
if this_column != k and this_column is not None:
yield kdf[this_column]
kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
elif isinstance(key, (tuple, list)):
assert isinstance(value, DataFrame)
# Same DataFrames.
field_names = value.columns
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
# Same Series.
kdf = self.assign(**{key: value})
self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
try:
return self._get_from_multiindex_column((key,))
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
# we always wraps the given type hints by a tuple to mimic the variadic generic.
return super(cls, DataFrame).__class_getitem__(Tuple[params])
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
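# Editor's sketch (not from the original file): the Python >= 3.7
# __class_getitem__ workaround above, in miniature. Wrapping the subscript in
# typing.Tuple lets a Generic class accept a variadic list of type arguments;
# the names here are placeholders.
from typing import Generic, Tuple, TypeVar

T = TypeVar("T")

class Frame(Generic[T]):
    def __class_getitem__(cls, params):
        # Frame[int, str] becomes Frame[Tuple[int, str]].
        return super().__class_getitem__(Tuple[params])

alias = Frame[int, str]
print(alias)  # __main__.Frame[typing.Tuple[int, str]]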
def _reduce_spark_multi(sdf, aggs):
"""
    Performs a reduction on a dataframe; the functions must be known SQL aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal):
self._cached = internal._sdf.cache()
super(_CachedDataFrame, self).__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
        The `unpersist` function is used to uncache the Koalas DataFrame when it
        is not used within a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| 1 | 11,238 | I think we can entirely remove this `limit` parameter for now to be consistent with other APIs. | databricks-koalas | py |
@@ -0,0 +1,9 @@
+from localstack.services.infra import start_moto_server
+from localstack import config
+
+
+def start_rg(port=None, asynchronous=False, update_listener=None):
+ port = port or config.PORT_RESOURCE_GROUPS
+
+ return start_moto_server('resource-groups', port, name='Resource Groups Tagging API',
+ asynchronous=asynchronous, update_listener=update_listener) | 1 | 1 | 12,450 | nit: `Resource Groups Tagging API` -> `Resource Groups API` | localstack-localstack | py |
|
@@ -5,10 +5,16 @@
package ecr
import (
+ "encoding/base64"
+ "errors"
"fmt"
"strings"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ecr"
+ "github.com/aws/aws-sdk-go/service/ecr/ecriface"
)
const ( | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package ecr contains utility functions for dealing with ECR repos
package ecr
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
)
const (
urlFmtString = "%s.dkr.ecr.%s.amazonaws.com/%s"
arnResourcePrefix = "repository/"
)
// URIFromARN converts an ECR Repo ARN to a Repository URI
func URIFromARN(repositoryARN string) (string, error) {
repoARN, err := arn.Parse(repositoryARN)
if err != nil {
return "", fmt.Errorf("parsing repository ARN %s: %w", repositoryARN, err)
}
// Repo ARNs look like arn:aws:ecr:region:012345678910:repository/test
// so we have to strip the repository out.
repoName := strings.TrimPrefix(repoARN.Resource, arnResourcePrefix)
return fmt.Sprintf(urlFmtString,
repoARN.AccountID,
repoARN.Region,
repoName), nil
}
| 1 | 11,311 | This type of list + delete always make me a bit uneasy but guess there's no atomic way to do this...... Could you put a todo to retry the "ClearRepository + delete repo" flow a few times? Basically, imagine a new image is added right after we call `ListImages`, then `DeleteImages` will not delete that newly added image. Then the delete repo call will fail | aws-copilot-cli | go |
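A hedged sketch of the retry flow suggested in the review above, written in
Python with boto3's ECR client for brevity (the Go SDK used by this repo has
equivalent calls); the function name and retry count are illustrative:

import boto3

ecr = boto3.client("ecr")

def delete_repository_with_retry(repo_name, attempts=3):
    for _ in range(attempts):
        # Clear whatever images are visible right now.
        image_ids = ecr.list_images(repositoryName=repo_name)["imageIds"]
        if image_ids:
            ecr.batch_delete_image(repositoryName=repo_name, imageIds=image_ids)
        try:
            ecr.delete_repository(repositoryName=repo_name)
            return
        except ecr.exceptions.RepositoryNotEmptyException:
            # An image slipped in after list_images; list and delete again.
            continue
    raise RuntimeError("could not empty and delete %s" % repo_name)

Note that ECR also offers delete_repository(..., force=True), which removes a
non-empty repository in one call and sidesteps the race entirely.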
@@ -1,5 +1,11 @@
-define(['dom', 'scrollManager'], function (dom, scrollManager) {
- 'use strict';
+/* eslint-disable indent */
+
+import dom from 'dom';
+import scrollManager from 'scrollManager';
+
+/* eslint-disable no-unused-expressions */
+'use strict';
+/* eslint-enable no-unused-expressions */
var scopes = [];
function pushScope(elem) { | 1 | define(['dom', 'scrollManager'], function (dom, scrollManager) {
'use strict';
var scopes = [];
function pushScope(elem) {
scopes.push(elem);
}
function popScope(elem) {
if (scopes.length) {
scopes.length -= 1;
}
}
function autoFocus(view, defaultToFirst, findAutoFocusElement) {
var element;
if (findAutoFocusElement !== false) {
element = view.querySelector('*[autofocus]');
if (element) {
focus(element);
return element;
}
}
if (defaultToFirst !== false) {
element = getFocusableElements(view, 1, 'noautofocus')[0];
if (element) {
focus(element);
return element;
}
}
return null;
}
function focus(element) {
try {
element.focus({
preventScroll: scrollManager.isEnabled()
});
} catch (err) {
            console.error('Error in focusManager.focus: ' + err);
}
}
var focusableTagNames = ['INPUT', 'TEXTAREA', 'SELECT', 'BUTTON', 'A'];
var focusableContainerTagNames = ['BODY', 'DIALOG'];
var focusableQuery = focusableTagNames.map(function (t) {
if (t === 'INPUT') {
t += ':not([type="range"]):not([type="file"])';
}
return t + ':not([tabindex="-1"]):not(:disabled)';
}).join(',') + ',.focusable';
function isFocusable(elem) {
if (focusableTagNames.indexOf(elem.tagName) !== -1) {
return true;
}
if (elem.classList && elem.classList.contains('focusable')) {
return true;
}
return false;
}
function normalizeFocusable(elem, originalElement) {
if (elem) {
var tagName = elem.tagName;
if (!tagName || tagName === 'HTML' || tagName === 'BODY') {
elem = originalElement;
}
}
return elem;
}
function focusableParent(elem) {
var originalElement = elem;
while (!isFocusable(elem)) {
var parent = elem.parentNode;
if (!parent) {
return normalizeFocusable(elem, originalElement);
}
elem = parent;
}
return normalizeFocusable(elem, originalElement);
}
// Determines if a focusable element can be focused at a given point in time
function isCurrentlyFocusableInternal(elem) {
// http://stackoverflow.com/questions/19669786/check-if-element-is-visible-in-dom
if (elem.offsetParent === null) {
return false;
}
return true;
}
// Determines if a focusable element can be focused at a given point in time
function isCurrentlyFocusable(elem) {
if (elem.disabled) {
return false;
}
if (elem.getAttribute('tabindex') === '-1') {
return false;
}
if (elem.tagName === 'INPUT') {
var type = elem.type;
if (type === 'range') {
return false;
}
if (type === 'file') {
return false;
}
}
return isCurrentlyFocusableInternal(elem);
}
function getDefaultScope() {
return scopes[0] || document.body;
}
function getFocusableElements(parent, limit, excludeClass) {
var elems = (parent || getDefaultScope()).querySelectorAll(focusableQuery);
var focusableElements = [];
for (var i = 0, length = elems.length; i < length; i++) {
var elem = elems[i];
if (excludeClass && elem.classList.contains(excludeClass)) {
continue;
}
if (isCurrentlyFocusableInternal(elem)) {
focusableElements.push(elem);
if (limit && focusableElements.length >= limit) {
break;
}
}
}
return focusableElements;
}
function isFocusContainer(elem, direction) {
if (focusableContainerTagNames.indexOf(elem.tagName) !== -1) {
return true;
}
var classList = elem.classList;
if (classList.contains('focuscontainer')) {
return true;
}
if (direction === 0) {
if (classList.contains('focuscontainer-x')) {
return true;
}
if (classList.contains('focuscontainer-left')) {
return true;
}
} else if (direction === 1) {
if (classList.contains('focuscontainer-x')) {
return true;
}
if (classList.contains('focuscontainer-right')) {
return true;
}
} else if (direction === 2) {
if (classList.contains('focuscontainer-y')) {
return true;
}
} else if (direction === 3) {
if (classList.contains('focuscontainer-y')) {
return true;
}
if (classList.contains('focuscontainer-down')) {
return true;
}
}
return false;
}
function getFocusContainer(elem, direction) {
while (!isFocusContainer(elem, direction)) {
elem = elem.parentNode;
if (!elem) {
return getDefaultScope();
}
}
return elem;
}
function getOffset(elem) {
var box;
// Support: BlackBerry 5, iOS 3 (original iPhone)
// If we don't have gBCR, just use 0,0 rather than error
if (elem.getBoundingClientRect) {
box = elem.getBoundingClientRect();
} else {
box = {
top: 0,
left: 0,
width: 0,
height: 0
};
}
if (box.right === null) {
// Create a new object because some browsers will throw an error when trying to set data onto the Rect object
var newBox = {
top: box.top,
left: box.left,
width: box.width,
height: box.height
};
box = newBox;
box.right = box.left + box.width;
box.bottom = box.top + box.height;
}
return box;
}
function nav(activeElement, direction, container, focusableElements) {
activeElement = activeElement || document.activeElement;
if (activeElement) {
activeElement = focusableParent(activeElement);
}
container = container || (activeElement ? getFocusContainer(activeElement, direction) : getDefaultScope());
if (!activeElement) {
autoFocus(container, true, false);
return;
}
var focusableContainer = dom.parentWithClass(activeElement, 'focusable');
var rect = getOffset(activeElement);
// Get elements and work out x/y points
var point1x = parseFloat(rect.left) || 0;
var point1y = parseFloat(rect.top) || 0;
var point2x = parseFloat(point1x + rect.width - 1) || point1x;
var point2y = parseFloat(point1y + rect.height - 1) || point1y;
var sourceMidX = rect.left + (rect.width / 2);
var sourceMidY = rect.top + (rect.height / 2);
var focusable = focusableElements || container.querySelectorAll(focusableQuery);
var maxDistance = Infinity;
var minDistance = maxDistance;
var nearestElement;
for (var i = 0, length = focusable.length; i < length; i++) {
var curr = focusable[i];
if (curr === activeElement) {
continue;
}
// Don't refocus into the same container
if (curr === focusableContainer) {
continue;
}
var elementRect = getOffset(curr);
// not currently visible
if (!elementRect.width && !elementRect.height) {
continue;
}
switch (direction) {
case 0:
// left
if (elementRect.left >= rect.left) {
continue;
}
if (elementRect.right === rect.right) {
continue;
}
break;
case 1:
// right
if (elementRect.right <= rect.right) {
continue;
}
if (elementRect.left === rect.left) {
continue;
}
break;
case 2:
// up
if (elementRect.top >= rect.top) {
continue;
}
if (elementRect.bottom >= rect.bottom) {
continue;
}
break;
case 3:
// down
if (elementRect.bottom <= rect.bottom) {
continue;
}
if (elementRect.top <= rect.top) {
continue;
}
break;
default:
break;
}
var x = elementRect.left;
var y = elementRect.top;
var x2 = x + elementRect.width - 1;
var y2 = y + elementRect.height - 1;
var intersectX = intersects(point1x, point2x, x, x2);
var intersectY = intersects(point1y, point2y, y, y2);
var midX = elementRect.left + (elementRect.width / 2);
var midY = elementRect.top + (elementRect.height / 2);
var distX;
var distY;
switch (direction) {
case 0:
// left
distX = Math.abs(point1x - Math.min(point1x, x2));
distY = intersectY ? 0 : Math.abs(sourceMidY - midY);
break;
case 1:
// right
distX = Math.abs(point2x - Math.max(point2x, x));
distY = intersectY ? 0 : Math.abs(sourceMidY - midY);
break;
case 2:
// up
distY = Math.abs(point1y - Math.min(point1y, y2));
distX = intersectX ? 0 : Math.abs(sourceMidX - midX);
break;
case 3:
// down
distY = Math.abs(point2y - Math.max(point2y, y));
distX = intersectX ? 0 : Math.abs(sourceMidX - midX);
break;
default:
break;
}
var dist = Math.sqrt(distX * distX + distY * distY);
if (dist < minDistance) {
nearestElement = curr;
minDistance = dist;
}
}
if (nearestElement) {
// See if there's a focusable container, and if so, send the focus command to that
if (activeElement) {
var nearestElementFocusableParent = dom.parentWithClass(nearestElement, 'focusable');
if (nearestElementFocusableParent && nearestElementFocusableParent !== nearestElement) {
if (focusableContainer !== nearestElementFocusableParent) {
nearestElement = nearestElementFocusableParent;
}
}
}
focus(nearestElement);
}
}
function intersectsInternal(a1, a2, b1, b2) {
return (b1 >= a1 && b1 <= a2) || (b2 >= a1 && b2 <= a2);
}
function intersects(a1, a2, b1, b2) {
return intersectsInternal(a1, a2, b1, b2) || intersectsInternal(b1, b2, a1, a2);
}
function sendText(text) {
var elem = document.activeElement;
elem.value = text;
}
function focusFirst(container, focusableSelector) {
var elems = container.querySelectorAll(focusableSelector);
for (var i = 0, length = elems.length; i < length; i++) {
var elem = elems[i];
if (isCurrentlyFocusableInternal(elem)) {
focus(elem);
break;
}
}
}
function focusLast(container, focusableSelector) {
var elems = [].slice.call(container.querySelectorAll(focusableSelector), 0).reverse();
for (var i = 0, length = elems.length; i < length; i++) {
var elem = elems[i];
if (isCurrentlyFocusableInternal(elem)) {
focus(elem);
break;
}
}
}
function moveFocus(sourceElement, container, focusableSelector, offset) {
var elems = container.querySelectorAll(focusableSelector);
var list = [];
var i;
var length;
var elem;
for (i = 0, length = elems.length; i < length; i++) {
elem = elems[i];
if (isCurrentlyFocusableInternal(elem)) {
list.push(elem);
}
}
var currentIndex = -1;
for (i = 0, length = list.length; i < length; i++) {
elem = list[i];
if (sourceElement === elem || elem.contains(sourceElement)) {
currentIndex = i;
break;
}
}
if (currentIndex === -1) {
return;
}
var newIndex = currentIndex + offset;
newIndex = Math.max(0, newIndex);
newIndex = Math.min(newIndex, list.length - 1);
var newElem = list[newIndex];
if (newElem) {
focus(newElem);
}
}
return {
autoFocus: autoFocus,
focus: focus,
focusableParent: focusableParent,
getFocusableElements: getFocusableElements,
moveLeft: function (sourceElement, options) {
var container = options ? options.container : null;
var focusableElements = options ? options.focusableElements : null;
nav(sourceElement, 0, container, focusableElements);
},
moveRight: function (sourceElement, options) {
var container = options ? options.container : null;
var focusableElements = options ? options.focusableElements : null;
nav(sourceElement, 1, container, focusableElements);
},
moveUp: function (sourceElement, options) {
var container = options ? options.container : null;
var focusableElements = options ? options.focusableElements : null;
nav(sourceElement, 2, container, focusableElements);
},
moveDown: function (sourceElement, options) {
var container = options ? options.container : null;
var focusableElements = options ? options.focusableElements : null;
nav(sourceElement, 3, container, focusableElements);
},
sendText: sendText,
isCurrentlyFocusable: isCurrentlyFocusable,
pushScope: pushScope,
popScope: popScope,
focusFirst: focusFirst,
focusLast: focusLast,
moveFocus: moveFocus
};
});
| 1 | 17,169 | You can remove this since ES6 modules are strict by default. And thanks for contributing to the Jellyfin Project. | jellyfin-jellyfin-web | js |
@@ -32,7 +32,7 @@ public class TransactionRLPDecoderTest {
private static final String FRONTIER_TX_RLP =
"0xf901fc8032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b561ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884";
private static final String EIP1559_TX_RLP =
- "0xf902028032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b5682020f8201711ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884";
+ "0xb902060ff902028032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b5682020f8201711ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884";
private static final String GOQUORUM_PRIVATE_TX_RLP =
"0xf88d0b808347b7608080b840290a80a37d198ff06abe189b638ff53ac8a8dc51a0aff07609d2aa75342783ae493b3e3c6b564c0eebe49284b05a0726fb33087b9e0231d349ea0c7b5661c8c526a07144db7045a395e608cda6ab051c86cc4fb42e319960b82087f3b26f0cbc3c2da00223ac129b22aec7a6c2ace3c3ef39c5eaaa54070fd82d8ee2140b0e70b1dca9";
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.core.encoding;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPInput;
import org.apache.tuweni.bytes.Bytes;
import org.junit.Test;
public class TransactionRLPDecoderTest {
private static final String FRONTIER_TX_RLP =
"0xf901fc8032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b561ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884";
private static final String EIP1559_TX_RLP =
"0xf902028032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b5682020f8201711ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884";
private static final String GOQUORUM_PRIVATE_TX_RLP =
"0xf88d0b808347b7608080b840290a80a37d198ff06abe189b638ff53ac8a8dc51a0aff07609d2aa75342783ae493b3e3c6b564c0eebe49284b05a0726fb33087b9e0231d349ea0c7b5661c8c526a07144db7045a395e608cda6ab051c86cc4fb42e319960b82087f3b26f0cbc3c2da00223ac129b22aec7a6c2ace3c3ef39c5eaaa54070fd82d8ee2140b0e70b1dca9";
@Test
public void decodeGoQuorumPrivateTransactionRlp() {
GoQuorumOptions.goquorumCompatibilityMode = true;
RLPInput input = RLP.input(Bytes.fromHexString(GOQUORUM_PRIVATE_TX_RLP));
final Transaction transaction = TransactionRLPDecoder.decode(input);
assertThat(transaction).isNotNull();
assertThat(transaction.getV()).isEqualTo(38);
assertThat(transaction.getSender())
.isEqualByComparingTo(Address.fromHexString("0xed9d02e382b34818e88b88a309c7fe71e65f419d"));
GoQuorumOptions.goquorumCompatibilityMode =
GoQuorumOptions.GOQUORUM_COMPATIBILITY_MODE_DEFAULT_VALUE;
}
@Test
public void decodeFrontierNominalCase() {
final Transaction transaction =
TransactionRLPDecoder.decode(RLP.input(Bytes.fromHexString(FRONTIER_TX_RLP)));
assertThat(transaction).isNotNull();
assertThat(transaction.getGasPrice()).isEqualByComparingTo(Wei.of(50L));
assertThat(transaction.getGasPremium()).isEmpty();
assertThat(transaction.getFeeCap()).isEmpty();
}
@Test
public void decodeEIP1559NominalCase() {
ExperimentalEIPs.eip1559Enabled = true;
final Transaction transaction =
TransactionRLPDecoder.decode(RLP.input(Bytes.fromHexString(EIP1559_TX_RLP)));
assertThat(transaction).isNotNull();
assertThat(transaction.getGasPremium()).hasValue(Wei.of(527L));
assertThat(transaction.getFeeCap()).hasValue(Wei.of(369L));
ExperimentalEIPs.eip1559Enabled = ExperimentalEIPs.EIP1559_ENABLED_DEFAULT_VALUE;
}
}
| 1 | 24,410 | Why did eip1559 go from a list (0xf9020280.....) to a wrapped string (0xb902060ff9020280...) and not just concatenation (0x0ff9020280...)? implementation detail or is this how it sits on the wire now? | hyperledger-besu | java |
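For context on the question above: this matches the EIP-2718 typed-transaction
envelope scheme, where the typed payload (a transaction-type byte followed by
the RLP of the fields) is treated as an opaque byte string, so its outer RLP
header is a string header (0xb9...) rather than a list header (0xf9...). A
self-contained Python sketch of the two headers; the type byte 0x0f and the
field values are placeholders:

def rlp_encode(item):
    # Minimal RLP: bytes encode as strings, lists encode recursively.
    if isinstance(item, bytes):
        if len(item) == 1 and item[0] < 0x80:
            return item
        return _length_prefix(len(item), 0x80) + item
    payload = b"".join(rlp_encode(x) for x in item)
    return _length_prefix(len(payload), 0xC0) + payload

def _length_prefix(length, offset):
    if length < 56:
        return bytes([offset + length])
    n = length.to_bytes((length.bit_length() + 7) // 8, "big")
    return bytes([offset + 55 + len(n)]) + n

tx_fields = [b"\x01", b"\x02", b"\x00" * 500]   # stand-ins for the tx fields
legacy = rlp_encode(tx_fields)                  # list header: f901f9...
typed = rlp_encode(b"\x0f" + legacy)            # string header: b901fd0f...
print(legacy[:3].hex(), typed[:4].hex())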
@@ -1334,9 +1334,9 @@ func TestConfigCheckIncludes(t *testing.T) {
if err == nil {
t.Errorf("Expected error processing include files with configuration check enabled: %v", err)
}
- expectedErr := errors.New(`configs/include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"` + "\n")
- if err != nil && expectedErr != nil && err.Error() != expectedErr.Error() {
- t.Errorf("Expected: \n%q, got\n: %q", expectedErr.Error(), err.Error())
+ expectedErr := `include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"` + "\n"
+ if err != nil && !strings.HasSuffix(err.Error(), expectedErr) {
+ t.Errorf("Expected: \n%q, got\n: %q", expectedErr, err.Error())
}
}
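A short illustration of why the patch above compares only the error suffix:
the reported path presumably includes machine-specific directories, so an
exact match would be brittle. The path below is made up:

err = '/tmp/build/configs/include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"\n'
expected = 'include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"\n'
assert err.endswith(expected)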
| 1 | // Copyright 2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"errors"
"fmt"
"os"
"strings"
"testing"
)
func TestConfigCheck(t *testing.T) {
tests := []struct {
// name is the name of the test.
name string
		// config is the content of the configuration file.
config string
		// warningErr is an error that does not prevent the server from starting.
warningErr error
		// errorLine is the line number of the error.
errorLine int
		// errorPos is the column position of the error.
errorPos int
		// warning errors may optionally include a reason.
reason string
		// err is a configuration error that includes the source of the error.
err error
}{
{
name: "when unknown field is used at top level",
config: `
monitor = "127.0.0.1:4442"
`,
err: errors.New(`unknown field "monitor"`),
errorLine: 2,
errorPos: 17,
},
{
name: "when default permissions are used at top level",
config: `
"default_permissions" {
publish = ["_SANDBOX.>"]
subscribe = ["_SANDBOX.>"]
}
`,
err: errors.New(`unknown field "default_permissions"`),
errorLine: 2,
errorPos: 18,
},
{
name: "when authorization config is empty",
config: `
authorization = {
}
`,
err: nil,
},
{
name: "when authorization config has unknown fields",
config: `
authorization = {
foo = "bar"
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 3,
errorPos: 5,
},
{
name: "when authorization config has unknown fields",
config: `
port = 4222
authorization = {
user = "hello"
foo = "bar"
password = "world"
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 6,
errorPos: 5,
},
{
name: "when user authorization config has unknown fields",
config: `
authorization = {
users = [
{
user = "foo"
pass = "bar"
token = "quux"
}
]
}
`,
err: errors.New(`unknown field "token"`),
errorLine: 7,
errorPos: 9,
},
{
name: "when user authorization permissions config has unknown fields",
config: `
authorization {
permissions {
subscribe = {}
inboxes = {}
publish = {}
}
}
`,
err: errors.New(`Unknown field "inboxes" parsing permissions`),
errorLine: 5,
errorPos: 7,
},
{
name: "when user authorization permissions config has unknown fields within allow or deny",
config: `
authorization {
permissions {
subscribe = {
allow = ["hello", "world"]
deny = ["foo", "bar"]
denied = "_INBOX.>"
}
publish = {}
}
}
`,
err: errors.New(`Unknown field name "denied" parsing subject permissions, only 'allow' or 'deny' are permitted`),
errorLine: 7,
errorPos: 9,
},
{
name: "when user authorization permissions config has unknown fields within allow or deny",
config: `
authorization {
permissions {
publish = {
allow = ["hello", "world"]
deny = ["foo", "bar"]
allowed = "_INBOX.>"
}
subscribe = {}
}
}
`,
err: errors.New(`Unknown field name "allowed" parsing subject permissions, only 'allow' or 'deny' are permitted`),
errorLine: 7,
errorPos: 9,
},
{
name: "when user authorization permissions config has unknown fields using arrays",
config: `
authorization {
default_permissions {
subscribe = ["a"]
publish = ["b"]
inboxes = ["c"]
}
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Unknown field "inboxes" parsing permissions`),
errorLine: 7,
errorPos: 6,
},
{
name: "when user authorization permissions config has unknown fields using strings",
config: `
authorization {
default_permissions {
subscribe = "a"
requests = "b"
publish = "c"
}
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Unknown field "requests" parsing permissions`),
errorLine: 6,
errorPos: 6,
},
{
name: "when user authorization permissions config is empty",
config: `
authorization = {
users = [
{
user = "foo", pass = "bar", permissions = {
}
}
]
}
`,
err: nil,
},
{
name: "when unknown permissions are included in user config",
config: `
authorization = {
users = [
{
user = "foo", pass = "bar", permissions {
inboxes = true
}
}
]
}
`,
err: errors.New(`Unknown field "inboxes" parsing permissions`),
errorLine: 6,
errorPos: 11,
},
{
name: "when clustering config is empty",
config: `
cluster = {
}
`,
err: nil,
},
{
name: "when unknown option is in clustering config",
config: `
# NATS Server Configuration
port = 4222
cluster = {
port = 6222
foo = "bar"
authorization {
user = "hello"
pass = "world"
}
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 9,
errorPos: 5,
},
{
name: "when unknown option is in clustering authorization config",
config: `
cluster = {
authorization {
foo = "bar"
}
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 4,
errorPos: 7,
},
{
name: "when unknown option is in tls config",
config: `
tls = {
hello = "world"
}
`,
err: errors.New(`error parsing tls config, unknown field ["hello"]`),
errorLine: 3,
errorPos: 5,
},
{
name: "when unknown option is in cluster tls config",
config: `
cluster {
tls = {
foo = "bar"
}
}
`,
err: errors.New(`error parsing tls config, unknown field ["foo"]`),
errorLine: 4,
errorPos: 7,
},
{
name: "when using cipher suites in the TLS config",
config: `
tls = {
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
preferences = []
}
`,
err: errors.New(`error parsing tls config, unknown field ["preferences"]`),
errorLine: 7,
errorPos: 7,
},
{
name: "when using curve preferences in the TLS config",
config: `
tls = {
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
suites = []
}
`,
err: errors.New(`error parsing tls config, unknown field ["suites"]`),
errorLine: 8,
errorPos: 7,
},
{
name: "when using curve preferences in the TLS config",
config: `
tls = {
curve_preferences: [
"CurveP5210000"
]
}
`,
err: errors.New(`unrecognized curve preference CurveP5210000`),
errorLine: 4,
errorPos: 5,
},
{
name: "when unknown option is in cluster config with defined routes",
config: `
cluster {
port = 6222
routes = [
nats://127.0.0.1:6222
]
peers = []
}
`,
err: errors.New(`unknown field "peers"`),
errorLine: 7,
errorPos: 5,
},
{
name: "when used as variable in authorization block it should not be considered as unknown field",
config: `
# listen: 127.0.0.1:-1
listen: 127.0.0.1:4222
authorization {
# Superuser can do anything.
super_user = {
publish = ">"
subscribe = ">"
}
# Can do requests on foo or bar, and subscribe to anything
# that is a response to an _INBOX.
#
# Notice that authorization filters can be singletons or arrays.
req_pub_user = {
publish = ["req.foo", "req.bar"]
subscribe = "_INBOX.>"
}
# Setup a default user that can subscribe to anything, but has
# no publish capabilities.
default_user = {
subscribe = "PUBLIC.>"
}
unused = "hello"
# Default permissions if none presented. e.g. susan below.
default_permissions: $default_user
		  # Users listed with permissions.
users = [
{user: alice, password: foo, permissions: $super_user}
{user: bob, password: bar, permissions: $req_pub_user}
{user: susan, password: baz}
]
}
`,
err: errors.New(`unknown field "unused"`),
errorLine: 27,
errorPos: 5,
},
{
name: "when used as variable in top level config it should not be considered as unknown field",
config: `
monitoring_port = 8222
http_port = $monitoring_port
port = 4222
`,
err: nil,
},
{
name: "when used as variable in cluster config it should not be considered as unknown field",
config: `
cluster {
clustering_port = 6222
port = $clustering_port
}
`,
err: nil,
},
{
name: "when setting permissions within cluster authorization block",
config: `
cluster {
authorization {
permissions = {
publish = { allow = ["foo", "bar"] }
}
}
permissions = {
publish = { deny = ["foo", "bar"] }
}
}
`,
warningErr: errors.New(`invalid use of field "authorization"`),
errorLine: 3,
errorPos: 5,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
{
name: "when write deadline is used with deprecated usage",
config: `
write_deadline = 100
`,
warningErr: errors.New(`invalid use of field "write_deadline"`),
errorLine: 2,
errorPos: 17,
reason: `write_deadline should be converted to a duration`,
},
/////////////////////
// ACCOUNTS //
/////////////////////
{
name: "when accounts block is correctly configured",
config: `
http_port = 8222
accounts {
#
# synadia > nats.io, cncf
#
synadia {
# SAADJL5XAEM6BDYSWDTGVILJVY54CQXZM5ZLG4FRUAKB62HWRTPNSGXOHA
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
# SUAEL6RU3BSDAFKOHNTEOK5Q6FTM5FTAMWVIKBET6FHPO4JRII3CYELVNM
nkey = "UCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
}
]
exports = [
{ service: "synadia.requests", accounts: [nats, cncf] }
]
}
#
# nats < synadia
#
nats {
# SUAJTM55JH4BNYDA22DMDZJSRBRKVDGSLYK2HDIOCM3LPWCDXIDV5Q4CIE
nkey = "ADRZ42QBM7SXQDXXTSVWT2WLLFYOQGAFC4TO6WOAXHEKQHIXR4HFYJDS"
users [
{
# SUADZTYQAKTY5NQM7XRB5XR3C24M6ROGZLBZ6P5HJJSSOFUGC5YXOOECOM
nkey = "UD6AYQSOIN2IN5OGC6VQZCR4H3UFMIOXSW6NNS6N53CLJA4PB56CEJJI"
}
]
imports = [
# This account has to send requests to 'nats.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" }, to: "nats.requests" }
]
}
#
# cncf < synadia
#
cncf {
# SAAFHDZX7SGZ2SWHPS22JRPPK5WX44NPLNXQHR5C5RIF6QRI3U65VFY6C4
nkey = "AD4YRVUJF2KASKPGRMNXTYKIYSCB3IHHB4Y2ME6B2PDIV5QJ23C2ZRIT"
users [
{
# SUAKINP3Z2BPUXWOFSW2FZC7TFJCMMU7DHKP2C62IJQUDASOCDSTDTRMJQ
nkey = "UB57IEMPG4KOTPFV5A66QKE2HZ3XBXFHVRCCVMJEWKECMVN2HSH3VTSJ"
}
]
imports = [
# This account has to send requests to 'synadia.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" } }
]
}
}
`,
err: nil,
},
{
name: "when nkey is invalid within accounts block",
config: `
accounts {
#
# synadia > nats.io, cncf
#
synadia {
# SAADJL5XAEM6BDYSWDTGVILJVY54CQXZM5ZLG4FRUAKB62HWRTPNSGXOHA
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
# SUAEL6RU3BSDAFKOHNTEOK5Q6FTM5FTAMWVIKBET6FHPO4JRII3CYELVNM
nkey = "SCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
}
]
exports = [
{ service: "synadia.requests", accounts: [nats, cncf] }
]
}
#
# nats < synadia
#
nats {
# SUAJTM55JH4BNYDA22DMDZJSRBRKVDGSLYK2HDIOCM3LPWCDXIDV5Q4CIE
nkey = "ADRZ42QBM7SXQDXXTSVWT2WLLFYOQGAFC4TO6WOAXHEKQHIXR4HFYJDS"
users [
{
# SUADZTYQAKTY5NQM7XRB5XR3C24M6ROGZLBZ6P5HJJSSOFUGC5YXOOECOM
nkey = "UD6AYQSOIN2IN5OGC6VQZCR4H3UFMIOXSW6NNS6N53CLJA4PB56CEJJI"
}
]
imports = [
# This account has to send requests to 'nats.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" }, to: "nats.requests" }
]
}
#
# cncf < synadia
#
cncf {
# SAAFHDZX7SGZ2SWHPS22JRPPK5WX44NPLNXQHR5C5RIF6QRI3U65VFY6C4
nkey = "AD4YRVUJF2KASKPGRMNXTYKIYSCB3IHHB4Y2ME6B2PDIV5QJ23C2ZRIT"
users [
{
# SUAKINP3Z2BPUXWOFSW2FZC7TFJCMMU7DHKP2C62IJQUDASOCDSTDTRMJQ
nkey = "UB57IEMPG4KOTPFV5A66QKE2HZ3XBXFHVRCCVMJEWKECMVN2HSH3VTSJ"
}
]
imports = [
# This account has to send requests to 'synadia.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" } }
]
}
}
`,
err: errors.New(`Not a valid public nkey for a user`),
errorLine: 14,
errorPos: 11,
},
{
name: "when accounts block has unknown fields",
config: `
http_port = 8222
accounts {
foo = "bar"
}`,
err: errors.New(`Expected map entries for accounts`),
errorLine: 5,
errorPos: 19,
},
{
name: "when accounts has a referenced config variable within same block",
config: `
accounts {
PERMISSIONS = {
publish = {
allow = ["foo","bar"]
deny = ["quux"]
}
}
synadia {
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
nkey = "UCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
permissions = $PERMISSIONS
}
]
exports = [
{ stream: "synadia.>" }
]
}
}`,
err: nil,
},
{
name: "when accounts has an unreferenced config variables within same block",
config: `
accounts {
PERMISSIONS = {
publish = {
allow = ["foo","bar"]
deny = ["quux"]
}
}
synadia {
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
nkey = "UCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
}
]
exports = [
{ stream: "synadia.>" }
]
}
}`,
err: errors.New(`unknown field "publish"`),
errorLine: 4,
errorPos: 5,
},
{
name: "when accounts block defines a global account",
config: `
http_port = 8222
accounts {
$G = {
}
}
`,
err: errors.New(`"$G" is a Reserved Account`),
errorLine: 5,
errorPos: 19,
},
{
name: "when accounts block uses an invalid public key",
config: `
accounts {
synadia = {
nkey = "invalid"
}
}
`,
err: errors.New(`Not a valid public nkey for an account: "invalid"`),
errorLine: 4,
errorPos: 21,
},
{
name: "when accounts list includes reserved account",
config: `
port = 4222
accounts = [foo, bar, "$G"]
http_port = 8222
`,
err: errors.New(`"$G" is a Reserved Account`),
errorLine: 4,
errorPos: 26,
},
{
name: "when accounts list includes a dupe entry",
config: `
port = 4222
accounts = [foo, bar, bar]
http_port = 8222
`,
err: errors.New(`Duplicate Account Entry: bar`),
errorLine: 4,
errorPos: 25,
},
{
name: "when accounts block includes a dupe user",
config: `
port = 4222
accounts = {
nats {
users = [
{ user: "foo", pass: "bar" },
{ user: "hello", pass: "world" },
{ user: "foo", pass: "bar" }
]
}
}
http_port = 8222
`,
err: errors.New(`Duplicate user "foo" detected`),
errorLine: 6,
errorPos: 21,
},
{
name: "when accounts block imports are not a list",
config: `
port = 4222
accounts = {
nats {
imports = true
}
}
http_port = 8222
`,
err: errors.New(`Imports should be an array, got bool`),
errorLine: 6,
errorPos: 21,
},
{
name: "when accounts block exports are not a list",
config: `
port = 4222
accounts = {
nats {
exports = true
}
}
http_port = 8222
`,
err: errors.New(`Exports should be an array, got bool`),
errorLine: 6,
errorPos: 21,
},
{
name: "when accounts block imports items are not a map",
config: `
port = 4222
accounts = {
nats {
imports = [
false
]
}
}
http_port = 8222
`,
err: errors.New(`Import Items should be a map with type entry, got bool`),
errorLine: 7,
errorPos: 23,
},
{
name: "when accounts block export items are not a map",
config: `
port = 4222
accounts = {
nats {
exports = [
false
]
}
}
http_port = 8222
`,
err: errors.New(`Export Items should be a map with type entry, got bool`),
errorLine: 7,
errorPos: 23,
},
{
name: "when accounts exports has a stream name that is not a string",
config: `
port = 4222
accounts = {
nats {
exports = [
{
stream: false
}
]
}
}
http_port = 8222
`,
err: errors.New(`Expected stream name to be string, got bool`),
errorLine: 8,
errorPos: 25,
},
{
name: "when accounts exports has a service name that is not a string",
config: `
accounts = {
nats {
exports = [
{
service: false
}
]
}
}
`,
err: errors.New(`Expected service name to be string, got bool`),
errorLine: 6,
errorPos: 25,
},
{
name: "when accounts imports stream without name",
config: `
port = 4222
accounts = {
nats {
imports = [
{ stream: { }}
]
}
}
http_port = 8222
`,
err: errors.New(`Expect an account name and a subject`),
errorLine: 7,
errorPos: 25,
},
{
name: "when accounts imports service without name",
config: `
port = 4222
accounts = {
nats {
imports = [
{ service: { }}
]
}
}
http_port = 8222
`,
err: errors.New(`Expect an account name and a subject`),
errorLine: 7,
errorPos: 25,
},
{
name: "when user authorization config has both token and users",
config: `
authorization = {
token = "s3cr3t"
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Can not have a token and a users array`),
errorLine: 2,
errorPos: 3,
},
{
name: "when user authorization config has both token and user",
config: `
authorization = {
user = "foo"
pass = "bar"
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Can not have a single user/pass and a users array`),
errorLine: 2,
errorPos: 3,
},
{
name: "when user authorization config has users not as a list",
config: `
authorization = {
users = false
}
`,
err: errors.New(`Expected users field to be an array, got false`),
errorLine: 3,
errorPos: 5,
},
{
name: "when user authorization config has users not as a map",
config: `
authorization = {
users = [false]
}
`,
err: errors.New(`Expected user entry to be a map/struct, got false`),
errorLine: 3,
errorPos: 14,
},
{
name: "when user authorization config has permissions not as a map",
config: `
authorization = {
users = [{user: hello, pass: world}]
permissions = false
}
`,
err: errors.New(`Expected permissions to be a map/struct, got false`),
errorLine: 4,
errorPos: 19,
},
{
name: "when user authorization permissions config has invalid fields within allow",
config: `
authorization {
permissions {
publish = {
allow = [false, "hello", "world"]
deny = ["foo", "bar"]
}
subscribe = {}
}
}
`,
err: errors.New(`Subject in permissions array cannot be cast to string`),
errorLine: 5,
errorPos: 18,
},
{
name: "when user authorization permissions config has invalid fields within deny",
config: `
authorization {
permissions {
publish = {
allow = ["hello", "world"]
deny = [true, "foo", "bar"]
}
subscribe = {}
}
}
`,
err: errors.New(`Subject in permissions array cannot be cast to string`),
errorLine: 6,
errorPos: 17,
},
{
name: "when user authorization permissions config has invalid type",
config: `
authorization {
permissions {
publish = {
allow = false
}
subscribe = {}
}
}
`,
err: errors.New(`Expected subject permissions to be a subject, or array of subjects, got bool`),
errorLine: 5,
errorPos: 9,
},
{
name: "when user authorization permissions subject is invalid",
config: `
authorization {
permissions {
publish = {
allow = ["foo..bar"]
}
subscribe = {}
}
}
`,
err: errors.New(`subject "foo..bar" is not a valid subject`),
errorLine: 5,
errorPos: 9,
},
{
name: "when cluster config listen is invalid",
config: `
cluster {
listen = "0.0.0.0:XXXX"
}
`,
err: errors.New(`could not parse port "XXXX"`),
errorLine: 3,
errorPos: 5,
},
{
name: "when cluster config includes multiple users",
config: `
cluster {
authorization {
users = []
}
}
`,
err: errors.New(`Cluster authorization does not allow multiple users`),
errorLine: 3,
errorPos: 5,
},
{
name: "when cluster routes are invalid",
config: `
cluster {
routes = [
"0.0.0.0:XXXX"
# "0.0.0.0:YYYY"
# "0.0.0.0:ZZZZ"
]
}
`,
err: errors.New(`error parsing route url ["0.0.0.0:XXXX"]`),
errorLine: 4,
errorPos: 22,
},
{
name: "when setting invalid TLS config within cluster block",
config: `
cluster {
tls {
}
}
`,
err: nil,
errorLine: 0,
errorPos: 0,
},
{
name: "invalid lame_duck_duration type",
config: `
lame_duck_duration: abc
`,
err: errors.New(`error parsing lame_duck_duration: time: invalid duration abc`),
errorLine: 2,
errorPos: 3,
},
{
name: "when only setting TLS timeout for a leafnode remote",
config: `
leafnodes {
remotes = [
{
url: "tls://connect.ngs.global:7422"
tls {
timeout: 0.01
}
}
]
}`,
err: nil,
errorLine: 0,
errorPos: 0,
},
{
name: "when setting latency tracking without a system account",
config: `
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ service: "nats.add"
response: singleton
latency: {
sampling: 100%
subject: "latency.tracking.add"
}
}
]
}
}
`,
err: errors.New(`Error adding service latency sampling for "nats.add": system account not setup`),
errorLine: 2,
errorPos: 17,
},
{
name: "when setting latency tracking with a system account",
config: `
system_account: sys
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ service: "nats.add"
response: singleton
latency: {
sampling: 100%
subject: "latency.tracking.add"
}
}
]
}
}
`,
err: nil,
errorLine: 0,
errorPos: 0,
},
{
name: "when setting latency tracking with an invalid publish subject",
config: `
system_account = sys
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ service: "nats.add"
response: singleton
latency: "*"
}
]
}
}
`,
err: errors.New(`Error adding service latency sampling for "nats.add" on subject "*": invalid publish subject`),
errorLine: 3,
errorPos: 17,
},
{
name: "when setting latency tracking on a stream",
config: `
system_account = sys
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ stream: "nats.add"
latency: "foo"
}
]
}
}
`,
err: errors.New(`Detected latency directive on non-service`),
errorLine: 11,
errorPos: 25,
},
{
name: "when using duplicate service import subject",
config: `
accounts {
A: {
users = [ {user: user1, pass: ""} ]
exports = [
{service: "remote1"}
{service: "remote2"}
]
}
B: {
users = [ {user: user2, pass: ""} ]
imports = [
{service: {account: "A", subject: "remote1"}, to: "local"}
{service: {account: "A", subject: "remote2"}, to: "local"}
]
}
}
`,
err: errors.New(`Duplicate service import subject "local", previously used in import for account "A", subject "remote1"`),
errorLine: 14,
errorPos: 71,
},
{
name: "mixing single and multi users in leafnode authorization",
config: `
leafnodes {
authorization {
user: user1
password: pwd
users = [{user: user2, password: pwd}]
}
}
`,
err: errors.New("can not have a single user/pass and a users array"),
errorLine: 3,
errorPos: 20,
},
{
name: "duplicate usernames in leafnode authorization",
config: `
leafnodes {
authorization {
users = [
{user: user, password: pwd}
{user: user, password: pwd}
]
}
}
`,
err: errors.New(`duplicate user "user" detected in leafnode authorization`),
errorLine: 3,
errorPos: 20,
},
}
checkConfig := func(config string) error {
opts := &Options{
CheckConfig: true,
}
return opts.ProcessConfigFile(config)
}
checkErr := func(t *testing.T, err, expectedErr error) {
t.Helper()
switch {
case err == nil && expectedErr == nil:
// OK
case err != nil && expectedErr == nil:
t.Errorf("Unexpected error after processing config: %s", err)
case err == nil && expectedErr != nil:
t.Errorf("Expected %q error after processing invalid config but got nothing", expectedErr)
}
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf := createConfFile(t, []byte(test.config))
defer os.Remove(conf)
err := checkConfig(conf)
var expectedErr error
// Check for either warnings or errors.
if test.err != nil {
expectedErr = test.err
} else if test.warningErr != nil {
expectedErr = test.warningErr
}
if err != nil && expectedErr != nil {
msg := fmt.Sprintf("%s:%d:%d: %s", conf, test.errorLine, test.errorPos, expectedErr.Error())
if test.reason != "" {
msg += ": " + test.reason
}
msg += "\n"
if err.Error() != msg {
t.Errorf("Expected:\n%q\ngot:\n%q", msg, err.Error())
}
}
checkErr(t, err, expectedErr)
})
}
}
func TestConfigCheckIncludes(t *testing.T) {
// Check happy path first.
opts := &Options{
CheckConfig: true,
}
err := opts.ProcessConfigFile("./configs/include_conf_check_a.conf")
if err != nil {
t.Errorf("Unexpected error processing include files with configuration check enabled: %v", err)
}
opts = &Options{
CheckConfig: true,
}
err = opts.ProcessConfigFile("./configs/include_bad_conf_check_a.conf")
if err == nil {
t.Errorf("Expected error processing include files with configuration check enabled: %v", err)
}
expectedErr := errors.New(`configs/include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"` + "\n")
if err != nil && expectedErr != nil && err.Error() != expectedErr.Error() {
t.Errorf("Expected: \n%q, got\n: %q", expectedErr.Error(), err.Error())
}
}
func TestConfigCheckMultipleErrors(t *testing.T) {
opts := &Options{
CheckConfig: true,
}
err := opts.ProcessConfigFile("./configs/multiple_errors.conf")
if err == nil {
t.Errorf("Expected error processing config files with multiple errors check enabled: %v", err)
}
cerr, ok := err.(*processConfigErr)
if !ok {
t.Fatalf("Expected a configuration process error")
}
got := len(cerr.Warnings())
expected := 1
if got != expected {
t.Errorf("Expected a %d warning, got: %d", expected, got)
}
got = len(cerr.Errors())
expected = 7
	if got != expected {
		t.Errorf("Expected %d errors, got: %d", expected, got)
}
errMsg := err.Error()
errs := []string{
`./configs/multiple_errors.conf:12:1: invalid use of field "write_deadline": write_deadline should be converted to a duration`,
`./configs/multiple_errors.conf:2:1: Cannot have a user/pass and token`,
`./configs/multiple_errors.conf:10:1: unknown field "monitoring"`,
`./configs/multiple_errors.conf:67:3: Cluster authorization does not allow multiple users`,
`./configs/multiple_errors.conf:21:5: Not a valid public nkey for an account: "OC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"`,
`./configs/multiple_errors.conf:26:9: Not a valid public nkey for a user`,
`./configs/multiple_errors.conf:36:5: Not a valid public nkey for an account: "ODRZ42QBM7SXQDXXTSVWT2WLLFYOQGAFC4TO6WOAXHEKQHIXR4HFYJDS"`,
`./configs/multiple_errors.conf:41:9: Not a valid public nkey for a user`,
}
for _, msg := range errs {
found := strings.Contains(errMsg, msg)
if !found {
t.Errorf("Expected to find error %q", msg)
}
}
}
| 1 | 9,682 | @wallyqs Since on Windows it would be `\` instead of `/`, I just look at the suffix past `configs/`. Let me know if that's ok or not. | nats-io-nats-server | go
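A minimal sketch of the idea in the patch and review comment above: asserting only on the platform-independent suffix of the error message keeps the test green whether the include path was joined with `/` or `\`. Illustrative only, with the expected fragment taken from the patch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The expected fragment deliberately omits the "configs" directory prefix,
	// since the separator before the file name differs by platform.
	want := `include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"` + "\n"

	unix := "configs/" + want    // error message as produced on Unix-like systems
	windows := `configs\` + want // error message as produced on Windows

	fmt.Println(strings.HasSuffix(unix, want))    // true
	fmt.Println(strings.HasSuffix(windows, want)) // true
}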
@@ -1385,5 +1385,12 @@ func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightD
return &webservice.GetInsightDataResponse{
UpdatedAt: updateAt,
DataPoints: idp,
+ Type: model.InsightResultType_MATRIX,
+ Matrix: []*model.InsightSampleStream{
+ {
+ Labels: nil,
+ DataPoints: idp,
+ },
+ },
}, nil
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcapi
import (
"context"
"encoding/base64"
"errors"
"fmt"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/crypto"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/insight/insightstore"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth"
)
type encrypter interface {
Encrypt(text string) (string, error)
}
// WebAPI implements the behaviors for the gRPC definitions of WebAPI.
type WebAPI struct {
applicationStore datastore.ApplicationStore
environmentStore datastore.EnvironmentStore
deploymentStore datastore.DeploymentStore
pipedStore datastore.PipedStore
projectStore datastore.ProjectStore
apiKeyStore datastore.APIKeyStore
stageLogStore stagelogstore.Store
applicationLiveStateStore applicationlivestatestore.Store
insightstore insightstore.Store
commandStore commandstore.Store
encrypter encrypter
appProjectCache cache.Cache
deploymentProjectCache cache.Cache
pipedProjectCache cache.Cache
insightCache cache.Cache
projectsInConfig map[string]config.ControlPlaneProject
logger *zap.Logger
}
// NewWebAPI creates a new WebAPI instance.
func NewWebAPI(
ctx context.Context,
ds datastore.DataStore,
sls stagelogstore.Store,
alss applicationlivestatestore.Store,
cmds commandstore.Store,
is insightstore.Store,
rd redis.Redis,
projs map[string]config.ControlPlaneProject,
encrypter encrypter,
logger *zap.Logger) *WebAPI {
a := &WebAPI{
applicationStore: datastore.NewApplicationStore(ds),
environmentStore: datastore.NewEnvironmentStore(ds),
deploymentStore: datastore.NewDeploymentStore(ds),
pipedStore: datastore.NewPipedStore(ds),
projectStore: datastore.NewProjectStore(ds),
apiKeyStore: datastore.NewAPIKeyStore(ds),
stageLogStore: sls,
insightstore: is,
applicationLiveStateStore: alss,
commandStore: cmds,
projectsInConfig: projs,
encrypter: encrypter,
appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
insightCache: rediscache.NewTTLCache(rd, 3*time.Hour),
logger: logger.Named("web-api"),
}
return a
}
// Register registers all handling of this service into the specified gRPC server.
func (a *WebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
env := model.Environment{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
}
err = a.environmentStore.AddEnvironment(ctx, &env)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The environment already exists")
}
if err != nil {
a.logger.Error("failed to create environment", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create environment")
}
return &webservice.AddEnvironmentResponse{}, nil
}
func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
envs, err := a.environmentStore.ListEnvironments(ctx, opts)
if err != nil {
a.logger.Error("failed to get environments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get environments")
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
piped := model.Piped{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
EnvIds: req.EnvIds,
Status: model.Piped_OFFLINE,
}
piped.AddKey(keyHash, claims.Subject, time.Now())
err = a.pipedStore.AddPiped(ctx, &piped)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The piped already exists")
}
if err != nil {
a.logger.Error("failed to register piped", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to register piped")
}
return &webservice.RegisterPipedResponse{
Id: piped.Id,
Key: key,
}, nil
}
func (a *WebAPI) UpdatePiped(ctx context.Context, req *webservice.UpdatePipedRequest) (*webservice.UpdatePipedResponse, error) {
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.UpdatePiped(ctx, req.PipedId, func(p *model.Piped) error {
p.Name = req.Name
p.Desc = req.Desc
p.EnvIds = req.EnvIds
return nil
})
}
if err := a.updatePiped(ctx, req.PipedId, updater); err != nil {
return nil, err
}
return &webservice.UpdatePipedResponse{}, nil
}
func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now())
}
if err := a.updatePiped(ctx, req.Id, updater); err != nil {
return nil, err
}
return &webservice.RecreatePipedKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil {
return nil, err
}
return &webservice.EnablePipedResponse{}, nil
}
func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil {
return nil, err
}
return &webservice.DisablePipedResponse{}, nil
}
func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil {
return err
}
if err := updater(ctx, pipedID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The piped is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the piped",
zap.String("piped-id", pipedID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the piped ")
}
}
return nil
}
// TODO: Consider using piped-stats to decide piped connection status.
func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !req.Options.Enabled.GetValue(),
})
}
}
pipeds, err := a.pipedStore.ListPipeds(ctx, opts)
if err != nil {
a.logger.Error("failed to get pipeds", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get pipeds")
}
// Redact all sensitive data inside piped message before sending to the client.
for i := range pipeds {
pipeds[i].RedactSensitiveData()
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
// Redact all sensitive data inside piped message before sending to the client.
piped.RedactSensitiveData()
return &webservice.GetPipedResponse{
Piped: piped,
}, nil
}
// validatePipedBelongsToProject checks if the given piped belongs to the given project.
// It gives back error unless the piped belongs to the project.
func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error {
pid, err := a.pipedProjectCache.Get(pipedID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")
}
return nil
}
piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
if err != nil {
return err
}
a.pipedProjectCache.Put(pipedID, piped.ProjectId)
if piped.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in")
}
return nil
}
// TODO: Validate the specified piped to ensure that it belongs to the specified environment.
func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if piped.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
}
gitpath, err := makeGitPath(
req.GitPath.Repo.Id,
req.GitPath.Path,
req.GitPath.ConfigFilename,
piped,
a.logger,
)
if err != nil {
return nil, err
}
app := model.Application{
Id: uuid.New().String(),
Name: req.Name,
EnvId: req.EnvId,
PipedId: req.PipedId,
ProjectId: claims.Role.ProjectId,
GitPath: gitpath,
Kind: req.Kind,
CloudProvider: req.CloudProvider,
}
err = a.applicationStore.AddApplication(ctx, &app)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The application already exists")
}
if err != nil {
a.logger.Error("failed to create application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create application")
}
return &webservice.AddApplicationResponse{
ApplicationId: app.Id,
}, nil
}
func (a *WebAPI) UpdateApplication(ctx context.Context, req *webservice.UpdateApplicationRequest) (*webservice.UpdateApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if piped.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested piped does not belong to your project")
}
gitpath, err := makeGitPath(
req.GitPath.Repo.Id,
req.GitPath.Path,
req.GitPath.ConfigFilename,
piped,
a.logger,
)
if err != nil {
return nil, err
}
err = a.applicationStore.UpdateApplication(ctx, req.ApplicationId, func(app *model.Application) error {
app.Name = req.Name
app.EnvId = req.EnvId
app.PipedId = req.PipedId
app.GitPath = gitpath
app.Kind = req.Kind
app.CloudProvider = req.CloudProvider
return nil
})
if err != nil {
a.logger.Error("failed to update application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update application")
}
return &webservice.UpdateApplicationResponse{}, nil
}
func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil {
return nil, err
}
return &webservice.EnableApplicationResponse{}, nil
}
func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil {
return nil, err
}
return &webservice.DisableApplicationResponse{}, nil
}
func (a *WebAPI) DeleteApplication(ctx context.Context, req *webservice.DeleteApplicationRequest) (*webservice.DeleteApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
if err := a.applicationStore.DeleteApplication(ctx, req.ApplicationId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.NotFound, "The application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value to delete")
default:
a.logger.Error("failed to delete the application",
zap.String("application-id", req.ApplicationId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to delete the application")
}
}
return &webservice.DeleteApplicationResponse{}, nil
}
func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil {
return err
}
var updater func(context.Context, string) error
if enable {
updater = a.applicationStore.EnableApplication
} else {
updater = a.applicationStore.DisableApplication
}
if err := updater(ctx, appID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The application is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the application",
zap.String("application-id", appID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the application")
}
}
return nil
}
func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
if o.Enabled != nil {
filters = append(filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !o.Enabled.GetValue(),
})
}
// Allowing multiple so that it can do In Query later.
// Currently only the first value is used.
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: "==",
Value: o.Kinds[0],
})
}
if len(o.SyncStatuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "SyncState.Status",
Operator: "==",
Value: o.SyncStatuses[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: "==",
Value: o.EnvIds[0],
})
}
}
apps, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
})
if err != nil {
a.logger.Error("failed to get applications", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get applications")
}
return &webservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != app.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
}
cmd := model.Command{
Id: uuid.New().String(),
PipedId: app.PipedId,
ApplicationId: app.Id,
Type: model.Command_SYNC_APPLICATION,
Commander: claims.Subject,
SyncApplication: &model.Command_SyncApplication{
ApplicationId: app.Id,
SyncStrategy: req.SyncStrategy,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.SyncApplicationResponse{
CommandId: cmd.Id,
}, nil
}
func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if app.ProjectId != claims.Role.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested application does not belong to your project")
}
return &webservice.GetApplicationResponse{
Application: app,
}, nil
}
func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
sse := piped.SealedSecretEncryption
if sse == nil {
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain the encryption configuration")
}
data := req.Data
if req.Base64Encoding {
data = base64.StdEncoding.EncodeToString([]byte(data))
}
var enc encrypter
switch model.SealedSecretManagementType(sse.Type) {
case model.SealedSecretManagementSealingKey:
if sse.PublicKey == "" {
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a public key")
}
enc, err = crypto.NewHybridEncrypter(sse.PublicKey)
if err != nil {
a.logger.Error("failed to initialize the crypter", zap.Error(err))
return nil, status.Error(codes.FailedPrecondition, "Failed to initialize the encrypter")
}
default:
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a valid encryption type")
}
encryptedText, err := enc.Encrypt(data)
if err != nil {
a.logger.Error("failed to encrypt the secret", zap.Error(err))
return nil, status.Error(codes.FailedPrecondition, "Failed to encrypt the secret")
}
return &webservice.GenerateApplicationSealedSecretResponse{
Data: encryptedText,
}, nil
}
// validateAppBelongsToProject checks if the given application belongs to the given project.
// It gives back error unless the application belongs to the project.
func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error {
pid, err := a.appProjectCache.Get(appID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")
}
return nil
}
app, err := getApplication(ctx, a.applicationStore, appID, a.logger)
if err != nil {
return err
}
a.appProjectCache.Put(appID, app.ProjectId)
if app.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
// Allowing multiple so that it can do In Query later.
// Currently only the first value is used.
if len(o.Statuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Status",
Operator: "==",
Value: o.Statuses[0],
})
}
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: "==",
Value: o.Kinds[0],
})
}
if len(o.ApplicationIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "ApplicationId",
Operator: "==",
Value: o.ApplicationIds[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: "==",
Value: o.EnvIds[0],
})
}
if o.MaxUpdatedAt != 0 {
filters = append(filters, datastore.ListFilter{
Field: "UpdatedAt",
Operator: "<=",
Value: o.MaxUpdatedAt,
})
}
}
deployments, err := a.deploymentStore.ListDeployments(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
PageSize: int(req.PageSize),
})
if err != nil {
a.logger.Error("failed to get deployments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get deployments")
}
return &webservice.ListDeploymentsResponse{
Deployments: deployments,
}, nil
}
func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != deployment.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
}
return &webservice.GetDeploymentResponse{
Deployment: deployment,
}, nil
}
// validateDeploymentBelongsToProject checks if the given deployment belongs to the given project.
// It gives back error unless the deployment belongs to the project.
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error {
pid, err := a.deploymentProjectCache.Get(deploymentID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")
}
return nil
}
deployment, err := getDeployment(ctx, a.deploymentStore, deploymentID, a.logger)
if err != nil {
return err
}
a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId)
if deployment.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in")
}
return nil
}
func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex)
if errors.Is(err, stagelogstore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The stage log not found")
}
if err != nil {
a.logger.Error("failed to get stage logs", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get stage logs")
}
return &webservice.GetStageLogResponse{
Blocks: blocks,
Completed: completed,
}, nil
}
func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if claims.Role.ProjectId != deployment.ProjectId {
return nil, status.Error(codes.InvalidArgument, "Requested deployment does not belong to your project")
}
if model.IsCompletedDeployment(deployment.Status) {
return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed")
}
cmd := model.Command{
Id: uuid.New().String(),
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
DeploymentId: req.DeploymentId,
Type: model.Command_CANCEL_DEPLOYMENT,
Commander: claims.Subject,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: req.DeploymentId,
ForceRollback: req.ForceRollback,
ForceNoRollback: req.ForceNoRollback,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.CancelDeploymentResponse{
CommandId: cmd.Id,
}, nil
}
func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
stage, ok := deployment.StageStatusMap()[req.StageId]
if !ok {
return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment")
}
if model.IsCompletedStage(stage) {
return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed")
}
commandID := uuid.New().String()
cmd := model.Command{
Id: commandID,
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
DeploymentId: req.DeploymentId,
StageId: req.StageId,
Type: model.Command_APPROVE_STAGE,
Commander: claims.Subject,
ApproveStage: &model.Command_ApproveStage{
DeploymentId: req.DeploymentId,
StageId: req.StageId,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
return nil, err
}
return &webservice.ApproveStageResponse{
CommandId: commandID,
}, nil
}
func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId)
if err != nil {
a.logger.Error("failed to get application live state", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get application live state")
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
// GetProject gets the specified project without sensitive data.
func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
project, err := a.getProject(ctx, claims.Role.ProjectId)
if err != nil {
return nil, err
}
// Redact all sensitive data inside project message before sending to the client.
project.RedactSensitiveData()
return &webservice.GetProjectResponse{
Project: project,
}, nil
}
func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) {
if p, ok := a.projectsInConfig[projectID]; ok {
return &model.Project{
Id: p.Id,
Desc: p.Desc,
StaticAdmin: &model.ProjectStaticUser{
Username: p.StaticAdmin.Username,
PasswordHash: p.StaticAdmin.PasswordHash,
},
}, nil
}
project, err := a.projectStore.GetProject(ctx, projectID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The project is not found")
}
if err != nil {
a.logger.Error("failed to get project", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get project")
}
return project, nil
}
// UpdateProjectStaticAdmin updates the static admin user settings.
func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil {
a.logger.Error("failed to update static admin", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update static admin")
}
return &webservice.UpdateProjectStaticAdminResponse{}, nil
}
// EnableStaticAdmin enables static admin login.
func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to enable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to enable static admin login")
}
return &webservice.EnableStaticAdminResponse{}, nil
}
// DisableStaticAdmin disables static admin login.
func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to disenable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to disenable static admin login")
}
return &webservice.DisableStaticAdminResponse{}, nil
}
// UpdateProjectSSOConfig updates the sso settings.
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := req.Sso.Encrypt(a.encrypter); err != nil {
a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations")
}
if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectSSOConfigResponse{}, nil
}
// UpdateProjectRBACConfig updates the RBAC settings.
func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectRBACConfigResponse{}, nil
}
// GetMe gets information about the current user.
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
return &webservice.GetMeResponse{
Subject: claims.Subject,
AvatarUrl: claims.AvatarURL,
ProjectId: claims.Role.ProjectId,
ProjectRole: claims.Role.ProjectRole,
}, nil
}
func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
cmd, err := getCommand(ctx, a.commandStore, req.CommandId, a.logger)
if err != nil {
return nil, err
}
// TODO: Add check if requested command belongs to logged-in project, after adding project id field to model.Command.
return &webservice.GetCommandResponse{
Command: cmd,
}, nil
}
func (a *WebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
var templates []*webservice.DeploymentConfigTemplate
switch app.Kind {
case model.ApplicationKind_KUBERNETES:
templates = k8sDeploymentConfigTemplates
case model.ApplicationKind_TERRAFORM:
templates = terraformDeploymentConfigTemplates
case model.ApplicationKind_CROSSPLANE:
templates = crossplaneDeploymentConfigTemplates
case model.ApplicationKind_LAMBDA:
templates = lambdaDeploymentConfigTemplates
case model.ApplicationKind_CLOUDRUN:
templates = cloudrunDeploymentConfigTemplates
default:
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown application kind %v", app.Kind))
}
for _, t := range templates {
g := app.GetGitPath()
filename := g.ConfigFilename
if filename == "" {
filename = ".pipe.yaml"
}
t.FileCreationUrl, err = git.MakeFileCreationURL(g.Repo.Remote, g.Path, g.Repo.Branch, filename, t.Content)
if err != nil {
a.logger.Error("failed to make a link to create a file", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to make a link to create a file")
}
}
if len(req.Labels) == 0 {
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: templates}, nil
}
filtered := filterDeploymentConfigTemplates(templates, req.Labels)
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: filtered}, nil
}
// Returns the templates from the given list that carry all the specified labels.
func filterDeploymentConfigTemplates(templates []*webservice.DeploymentConfigTemplate, labels []webservice.DeploymentConfigTemplateLabel) []*webservice.DeploymentConfigTemplate {
filtered := make([]*webservice.DeploymentConfigTemplate, 0, len(templates))
L:
for _, template := range templates {
for _, l := range labels {
if !template.HasLabel(l) {
continue L
}
}
filtered = append(filtered, template)
}
return filtered
}
func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
id := uuid.New().String()
key, hash, err := model.GenerateAPIKey(id)
if err != nil {
a.logger.Error("failed to generate API key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate API key")
}
apiKey := model.APIKey{
Id: id,
Name: req.Name,
KeyHash: hash,
ProjectId: claims.Role.ProjectId,
Role: req.Role,
Creator: claims.Subject,
}
err = a.apiKeyStore.AddAPIKey(ctx, &apiKey)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The API key already exists")
}
if err != nil {
a.logger.Error("failed to create API key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create API key")
}
return &webservice.GenerateAPIKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.apiKeyStore.DisableAPIKey(ctx, req.Id, claims.Role.ProjectId); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "The API key is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to disable the API key",
zap.String("apikey-id", req.Id),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to disable the API key")
}
}
return &webservice.DisableAPIKeyResponse{}, nil
}
func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !req.Options.Enabled.GetValue(),
})
}
}
apiKeys, err := a.apiKeyStore.ListAPIKeys(ctx, opts)
if err != nil {
a.logger.Error("failed to list API keys", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to list API keys")
}
// Redact all sensitive data inside API key before sending to the client.
for i := range apiKeys {
apiKeys[i].RedactSensitiveData()
}
return &webservice.ListAPIKeysResponse{
Keys: apiKeys,
}, nil
}
// GetInsightData returns the accumulated insight data.
func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightDataRequest) (*webservice.GetInsightDataResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
count := int(req.DataPointCount)
from := time.Unix(req.RangeFrom, 0)
chunks, err := insightstore.LoadChunksFromCache(a.insightCache, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
if err != nil {
a.logger.Error("failed to load chunks from cache", zap.Error(err))
chunks, err = a.insightstore.LoadChunks(ctx, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count)
if err != nil {
a.logger.Error("failed to load chunks from insightstore", zap.Error(err))
return nil, err
}
if err := insightstore.PutChunksToCache(a.insightCache, chunks); err != nil {
a.logger.Error("failed to put chunks to cache", zap.Error(err))
}
}
idp, err := chunks.ExtractDataPoints(req.Step, from, count)
if err != nil {
a.logger.Error("failed to extract data points from chunks", zap.Error(err))
}
var updatedAt int64
for _, c := range chunks {
accumulatedTo := c.GetAccumulatedTo()
if accumulatedTo > updatedAt {
updatedAt = accumulatedTo
}
}
return &webservice.GetInsightDataResponse{
UpdatedAt: updatedAt,
DataPoints: idp,
}, nil
}
| 1 | 14,317 | nit: Remove this assignment because it is not necessary. | pipe-cd-pipe | go |
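The GetInsightData handler in the record above follows a cache-aside flow: read through the cache, fall back to the insight store on a miss, then repopulate the cache. Below is a minimal, self-contained sketch of that pattern; the names loadFromCache, loadFromStore, and putToCache are illustrative stand-ins, not the actual pipecd APIs.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

var errCacheMiss = errors.New("cache miss")

// Illustrative stand-ins for the cache and the durable store.
func loadFromCache(key string) ([]byte, error)                      { return nil, errCacheMiss }
func loadFromStore(ctx context.Context, key string) ([]byte, error) { return []byte(key), nil }
func putToCache(key string, data []byte) error                      { return nil }

// loadChunks reads through the cache and falls back to the store on a miss.
// A failed cache write is logged but not fatal, mirroring the handler above.
func loadChunks(ctx context.Context, key string) ([]byte, error) {
	if data, err := loadFromCache(key); err == nil {
		return data, nil
	}
	data, err := loadFromStore(ctx, key) // cache miss: hit the source of truth
	if err != nil {
		return nil, err
	}
	if err := putToCache(key, data); err != nil {
		log.Printf("failed to put chunks to cache: %v", err)
	}
	return data, nil
}

func main() {
	data, err := loadChunks(context.Background(), "insight-chunk")
	fmt.Println(string(data), err)
}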
@@ -217,7 +217,10 @@ ostree_builtin_summary (int argc, char **argv, GCancellable *cancellable, GError
if (opt_raw)
flags |= OSTREE_DUMP_RAW;
- summary_data = ot_file_mapat_bytes (repo->repo_dir_fd, "summary", error);
+ glnx_fd_close int fd = -1;
+ if (!glnx_openat_rdonly (repo->repo_dir_fd, "summary", TRUE, &fd, error))
+ return FALSE;
+ summary_data = ot_fd_readall_or_mmap (fd, 0, error);
if (!summary_data)
return FALSE;
| 1 | /*
* Copyright (C) 2014 Colin Walters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include "ostree-repo-private.h"
#include "ot-dump.h"
#include "ot-main.h"
#include "ot-builtins.h"
#include "ostree.h"
#include "otutil.h"
static gboolean opt_update, opt_view, opt_raw;
static char **opt_key_ids;
static char *opt_gpg_homedir;
static char **opt_metadata;
/* ATTENTION:
* Please remember to update the bash-completion script (bash/ostree) and
* man page (man/ostree-summary.xml) when changing the option list.
*/
static GOptionEntry options[] = {
{ "update", 'u', 0, G_OPTION_ARG_NONE, &opt_update, "Update the summary", NULL },
{ "view", 'v', 0, G_OPTION_ARG_NONE, &opt_view, "View the local summary file", NULL },
{ "raw", 0, 0, G_OPTION_ARG_NONE, &opt_raw, "View the raw bytes of the summary file", NULL },
{ "gpg-sign", 0, 0, G_OPTION_ARG_STRING_ARRAY, &opt_key_ids, "GPG Key ID to sign the summary with", "KEY-ID"},
{ "gpg-homedir", 0, 0, G_OPTION_ARG_FILENAME, &opt_gpg_homedir, "GPG Homedir to use when looking for keyrings", "HOMEDIR"},
{ "add-metadata", 'm', 0, G_OPTION_ARG_STRING_ARRAY, &opt_metadata, "Additional metadata field to add to the summary", "KEY=VALUE" },
{ NULL }
};
/* Take arguments of the form KEY=VALUE and put them into an a{sv} variant. The
* value arguments must be parsable using g_variant_parse(). */
static GVariant *
build_additional_metadata (const char * const *args,
GError **error)
{
g_autoptr(GVariantBuilder) builder = NULL;
builder = g_variant_builder_new (G_VARIANT_TYPE_VARDICT);
for (gsize i = 0; args[i] != NULL; i++)
{
const gchar *equals = strchr (args[i], '=');
g_autofree gchar *key = NULL;
const gchar *value_str;
g_autoptr(GVariant) value = NULL;
if (equals == NULL)
return glnx_null_throw (error,
"Missing '=' in KEY=VALUE metadata '%s'", args[i]);
key = g_strndup (args[i], equals - args[i]);
value_str = equals + 1;
value = g_variant_parse (NULL, value_str, NULL, NULL, error);
if (value == NULL)
return glnx_prefix_error_null (error, "Error parsing variant ‘%s’: ", value_str);
g_variant_builder_add (builder, "{sv}", key, value);
}
return g_variant_ref_sink (g_variant_builder_end (builder));
}
gboolean
ostree_builtin_summary (int argc, char **argv, GCancellable *cancellable, GError **error)
{
g_autoptr(GOptionContext) context = NULL;
g_autoptr(OstreeRepo) repo = NULL;
OstreeDumpFlags flags = OSTREE_DUMP_NONE;
context = g_option_context_new ("Manage summary metadata");
if (!ostree_option_context_parse (context, options, &argc, &argv, OSTREE_BUILTIN_FLAG_NONE, &repo, cancellable, error))
return FALSE;
if (opt_update)
{
g_autoptr(GVariant) additional_metadata = NULL;
if (!ostree_ensure_repo_writable (repo, error))
return FALSE;
if (opt_metadata != NULL)
{
additional_metadata = build_additional_metadata ((const char * const *) opt_metadata, error);
if (additional_metadata == NULL)
return FALSE;
}
#ifdef OSTREE_ENABLE_EXPERIMENTAL_API
const char *collection_id = ostree_repo_get_collection_id (repo);
#else /* if !OSTREE_ENABLE_EXPERIMENTAL_API */
const char *collection_id = NULL;
#endif /* OSTREE_ENABLE_EXPERIMENTAL_API */
/* Write out a new metadata commit for the repository. */
if (collection_id != NULL)
{
#ifdef OSTREE_ENABLE_EXPERIMENTAL_API
OstreeCollectionRef collection_ref = { (gchar *) collection_id, (gchar *) OSTREE_REPO_METADATA_REF };
g_autofree char *old_ostree_metadata_checksum = NULL;
g_autofree gchar *new_ostree_metadata_checksum = NULL;
g_autoptr(OstreeMutableTree) mtree = NULL;
g_autoptr(OstreeRepoFile) repo_file = NULL;
g_autoptr(GVariantDict) new_summary_commit_dict = NULL;
g_autoptr(GVariant) new_summary_commit = NULL;
if (!ostree_repo_resolve_rev (repo, OSTREE_REPO_METADATA_REF,
TRUE, &old_ostree_metadata_checksum, error))
return FALSE;
/* Add bindings to the metadata. */
new_summary_commit_dict = g_variant_dict_new (additional_metadata);
g_variant_dict_insert (new_summary_commit_dict, OSTREE_COMMIT_META_KEY_COLLECTION_BINDING,
"s", collection_ref.collection_id);
g_variant_dict_insert_value (new_summary_commit_dict, OSTREE_COMMIT_META_KEY_REF_BINDING,
g_variant_new_strv ((const gchar * const *) &collection_ref.ref_name, 1));
new_summary_commit = g_variant_dict_end (new_summary_commit_dict);
if (!ostree_repo_prepare_transaction (repo, NULL, cancellable, error))
return FALSE;
/* Set up an empty mtree. */
mtree = ostree_mutable_tree_new ();
glnx_unref_object GFileInfo *fi = g_file_info_new ();
g_file_info_set_attribute_uint32 (fi, "unix::uid", 0);
g_file_info_set_attribute_uint32 (fi, "unix::gid", 0);
g_file_info_set_attribute_uint32 (fi, "unix::mode", (0755 | S_IFDIR));
g_autofree guchar *csum_raw = NULL;
g_autofree char *csum = NULL;
g_autoptr(GVariant) dirmeta = ostree_create_directory_metadata (fi, NULL /* xattrs */);
if (!ostree_repo_write_metadata (repo, OSTREE_OBJECT_TYPE_DIR_META, NULL,
dirmeta, &csum_raw, cancellable, error))
return FALSE;
csum = ostree_checksum_from_bytes (csum_raw);
ostree_mutable_tree_set_metadata_checksum (mtree, csum);
if (!ostree_repo_write_mtree (repo, mtree, (GFile **) &repo_file, NULL, error))
return FALSE;
if (!ostree_repo_write_commit (repo, old_ostree_metadata_checksum,
NULL /* subject */, NULL /* body */,
new_summary_commit, repo_file, &new_ostree_metadata_checksum,
NULL, error))
return FALSE;
if (opt_key_ids != NULL)
{
for (const char * const *iter = (const char * const *) opt_key_ids;
iter != NULL && *iter != NULL; iter++)
{
const char *key_id = *iter;
if (!ostree_repo_sign_commit (repo,
new_ostree_metadata_checksum,
key_id,
opt_gpg_homedir,
cancellable,
error))
return FALSE;
}
}
ostree_repo_transaction_set_collection_ref (repo, &collection_ref,
new_ostree_metadata_checksum);
if (!ostree_repo_commit_transaction (repo, NULL, cancellable, error))
return FALSE;
#else /* if !OSTREE_ENABLE_EXPERIMENTAL_API */
g_assert_not_reached ();
return FALSE;
#endif /* OSTREE_ENABLE_EXPERIMENTAL_API */
}
/* Regenerate and sign the conventional summary file. */
if (!ostree_repo_regenerate_summary (repo, additional_metadata, cancellable, error))
return FALSE;
if (opt_key_ids)
{
if (!ostree_repo_add_gpg_signature_summary (repo,
(const gchar **) opt_key_ids,
opt_gpg_homedir,
cancellable,
error))
return FALSE;
}
}
else if (opt_view)
{
g_autoptr(GBytes) summary_data = NULL;
if (opt_raw)
flags |= OSTREE_DUMP_RAW;
summary_data = ot_file_mapat_bytes (repo->repo_dir_fd, "summary", error);
if (!summary_data)
return FALSE;
ot_dump_summary_bytes (summary_data, flags);
}
else
{
g_set_error (error, G_IO_ERROR, G_IO_ERROR_FAILED,
"No option specified; use -u to update summary");
return FALSE;
}
return TRUE;
}
| 1 | 12,771 | This seems like a common enough pattern to offer an equivalent wrapper in `ot-fs-util.c`, no? | ostreedev-ostree | c |
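The review above asks for the open-then-read pattern from this patch to be hoisted into a shared helper. A hypothetical sketch of such a wrapper follows; the helper name and its suggested home in ot-fs-util.c are assumptions, and only glnx_openat_rdonly and ot_fd_readall_or_mmap come from the patch itself.

/* Hypothetical wrapper combining the open and read steps from the patch.
 * The name and signature are illustrative, not the actual ostree API. */
static GBytes *
ot_openat_readall_or_mmap (int dfd, const char *path, GError **error)
{
  glnx_fd_close int fd = -1;
  if (!glnx_openat_rdonly (dfd, path, TRUE, &fd, error))
    return NULL;
  return ot_fd_readall_or_mmap (fd, 0, error);
}

With a helper like this, the call site in ostree_builtin_summary would shrink back to a single line, e.g. summary_data = ot_openat_readall_or_mmap (repo->repo_dir_fd, "summary", error);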
@@ -328,6 +328,7 @@ public abstract class MediaplayerActivity extends CastEnabledActivity implements
boolean isItemHasDownloadLink = media != null && (media instanceof FeedMedia) && ((FeedMedia) media).getDownload_url() != null;
menu.findItem(R.id.share_download_url_item).setVisible(isItemHasDownloadLink);
menu.findItem(R.id.share_download_url_with_position_item).setVisible(isItemHasDownloadLink);
+ menu.findItem(R.id.share_file).setVisible(((FeedMedia) media).fileExists());
menu.findItem(R.id.share_item).setVisible(hasWebsiteLink || isItemAndHasLink || isItemHasDownloadLink);
| 1 | package de.danoeh.antennapod.activity;
import android.annotation.TargetApi;
import android.app.Activity;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.res.TypedArray;
import android.graphics.Color;
import android.graphics.PixelFormat;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AlertDialog;
import android.util.Log;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.Button;
import android.widget.CheckBox;
import android.widget.ImageButton;
import android.widget.SeekBar;
import android.widget.SeekBar.OnSeekBarChangeListener;
import android.widget.TextView;
import android.widget.Toast;
import com.afollestad.materialdialogs.MaterialDialog;
import com.bumptech.glide.Glide;
import com.joanzapata.iconify.IconDrawable;
import com.joanzapata.iconify.fonts.FontAwesomeIcons;
import java.util.Locale;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.playback.PlaybackService;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.util.Converter;
import de.danoeh.antennapod.core.util.Flavors;
import de.danoeh.antennapod.core.util.ShareUtils;
import de.danoeh.antennapod.core.util.StorageUtils;
import de.danoeh.antennapod.core.util.Supplier;
import de.danoeh.antennapod.core.util.playback.MediaPlayerError;
import de.danoeh.antennapod.core.util.playback.Playable;
import de.danoeh.antennapod.core.util.playback.PlaybackController;
import de.danoeh.antennapod.dialog.SleepTimerDialog;
import de.danoeh.antennapod.dialog.VariableSpeedDialog;
import rx.Observable;
import rx.android.schedulers.AndroidSchedulers;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
/**
* Provides general features which are both needed for playing audio and video
* files.
*/
public abstract class MediaplayerActivity extends CastEnabledActivity implements OnSeekBarChangeListener {
private static final String TAG = "MediaplayerActivity";
private static final String PREFS = "MediaPlayerActivityPreferences";
private static final String PREF_SHOW_TIME_LEFT = "showTimeLeft";
protected PlaybackController controller;
protected TextView txtvPosition;
protected TextView txtvLength;
protected SeekBar sbPosition;
protected ImageButton butRev;
protected TextView txtvRev;
protected ImageButton butPlay;
protected ImageButton butFF;
protected TextView txtvFF;
protected ImageButton butSkip;
protected boolean showTimeLeft = false;
private boolean isFavorite = false;
private PlaybackController newPlaybackController() {
return new PlaybackController(this, false) {
@Override
public void setupGUI() {
MediaplayerActivity.this.setupGUI();
}
@Override
public void onPositionObserverUpdate() {
MediaplayerActivity.this.onPositionObserverUpdate();
}
@Override
public void onBufferStart() {
MediaplayerActivity.this.onBufferStart();
}
@Override
public void onBufferEnd() {
MediaplayerActivity.this.onBufferEnd();
}
@Override
public void onBufferUpdate(float progress) {
MediaplayerActivity.this.onBufferUpdate(progress);
}
@Override
public void handleError(int code) {
MediaplayerActivity.this.handleError(code);
}
@Override
public void onReloadNotification(int code) {
MediaplayerActivity.this.onReloadNotification(code);
}
@Override
public void onSleepTimerUpdate() {
supportInvalidateOptionsMenu();
}
@Override
public ImageButton getPlayButton() {
return butPlay;
}
@Override
public void postStatusMsg(int msg, boolean showToast) {
MediaplayerActivity.this.postStatusMsg(msg, showToast);
}
@Override
public void clearStatusMsg() {
MediaplayerActivity.this.clearStatusMsg();
}
@Override
public boolean loadMediaInfo() {
return MediaplayerActivity.this.loadMediaInfo();
}
@Override
public void onAwaitingVideoSurface() {
MediaplayerActivity.this.onAwaitingVideoSurface();
}
@Override
public void onServiceQueried() {
MediaplayerActivity.this.onServiceQueried();
}
@Override
public void onShutdownNotification() {
finish();
}
@Override
public void onPlaybackEnd() {
finish();
}
@Override
public void onPlaybackSpeedChange() {
MediaplayerActivity.this.onPlaybackSpeedChange();
}
@Override
protected void setScreenOn(boolean enable) {
super.setScreenOn(enable);
MediaplayerActivity.this.setScreenOn(enable);
}
@Override
public void onSetSpeedAbilityChanged() {
MediaplayerActivity.this.onSetSpeedAbilityChanged();
}
};
}
protected static TextView getTxtvFFFromActivity(MediaplayerActivity activity) {
return activity.txtvFF;
}
protected static TextView getTxtvRevFromActivity(MediaplayerActivity activity) {
return activity.txtvRev;
}
protected void onSetSpeedAbilityChanged() {
Log.d(TAG, "onSetSpeedAbilityChanged()");
updatePlaybackSpeedButton();
}
protected void onPlaybackSpeedChange() {
updatePlaybackSpeedButtonText();
}
protected void onServiceQueried() {
supportInvalidateOptionsMenu();
}
protected void chooseTheme() {
setTheme(UserPreferences.getTheme());
}
protected void setScreenOn(boolean enable) {
}
@Override
protected void onCreate(Bundle savedInstanceState) {
chooseTheme();
super.onCreate(savedInstanceState);
Log.d(TAG, "onCreate()");
StorageUtils.checkStorageAvailability(this);
orientation = getResources().getConfiguration().orientation;
getWindow().setFormat(PixelFormat.TRANSPARENT);
}
@Override
protected void onPause() {
if(controller != null) {
controller.reinitServiceIfPaused();
controller.pause();
}
super.onPause();
}
/**
* Should be used to switch to another player activity if the mime type is
* not the correct one for the current activity.
*/
protected abstract void onReloadNotification(int notificationCode);
/**
* Should be used to inform the user that the PlaybackService is currently
* buffering.
*/
protected abstract void onBufferStart();
/**
* Should be used to hide the view that was showing the 'buffering'-message.
*/
protected abstract void onBufferEnd();
protected void onBufferUpdate(float progress) {
if (sbPosition != null) {
sbPosition.setSecondaryProgress((int) (progress * sbPosition.getMax()));
}
}
/**
* Current screen orientation.
*/
protected int orientation;
@Override
protected void onStart() {
super.onStart();
if (controller != null) {
controller.release();
}
controller = newPlaybackController();
}
@Override
protected void onStop() {
Log.d(TAG, "onStop()");
if (controller != null) {
controller.release();
controller = null; // prevent leak
}
super.onStop();
}
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
@Override
public void onTrimMemory(int level) {
super.onTrimMemory(level);
Glide.get(this).trimMemory(level);
}
@Override
public void onLowMemory() {
super.onLowMemory();
Glide.get(this).clearMemory();
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
super.onCreateOptionsMenu(menu);
if (Flavors.FLAVOR == Flavors.PLAY) {
requestCastButton(MenuItem.SHOW_AS_ACTION_ALWAYS);
}
MenuInflater inflater = getMenuInflater();
inflater.inflate(R.menu.mediaplayer, menu);
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
super.onPrepareOptionsMenu(menu);
if (controller == null) {
return false;
}
Playable media = controller.getMedia();
menu.findItem(R.id.support_item).setVisible(
media != null && media.getPaymentLink() != null &&
(media instanceof FeedMedia) &&
((FeedMedia) media).getItem() != null &&
((FeedMedia) media).getItem().getFlattrStatus().flattrable()
);
boolean hasWebsiteLink = media != null && media.getWebsiteLink() != null;
menu.findItem(R.id.visit_website_item).setVisible(hasWebsiteLink);
boolean isItemAndHasLink = media != null && (media instanceof FeedMedia) &&
((FeedMedia) media).getItem() != null && ((FeedMedia) media).getItem().getLink() != null;
menu.findItem(R.id.share_link_item).setVisible(isItemAndHasLink);
menu.findItem(R.id.share_link_with_position_item).setVisible(isItemAndHasLink);
boolean isItemHasDownloadLink = media != null && (media instanceof FeedMedia) && ((FeedMedia) media).getDownload_url() != null;
menu.findItem(R.id.share_download_url_item).setVisible(isItemHasDownloadLink);
menu.findItem(R.id.share_download_url_with_position_item).setVisible(isItemHasDownloadLink);
menu.findItem(R.id.share_item).setVisible(hasWebsiteLink || isItemAndHasLink || isItemHasDownloadLink);
menu.findItem(R.id.add_to_favorites_item).setVisible(false);
menu.findItem(R.id.remove_from_favorites_item).setVisible(false);
if(media != null && media instanceof FeedMedia) {
menu.findItem(R.id.add_to_favorites_item).setVisible(!isFavorite);
menu.findItem(R.id.remove_from_favorites_item).setVisible(isFavorite);
}
boolean sleepTimerSet = controller.sleepTimerActive();
boolean sleepTimerNotSet = controller.sleepTimerNotActive();
menu.findItem(R.id.set_sleeptimer_item).setVisible(sleepTimerNotSet);
menu.findItem(R.id.disable_sleeptimer_item).setVisible(sleepTimerSet);
if (this instanceof AudioplayerActivity) {
int[] attrs = {R.attr.action_bar_icon_color};
TypedArray ta = obtainStyledAttributes(UserPreferences.getTheme(), attrs);
int textColor = ta.getColor(0, Color.GRAY);
ta.recycle();
menu.findItem(R.id.audio_controls).setIcon(new IconDrawable(this,
FontAwesomeIcons.fa_sliders).color(textColor).actionBarSize());
} else {
menu.findItem(R.id.audio_controls).setVisible(false);
}
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (controller == null) {
return false;
}
Playable media = controller.getMedia();
if (item.getItemId() == android.R.id.home) {
Intent intent = new Intent(MediaplayerActivity.this,
MainActivity.class);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP
| Intent.FLAG_ACTIVITY_NEW_TASK);
startActivity(intent);
return true;
} else {
if (media != null) {
switch (item.getItemId()) {
case R.id.add_to_favorites_item:
if(media instanceof FeedMedia) {
FeedItem feedItem = ((FeedMedia)media).getItem();
if(feedItem != null) {
DBWriter.addFavoriteItem(feedItem);
isFavorite = true;
invalidateOptionsMenu();
Toast.makeText(this, R.string.added_to_favorites, Toast.LENGTH_SHORT)
.show();
}
}
break;
case R.id.remove_from_favorites_item:
if(media instanceof FeedMedia) {
FeedItem feedItem = ((FeedMedia)media).getItem();
if(feedItem != null) {
DBWriter.removeFavoriteItem(feedItem);
isFavorite = false;
invalidateOptionsMenu();
Toast.makeText(this, R.string.removed_from_favorites, Toast.LENGTH_SHORT)
.show();
}
}
break;
case R.id.disable_sleeptimer_item:
if (controller.serviceAvailable()) {
MaterialDialog.Builder stDialog = new MaterialDialog.Builder(this);
stDialog.title(R.string.sleep_timer_label);
stDialog.content(getString(R.string.time_left_label)
+ Converter.getDurationStringLong((int) controller
.getSleepTimerTimeLeft()));
stDialog.positiveText(R.string.disable_sleeptimer_label);
stDialog.negativeText(R.string.cancel_label);
stDialog.onPositive((dialog, which) -> {
dialog.dismiss();
controller.disableSleepTimer();
});
stDialog.onNegative((dialog, which) -> dialog.dismiss());
stDialog.build().show();
}
break;
case R.id.set_sleeptimer_item:
if (controller.serviceAvailable()) {
SleepTimerDialog td = new SleepTimerDialog(this) {
@Override
public void onTimerSet(long millis, boolean shakeToReset, boolean vibrate) {
controller.setSleepTimer(millis, shakeToReset, vibrate);
}
};
td.createNewDialog().show();
}
break;
case R.id.audio_controls:
MaterialDialog dialog = new MaterialDialog.Builder(this)
.title(R.string.audio_controls)
.customView(R.layout.audio_controls, true)
.neutralText(R.string.close_label)
.onNeutral((dialog1, which) -> {
final SeekBar left = (SeekBar) dialog1.findViewById(R.id.volume_left);
final SeekBar right = (SeekBar) dialog1.findViewById(R.id.volume_right);
UserPreferences.setVolume(left.getProgress(), right.getProgress());
})
.show();
final SeekBar barPlaybackSpeed = (SeekBar) dialog.findViewById(R.id.playback_speed);
final Button butDecSpeed = (Button) dialog.findViewById(R.id.butDecSpeed);
butDecSpeed.setOnClickListener(v -> {
if(controller != null && controller.canSetPlaybackSpeed()) {
barPlaybackSpeed.setProgress(barPlaybackSpeed.getProgress() - 2);
} else {
VariableSpeedDialog.showGetPluginDialog(this);
}
});
final Button butIncSpeed = (Button) dialog.findViewById(R.id.butIncSpeed);
butIncSpeed.setOnClickListener(v -> {
if(controller != null && controller.canSetPlaybackSpeed()) {
barPlaybackSpeed.setProgress(barPlaybackSpeed.getProgress() + 2);
} else {
VariableSpeedDialog.showGetPluginDialog(this);
}
});
final TextView txtvPlaybackSpeed = (TextView) dialog.findViewById(R.id.txtvPlaybackSpeed);
float currentSpeed = 1.0f;
try {
currentSpeed = Float.parseFloat(UserPreferences.getPlaybackSpeed());
} catch (NumberFormatException e) {
Log.e(TAG, Log.getStackTraceString(e));
UserPreferences.setPlaybackSpeed(String.valueOf(currentSpeed));
}
txtvPlaybackSpeed.setText(String.format("%.2fx", currentSpeed));
barPlaybackSpeed.setOnSeekBarChangeListener(new OnSeekBarChangeListener() {
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
if(controller != null && controller.canSetPlaybackSpeed()) {
float playbackSpeed = (progress + 10) / 20.0f;
controller.setPlaybackSpeed(playbackSpeed);
String speedPref = String.format(Locale.US, "%.2f", playbackSpeed);
UserPreferences.setPlaybackSpeed(speedPref);
String speedStr = String.format("%.2fx", playbackSpeed);
txtvPlaybackSpeed.setText(speedStr);
} else if(fromUser) {
float speed = Float.valueOf(UserPreferences.getPlaybackSpeed());
barPlaybackSpeed.post(() -> barPlaybackSpeed.setProgress((int) (20 * speed) - 10));
}
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
if(controller != null && !controller.canSetPlaybackSpeed()) {
VariableSpeedDialog.showGetPluginDialog(MediaplayerActivity.this);
}
}
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
}
});
barPlaybackSpeed.setProgress((int) (20 * currentSpeed) - 10);
final SeekBar barLeftVolume = (SeekBar) dialog.findViewById(R.id.volume_left);
barLeftVolume.setProgress(UserPreferences.getLeftVolumePercentage());
final SeekBar barRightVolume = (SeekBar) dialog.findViewById(R.id.volume_right);
barRightVolume.setProgress(UserPreferences.getRightVolumePercentage());
final CheckBox stereoToMono = (CheckBox) dialog.findViewById(R.id.stereo_to_mono);
stereoToMono.setChecked(UserPreferences.stereoToMono());
if (controller != null && !controller.canDownmix()) {
stereoToMono.setEnabled(false);
String sonicOnly = getString(R.string.sonic_only);
stereoToMono.setText(stereoToMono.getText() + " [" + sonicOnly + "]");
}
barLeftVolume.setOnSeekBarChangeListener(new OnSeekBarChangeListener() {
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
controller.setVolume(
Converter.getVolumeFromPercentage(progress),
Converter.getVolumeFromPercentage(barRightVolume.getProgress()));
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
}
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
}
});
barRightVolume.setOnSeekBarChangeListener(new OnSeekBarChangeListener() {
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
controller.setVolume(
Converter.getVolumeFromPercentage(barLeftVolume.getProgress()),
Converter.getVolumeFromPercentage(progress));
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
}
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
}
});
stereoToMono.setOnCheckedChangeListener((buttonView, isChecked) -> {
UserPreferences.stereoToMono(isChecked);
if (controller != null) {
controller.setDownmix(isChecked);
}
});
break;
case R.id.visit_website_item:
Uri uri = Uri.parse(media.getWebsiteLink());
startActivity(new Intent(Intent.ACTION_VIEW, uri));
break;
case R.id.support_item:
if (media instanceof FeedMedia) {
DBTasks.flattrItemIfLoggedIn(this, ((FeedMedia) media).getItem());
}
break;
case R.id.share_link_item:
if (media instanceof FeedMedia) {
ShareUtils.shareFeedItemLink(this, ((FeedMedia) media).getItem());
}
break;
case R.id.share_download_url_item:
if (media instanceof FeedMedia) {
ShareUtils.shareFeedItemDownloadLink(this, ((FeedMedia) media).getItem());
}
break;
case R.id.share_link_with_position_item:
if (media instanceof FeedMedia) {
ShareUtils.shareFeedItemLink(this, ((FeedMedia) media).getItem(), true);
}
break;
case R.id.share_download_url_with_position_item:
if (media instanceof FeedMedia) {
ShareUtils.shareFeedItemDownloadLink(this, ((FeedMedia) media).getItem(), true);
}
break;
default:
return false;
}
return true;
} else {
return false;
}
}
}
@Override
protected void onResume() {
super.onResume();
Log.d(TAG, "onResume()");
StorageUtils.checkStorageAvailability(this);
if(controller != null) {
controller.init();
}
}
/**
* Called by 'handleStatus()' when the PlaybackService is waiting for
* a video surface.
*/
protected abstract void onAwaitingVideoSurface();
protected abstract void postStatusMsg(int resId, boolean showToast);
protected abstract void clearStatusMsg();
protected void onPositionObserverUpdate() {
if (controller == null || txtvPosition == null || txtvLength == null) {
return;
}
int currentPosition = controller.getPosition();
int duration = controller.getDuration();
Log.d(TAG, "currentPosition " + Converter.getDurationStringLong(currentPosition));
if (currentPosition == PlaybackService.INVALID_TIME ||
duration == PlaybackService.INVALID_TIME) {
Log.w(TAG, "Could not react to position observer update because of invalid time");
return;
}
txtvPosition.setText(Converter.getDurationStringLong(currentPosition));
if (showTimeLeft) {
txtvLength.setText("-" + Converter.getDurationStringLong(duration - currentPosition));
} else {
txtvLength.setText(Converter.getDurationStringLong(duration));
}
updateProgressbarPosition(currentPosition, duration);
}
private void updateProgressbarPosition(int position, int duration) {
Log.d(TAG, "updateProgressbarPosition(" + position + ", " + duration + ")");
if(sbPosition == null) {
return;
}
float progress = ((float) position) / duration;
sbPosition.setProgress((int) (progress * sbPosition.getMax()));
}
/**
* Load information about the media that is going to be played or currently
* being played. This method will be called when the activity is connected
* to the PlaybackService to ensure that the activity has the right
* FeedMedia object.
*/
protected boolean loadMediaInfo() {
Log.d(TAG, "loadMediaInfo()");
if(controller == null || controller.getMedia() == null) {
return false;
}
Playable media = controller.getMedia();
SharedPreferences prefs = getSharedPreferences(PREFS, MODE_PRIVATE);
showTimeLeft = prefs.getBoolean(PREF_SHOW_TIME_LEFT, false);
onPositionObserverUpdate();
checkFavorite();
updatePlaybackSpeedButton();
return true;
}
protected void updatePlaybackSpeedButton() {
// Only meaningful on AudioplayerActivity, where it is overridden.
}
protected void updatePlaybackSpeedButtonText() {
// Only meaningful on AudioplayerActivity, where it is overridden.
}
/**
* Abstracts the directions to skip forward or back (rewind) and encapsulates the behavior to get or set the corresponding preference (including updating the skip buttons in the UI).
*/
public static enum SkipDirection {
SKIP_FORWARD(
UserPreferences::getFastForwardSecs,
MediaplayerActivity::getTxtvFFFromActivity,
UserPreferences::setFastForwardSecs,
R.string.pref_fast_forward),
SKIP_REWIND(UserPreferences::getRewindSecs,
MediaplayerActivity::getTxtvRevFromActivity,
UserPreferences::setRewindSecs,
R.string.pref_rewind);
private final Supplier<Integer> getPrefSecsFn;
private final Func1<MediaplayerActivity, TextView> getTextViewFn;
private final Action1<Integer> setPrefSecsFn;
private final int titleResourceID;
/**
* Constructor for skip direction enum. Stores references to utility functions and resource
* IDs that vary depending on the direction.
*
* @param getPrefSecsFn Handle to function that retrieves current seconds of the skip delta
* @param getTextViewFn Handle to function that gets the TextView which displays the current skip delta value
* @param setPrefSecsFn Handle to function that sets the preference (setting) for the skip delta value (and optionally updates the button label with the current values)
* @param titleResourceID ID of the resource string with the title for a view
*/
SkipDirection(Supplier<Integer> getPrefSecsFn, Func1<MediaplayerActivity, TextView> getTextViewFn, Action1<Integer> setPrefSecsFn, int titleResourceID) {
this.getPrefSecsFn = getPrefSecsFn;
this.getTextViewFn = getTextViewFn;
this.setPrefSecsFn = setPrefSecsFn;
this.titleResourceID = titleResourceID;
}
public int getPrefSkipSeconds() {
return(getPrefSecsFn.get());
}
/**
* Updates preferences for a forward or backward skip depending on the direction of the instance, optionally updating the UI.
*
* @param seconds Number of seconds to set the preference associated with the direction of the instance.
* @param activity MediaplayerActivity containing the TextView that displays the skip delta setting (or null if nothing to update)
*/
public void setPrefSkipSeconds(int seconds, @Nullable Activity activity) {
setPrefSecsFn.call(seconds);
if (activity != null && activity instanceof MediaplayerActivity) {
TextView tv = getTextViewFn.call((MediaplayerActivity)activity);
if (tv != null) tv.setText(String.valueOf(seconds));
}
}
public int getTitleResourceID() {
return titleResourceID;
}
}
public static void showSkipPreference(Activity activity, SkipDirection direction) {
int checked = 0;
int skipSecs = direction.getPrefSkipSeconds();
final int[] values = activity.getResources().getIntArray(R.array.seek_delta_values);
final String[] choices = new String[values.length];
for (int i = 0; i < values.length; i++) {
if (skipSecs == values[i]) {
checked = i;
}
choices[i] = String.valueOf(values[i]) + " " + activity.getString(R.string.time_seconds);
}
AlertDialog.Builder builder = new AlertDialog.Builder(activity);
builder.setTitle(direction.getTitleResourceID());
builder.setSingleChoiceItems(choices, checked, null);
builder.setNegativeButton(R.string.cancel_label, null);
builder.setPositiveButton(R.string.confirm_label, (dialog, which) -> {
int choice = ((AlertDialog)dialog).getListView().getCheckedItemPosition();
if (choice < 0 || choice >= values.length) {
System.err.printf("Choice in showSkipPreference is out of bounds %d", choice);
} else {
direction.setPrefSkipSeconds(values[choice], activity);
}
});
builder.create().show();
}
protected void setupGUI() {
setContentView(getContentViewResourceId());
sbPosition = (SeekBar) findViewById(R.id.sbPosition);
txtvPosition = (TextView) findViewById(R.id.txtvPosition);
SharedPreferences prefs = getSharedPreferences(PREFS, MODE_PRIVATE);
showTimeLeft = prefs.getBoolean(PREF_SHOW_TIME_LEFT, false);
Log.d("timeleft", showTimeLeft ? "true" : "false");
txtvLength = (TextView) findViewById(R.id.txtvLength);
if (txtvLength != null) {
txtvLength.setOnClickListener(v -> {
showTimeLeft = !showTimeLeft;
Playable media = controller.getMedia();
if (media == null) {
return;
}
String length;
if (showTimeLeft) {
length = "-" + Converter.getDurationStringLong(media.getDuration() - media.getPosition());
} else {
length = Converter.getDurationStringLong(media.getDuration());
}
txtvLength.setText(length);
SharedPreferences.Editor editor = prefs.edit();
editor.putBoolean(PREF_SHOW_TIME_LEFT, showTimeLeft);
editor.apply();
Log.d("timeleft on click", showTimeLeft ? "true" : "false");
});
}
butRev = (ImageButton) findViewById(R.id.butRev);
txtvRev = (TextView) findViewById(R.id.txtvRev);
if (txtvRev != null) {
txtvRev.setText(String.valueOf(UserPreferences.getRewindSecs()));
}
butPlay = (ImageButton) findViewById(R.id.butPlay);
butFF = (ImageButton) findViewById(R.id.butFF);
txtvFF = (TextView) findViewById(R.id.txtvFF);
if (txtvFF != null) {
txtvFF.setText(String.valueOf(UserPreferences.getFastForwardSecs()));
}
butSkip = (ImageButton) findViewById(R.id.butSkip);
// SEEKBAR SETUP
sbPosition.setOnSeekBarChangeListener(this);
// BUTTON SETUP
if (butRev != null) {
butRev.setOnClickListener(v -> onRewind());
butRev.setOnLongClickListener(v -> {
showSkipPreference(MediaplayerActivity.this, SkipDirection.SKIP_REWIND);
return true;
});
}
butPlay.setOnClickListener(v -> onPlayPause());
if (butFF != null) {
butFF.setOnClickListener(v -> onFastForward());
butFF.setOnLongClickListener(v -> {
showSkipPreference(MediaplayerActivity.this, SkipDirection.SKIP_FORWARD);
return false;
});
}
if (butSkip != null) {
butSkip.setOnClickListener(v -> sendBroadcast(new Intent(PlaybackService.ACTION_SKIP_CURRENT_EPISODE)));
}
}
protected void onRewind() {
if (controller == null) {
return;
}
int curr = controller.getPosition();
controller.seekTo(curr - UserPreferences.getRewindSecs() * 1000);
}
protected void onPlayPause() {
if(controller == null) {
return;
}
controller.playPause();
}
protected void onFastForward() {
if (controller == null) {
return;
}
int curr = controller.getPosition();
controller.seekTo(curr + UserPreferences.getFastForwardSecs() * 1000);
}
protected abstract int getContentViewResourceId();
void handleError(int errorCode) {
final AlertDialog.Builder errorDialog = new AlertDialog.Builder(this);
errorDialog.setTitle(R.string.error_label);
errorDialog.setMessage(MediaPlayerError.getErrorString(this, errorCode));
errorDialog.setNeutralButton("OK",
(dialog, which) -> {
dialog.dismiss();
finish();
}
);
errorDialog.create().show();
}
float prog;
@Override
public void onProgressChanged (SeekBar seekBar,int progress, boolean fromUser) {
if (controller == null || txtvLength == null) {
return;
}
prog = controller.onSeekBarProgressChanged(seekBar, progress, fromUser, txtvPosition);
if (showTimeLeft && prog != 0) {
int duration = controller.getDuration();
String length = "-" + Converter.getDurationStringLong(duration - (int) (prog * duration));
txtvLength.setText(length);
}
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
if (controller != null) {
controller.onSeekBarStartTrackingTouch(seekBar);
}
}
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
if (controller != null) {
controller.onSeekBarStopTrackingTouch(seekBar, prog);
}
}
private void checkFavorite() {
Playable playable = controller.getMedia();
if (playable != null && playable instanceof FeedMedia) {
FeedItem feedItem = ((FeedMedia) playable).getItem();
if (feedItem != null) {
Observable.fromCallable(() -> DBReader.getFeedItem(feedItem.getId()))
.subscribeOn(Schedulers.newThread())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(
item -> {
boolean isFav = item.isTagged(FeedItem.TAG_FAVORITE);
if (isFavorite != isFav) {
isFavorite = isFav;
invalidateOptionsMenu();
}
}, error -> Log.e(TAG, Log.getStackTraceString(error)));
}
}
}
}
| 1 | 13,516 | Wouldn't this crash the app if the user is currently listening to a stream? | AntennaPod-AntennaPod | java |
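The patch in this record calls ((FeedMedia) media).fileExists() without first checking that media is actually a FeedMedia, which is what the review is pointing at: while streaming, the Playable returned by the controller may be null or not a FeedMedia at all, so the unchecked cast could crash. Below is a minimal sketch of a guarded version, following the isItemHasDownloadLink pattern already used in onPrepareOptionsMenu; the variable name isItemHasLocalFile is illustrative, not from the patch.

// Guarded visibility check for the share_file item. In Java, instanceof
// evaluates to false for null, so this also covers media == null before
// the cast is attempted.
boolean isItemHasLocalFile = media instanceof FeedMedia
        && ((FeedMedia) media).fileExists();
menu.findItem(R.id.share_file).setVisible(isItemHasLocalFile);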
@@ -530,10 +530,10 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
- if prefix != "" {
+ if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
- if directory != "" {
+ if directory != "" && !strings.HasSuffix(directory, "/") {
directory += "/"
}
// Options for ObjectsWalk | 1 | // Package swift provides an interface to the Swift object storage system
package swift
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Constants
const (
directoryMarkerContentType = "application/directory" // content type of directory marker objects
listChunks = 1000 // chunk size to read directory listings
defaultChunkSize = 5 * fs.GibiByte
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
Name: "chunk_size",
Help: `Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "no_chunk",
Help: `Don't chunk files during streaming upload.
When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
Default: false,
Advanced: true,
}}
const enc = encodings.Swift
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "swift",
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
NewFs: NewFs,
Options: append([]fs.Option{{
Name: "env_auth",
Help: "Get swift credentials from environment variables in standard OpenStack form.",
Default: false,
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Enter swift credentials in the next step",
}, {
Value: "true",
Help: "Get swift credentials from environment vars. Leave other fields blank if using this.",
},
},
}, {
Name: "user",
Help: "User name to log in (OS_USERNAME).",
}, {
Name: "key",
Help: "API key or password (OS_PASSWORD).",
}, {
Name: "auth",
Help: "Authentication URL for server (OS_AUTH_URL).",
Examples: []fs.OptionExample{{
Help: "Rackspace US",
Value: "https://auth.api.rackspacecloud.com/v1.0",
}, {
Help: "Rackspace UK",
Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
}, {
Help: "Rackspace v2",
Value: "https://identity.api.rackspacecloud.com/v2.0",
}, {
Help: "Memset Memstore UK",
Value: "https://auth.storage.memset.com/v1.0",
}, {
Help: "Memset Memstore UK v2",
Value: "https://auth.storage.memset.com/v2.0",
}, {
Help: "OVH",
Value: "https://auth.cloud.ovh.net/v2.0",
}},
}, {
Name: "user_id",
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
}, {
Name: "domain",
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
}, {
Name: "tenant",
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)",
}, {
Name: "tenant_id",
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)",
}, {
Name: "tenant_domain",
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)",
}, {
Name: "region",
Help: "Region name - optional (OS_REGION_NAME)",
}, {
Name: "storage_url",
Help: "Storage URL - optional (OS_STORAGE_URL)",
}, {
Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
}, {
Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
}, {
Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
}, {
Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
}, {
Name: "auth_version",
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
Default: 0,
}, {
Name: "endpoint_type",
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
Default: "public",
Examples: []fs.OptionExample{{
Help: "Public (default, choose this if not sure)",
Value: "public",
}, {
Help: "Internal (use internal service net)",
Value: "internal",
}, {
Help: "Admin",
Value: "admin",
}},
}, {
Name: "storage_policy",
Help: `The storage policy to use when creating a new container
This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
Default: "",
Examples: []fs.OptionExample{{
Help: "Default",
Value: "",
}, {
Help: "OVH Public Cloud Storage",
Value: "pcs",
}, {
Help: "OVH Public Cloud Archive",
Value: "pca",
}},
}}, SharedOptions...),
})
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
User string `config:"user"`
Key string `config:"key"`
Auth string `config:"auth"`
UserID string `config:"user_id"`
Domain string `config:"domain"`
Tenant string `config:"tenant"`
TenantID string `config:"tenant_id"`
TenantDomain string `config:"tenant_domain"`
Region string `config:"region"`
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
ApplicationCredentialID string `config:"application_credential_id"`
ApplicationCredentialName string `config:"application_credential_name"`
ApplicationCredentialSecret string `config:"application_credential_secret"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
}
// Fs represents a remote swift server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
opt Options // options for this backend
c *swift.Connection // the connection to the swift server
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of container status
noCheckContainer bool // don't check the container before creating it
pacer *fs.Pacer // To pace the API calls
}
// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64
lastModified time.Time
contentType string
md5 string
headers swift.Headers // The object headers if known
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return fmt.Sprintf("Swift root")
}
if f.rootDirectory == "" {
return fmt.Sprintf("Swift container %s", f.rootContainer)
}
return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
408, // Request Timeout
409, // Conflict - various states that could be resolved on a retry
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable/Slow Down - "Reduce your request rate"
504, // Gateway Time-out
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
// If this is an swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
if swiftError.StatusCode == e {
return true, err
}
}
}
// Check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
if value := headers["Retry-After"]; value != "" {
retryAfter, parseErr := strconv.Atoi(value)
if parseErr != nil {
fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
} else {
duration := time.Second * time.Duration(retryAfter)
if duration <= 60*time.Second {
// Do a short sleep immediately
fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
time.Sleep(duration)
return true, err
}
// Delay a long sleep for a retry
return false, fserrors.NewErrorRetryAfter(duration)
}
}
}
return shouldRetry(err)
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(container), enc.FromStandardPath(containerPath)
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
// swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
ApplicationCredentialId: opt.ApplicationCredentialID,
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
}
if opt.EnvAuth {
err := c.ApplyEnvironment()
if err != nil {
return nil, errors.Wrap(err, "failed to read environment variables")
}
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
if !c.Authenticated() {
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
}
}
if c.AuthUrl == "" {
return nil, errors.New("auth not found")
}
err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
if err != nil {
return nil, err
}
}
// Make sure we re-auth with the AuthToken and StorageUrl
// provided by wrapping the existing auth, so we can just
// override one or the other or both.
if StorageUrl != "" || AuthToken != "" {
// Re-write StorageURL and AuthToken if they are being
// overridden as c.Authenticate above will have
// overwritten them.
if StorageUrl != "" {
c.StorageUrl = StorageUrl
}
if AuthToken != "" {
c.AuthToken = AuthToken
}
c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
}
return c, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
c: c,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
if f.rootContainer != "" && f.rootDirectory != "" {
// Check to see if the object exists - ignoring directory markers
var info swift.Object
var err error
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "swift: chunk size")
}
c, err := swiftConnection(opt, name)
if err != nil {
return nil, err
}
return NewFsWithConnection(opt, name, root, c, false)
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
// Note that due to a quirk of swift, dynamic large objects are
// returned as 0 bytes in the listing. Correct this here by
// making sure we read the full metadata for all 0 byte files.
// We don't read the metadata for directory marker objects.
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
info = nil
}
if info != nil {
// Set info but not headers
err := o.decodeMetaData(info)
if err != nil {
return nil, err
}
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list and listContainerRoot to handle an object.
type listFn func(remote string, object *swift.Object, isDirectory bool) error
// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied. The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
// Options for ObjectsWalk
opts := swift.ObjectsOpts{
Prefix: directory,
Limit: listChunks,
}
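// Without recurse, a '/' delimiter tells swift to roll up everything below a directory into a single subdir entry.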
if !recurse {
opts.Delimiter = '/'
}
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(container, opts)
return shouldRetry(err)
})
if err == nil {
for i := range objects {
object := &objects[i]
isDirectory := false
if !recurse {
isDirectory = strings.HasSuffix(object.Name, "/")
}
remote := enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
if remote == prefix {
// If we have zero length directory markers ending in / then swift
// will return them in the listing for the directory which causes
// duplicate directories. Ignore them here.
continue
}
remote = remote[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
}
err = fn(remote, object, isDirectory)
if err != nil {
break
}
}
}
return objects, err
})
}
type addEntryFn func(fs.DirEntry) error
// list the objects into the function supplied
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, fn addEntryFn) error {
err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
if isDirectory {
remote = strings.TrimRight(remote, "/")
d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
err = fn(d)
} else {
// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
var o fs.Object
o, err = f.newObjectWithInfo(remote, object)
if err != nil {
return err
}
if o.Storable() {
err = fn(o)
}
}
return err
})
if err == swift.ContainerNotFound {
err = fs.ErrorDirNotFound
}
return err
}
// listDir lists a single directory
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(container, directory, prefix, addContainer, false, func(entry fs.DirEntry) error {
entries = append(entries, entry)
return nil
})
if err != nil {
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return entries, nil
}
// listContainers lists the containers
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
for _, container := range containers {
f.cache.MarkOK(container.Name)
d := fs.NewDir(enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
}
return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(container, directory, prefix, addContainer, true, func(entry fs.DirEntry) error {
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
return list.Flush()
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
var total, objects int64
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
usage := &fs.Usage{
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}
return usage, nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
return f.makeContainer(ctx, container)
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// Check to see if container exists first
var err error = swift.ContainerNotFound
if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(container)
return shouldRetryHeaders(rxHeaders, err)
})
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(container, headers)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q created", container)
}
}
return err
}, nil)
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
return nil
}
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(container)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q removed", container)
}
return err
})
return err
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Purge deletes all the files and directories
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context) error {
// Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
return nil
})
close(toBeDeleted)
delError := <-delErr
if err == nil {
err = delError
}
if err != nil {
return err
}
return f.Rmdir(ctx, "")
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return "", err
}
isStaticLargeObject, err := o.isStaticLargeObject()
if err != nil {
return "", err
}
if isDynamicLargeObject || isStaticLargeObject {
fs.Debugf(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.md5), nil
}
// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(header string) (bool, error) {
err := o.readMetaData()
if err != nil {
if err == fs.ErrorObjectNotFound {
return false, nil
}
return false, err
}
_, isDynamicLargeObject := o.headers[header]
return isDynamicLargeObject, nil
}
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject() (bool, error) {
return o.hasHeader("X-Object-Manifest")
}
// isStaticLargeObjectFile checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
o.lastModified = info.LastModified
o.size = info.Bytes
o.md5 = info.Hash
o.contentType = info.ContentType
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData() (err error) {
if o.headers != nil {
return nil
}
var info swift.Object
var h swift.Headers
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(container, containerPath)
return shouldRetryHeaders(h, err)
})
if err != nil {
if err == swift.ObjectNotFound {
return fs.ErrorObjectNotFound
}
return err
}
o.headers = h
err = o.decodeMetaData(&info)
if err != nil {
return err
}
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
if fs.Config.UseServerModTime {
return o.lastModified
}
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.lastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData()
if err != nil {
return err
}
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
newHeaders := meta.ObjectHeaders()
for k, v := range newHeaders {
o.headers[k] = v
}
// Include any other metadata from request
for k, v := range o.headers {
if strings.HasPrefix(k, "X-Object-") {
newHeaders[k] = v
}
}
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
return shouldRetry(err)
})
}
// Storable returns if this object is storable
//
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
return o.contentType != directoryMarkerContentType
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
headers := fs.OpenOptionHeaders(options)
_, isRanging := headers["Range"]
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
return shouldRetryHeaders(rxHeaders, err)
})
return
}
// min returns the smallest of x, y
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
// removeSegments removes any old segments from o
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
return nil
}
fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
var err error
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(segmentsContainer, remote)
return shouldRetry(err)
})
})
if err != nil {
return err
}
// remove the segments container if empty, ignore errors
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerDelete(segmentsContainer)
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Removed empty container %q", segmentsContainer)
}
return nil
}
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
// encoded but we need '&' encoded.
func urlEncode(str string) string {
var buf bytes.Buffer
for i := 0; i < len(str); i++ {
c := str[i]
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' {
_ = buf.WriteByte(c)
} else {
_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
}
}
return buf.String()
}
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
// Create the segmentsContainer if it doesn't exist
var err error
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
return shouldRetryHeaders(rxHeaders, err)
})
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if o.fs.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(segmentsContainer, headers)
return shouldRetry(err)
})
}
if err != nil {
return "", err
}
// Upload the chunks
left := size
i := 0
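// Derive a prefix from the upload time and size so this upload's segments can be told apart from any earlier ones.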
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
segmentsPath := path.Join(containerPath, uniquePrefix)
in := bufio.NewReader(in0)
segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
for {
// can we read at least one byte?
if _, err := in.Peek(1); err != nil {
if left > 0 {
return "", err // read less than expected
}
fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
break
}
n := int64(o.fs.opt.ChunkSize)
if size != -1 {
n = min(left, n)
headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
left -= n
}
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
deleteChunks(o, segmentsContainer, segmentInfos)
segmentInfos = nil
return "", err
}
i++
}
// Upload the manifest
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
deleteChunks(o, segmentsContainer, segmentInfos)
segmentInfos = nil
}
return uniquePrefix + "/", err
}
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
if len(segmentInfos) > 0 {
for _, v := range segmentInfos {
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
e := o.fs.c.ObjectDelete(segmentsContainer, v)
if e != nil {
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
}
}
}
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
container, containerPath := o.split()
if container == "" {
return fserrors.FatalError(errors.New("can't upload files to the root"))
}
err := o.fs.makeContainer(ctx, container)
if err != nil {
return err
}
size := src.Size()
modTime := src.ModTime(ctx)
// Note whether this is a dynamic large object before starting
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
contentType := fs.MimeType(ctx, src)
headers := m.ObjectHeaders()
uniquePrefix := ""
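// Uploads larger than ChunkSize, or of unknown size (unless chunking is disabled), go via the segmented upload path.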
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
if err != nil {
return err
}
o.headers = nil // wipe old metadata
} else {
var inCount *readers.CountingReader
if size >= 0 {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
} else {
// otherwise count the size for later
inCount = readers.NewCountingReader(in)
in = inCount
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return err
}
// set Metadata since ObjectPut checked the hash and length so we know the
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
if inCount != nil {
// update the size if streaming from the reader
o.size = int64(inCount.BytesRead())
}
}
// If file was a dynamic large object then remove old/all segments
if isDynamicLargeObject {
err = o.removeSegments(uniquePrefix)
if err != nil {
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}
// Read the metadata from the newly created object if necessary
return o.readMetaData()
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
container, containerPath := o.split()
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(container, containerPath)
return shouldRetry(err)
})
if err != nil {
return err
}
// ...then segments if required
if isDynamicLargeObject {
err = o.removeSegments("")
if err != nil {
return err
}
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Copier = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
| 1 | 9,707 | This looks like an unrelated change? What is it for? | rclone-rclone | go |
@@ -0,0 +1,7 @@
+package csv
+
+import "sync/atomic"
+
+func (d *tableDecoder) IsDone() bool {
+ return d.empty || atomic.LoadInt32(&d.used) != 0
+} | 1 | 1 | 11,834 | Wait, what is this doing? Is this a way to create methods that are only accessible from tests? | influxdata-flux | go |
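For context on the question in the row above: Go compiles *_test.go files into the same package, but only under `go test`, so a small export file can attach test-only helpers to unexported types. A minimal sketch of the pattern with made-up names (not necessarily how flux arranges it):

// export_test.go (hypothetical file name): built only under `go test`,
// so nothing declared here is visible to normal importers of the package.
package csv

import "sync/atomic"

// usedCount exposes the unexported counter so tests can assert on it.
func (d *tableDecoder) usedCount() int32 {
	return atomic.LoadInt32(&d.used)
}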
|
@@ -93,6 +93,11 @@ func (s *Service) list(c *gin.Context) {
name := c.Query("name")
ns := c.Query("namespace")
+ canList := utils.CanListChaos(c, ns)
+ if !canList {
+ return
+ }
+
metas, err := s.archive.ListMeta(context.Background(), kind, ns, name, true)
if err != nil {
c.Status(http.StatusInternalServerError) | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package archive
import (
"context"
"fmt"
"net/http"
"time"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/pkg/apiserver/utils"
"github.com/chaos-mesh/chaos-mesh/pkg/core"
)
// Service defines a handler service for archive experiments.
type Service struct {
archive core.ExperimentStore
event core.EventStore
}
// NewService returns an archive experiment service instance.
func NewService(
archive core.ExperimentStore,
event core.EventStore,
) *Service {
return &Service{
archive: archive,
event: event,
}
}
// Register mounts our HTTP handler on the mux.
func Register(r *gin.RouterGroup, s *Service) {
endpoint := r.Group("/archives")
endpoint.GET("", s.list)
endpoint.GET("/detail", s.detail)
endpoint.GET("/report", s.report)
}
// Archive defines the basic information of an archive.
type Archive struct {
UID string `json:"uid"`
Kind string `json:"kind"`
Namespace string `json:"namespace"`
Name string `json:"name"`
Action string `json:"action"`
StartTime time.Time `json:"start_time"`
FinishTime time.Time `json:"finish_time"`
}
// Detail represents an archive instance.
type Detail struct {
Archive
YAML core.ExperimentYAMLDescription `json:"yaml"`
}
// Report defines the report of archive experiments.
type Report struct {
Meta *Archive `json:"meta"`
Events []*core.Event `json:"events"`
TotalTime string `json:"total_time"`
TotalFaultTime string `json:"total_fault_time"`
}
// @Summary Get archived chaos experiments.
// @Description Get archived chaos experiments.
// @Tags archives
// @Produce json
// @Param namespace query string false "namespace"
// @Param name query string false "name"
// @Param kind query string false "kind" Enums(PodChaos, IoChaos, NetworkChaos, TimeChaos, KernelChaos, StressChaos)
// @Success 200 {array} Archive
// @Router /archives [get]
// @Failure 500 {object} utils.APIError
func (s *Service) list(c *gin.Context) {
kind := c.Query("kind")
name := c.Query("name")
ns := c.Query("namespace")
metas, err := s.archive.ListMeta(context.Background(), kind, ns, name, true)
if err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
return
}
archives := make([]Archive, 0)
for _, meta := range metas {
archives = append(archives, Archive{
UID: meta.UID,
Kind: meta.Kind,
Namespace: meta.Namespace,
Name: meta.Name,
Action: meta.Action,
StartTime: meta.StartTime,
FinishTime: meta.FinishTime,
})
}
c.JSON(http.StatusOK, archives)
}
// @Summary Get the detail of an archived chaos experiment.
// @Description Get the detail of an archived chaos experiment.
// @Tags archives
// @Produce json
// @Param uid query string true "uid"
// @Success 200 {object} Detail
// @Router /archives/detail [get]
// @Failure 500 {object} utils.APIError
func (s *Service) detail(c *gin.Context) {
var (
err error
yaml core.ExperimentYAMLDescription
detail Detail
)
uid := c.Query("uid")
if uid == "" {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.New("uid cannot be empty"))
return
}
exp, err := s.archive.FindByUID(context.Background(), uid)
if err != nil {
if !gorm.IsRecordNotFoundError(err) {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInvalidRequest.New("the archive is not found"))
}
return
}
switch exp.Kind {
case v1alpha1.KindPodChaos:
yaml, err = exp.ParsePodChaos()
case v1alpha1.KindIoChaos:
yaml, err = exp.ParseIOChaos()
case v1alpha1.KindNetworkChaos:
yaml, err = exp.ParseNetworkChaos()
case v1alpha1.KindTimeChaos:
yaml, err = exp.ParseTimeChaos()
case v1alpha1.KindKernelChaos:
yaml, err = exp.ParseKernelChaos()
case v1alpha1.KindStressChaos:
yaml, err = exp.ParseStressChaos()
default:
err = fmt.Errorf("kind %s is not supported", exp.Kind)
}
if err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
return
}
detail = Detail{
Archive: Archive{
UID: exp.UID,
Kind: exp.Kind,
Name: exp.Name,
Namespace: exp.Namespace,
Action: exp.Action,
StartTime: exp.StartTime,
FinishTime: exp.FinishTime,
},
YAML: yaml,
}
c.JSON(http.StatusOK, detail)
}
// @Summary Get the report of an archived chaos experiment.
// @Description Get the report of an archived chaos experiment.
// @Tags archives
// @Produce json
// @Param uid query string true "uid"
// @Success 200 {array} Report
// @Router /archives/report [get]
// @Failure 500 {object} utils.APIError
func (s *Service) report(c *gin.Context) {
var (
err error
report Report
)
uid := c.Query("uid")
if uid == "" {
c.Status(http.StatusBadRequest)
_ = c.Error(utils.ErrInvalidRequest.New("uid cannot be empty"))
return
}
meta, err := s.archive.FindMetaByUID(context.Background(), uid)
if err != nil {
if !gorm.IsRecordNotFoundError(err) {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
} else {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInvalidRequest.New("the archive is not found"))
}
return
}
report.Meta = &Archive{
UID: meta.UID,
Kind: meta.Kind,
Namespace: meta.Namespace,
Name: meta.Name,
Action: meta.Action,
StartTime: meta.StartTime,
FinishTime: meta.FinishTime,
}
report.Events, err = s.event.ListByUID(context.TODO(), uid)
if err != nil {
c.Status(http.StatusInternalServerError)
_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
return
}
report.TotalTime = report.Meta.FinishTime.Sub(report.Meta.StartTime).String()
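// Sum each event's duration as an offset from a fixed reference time; the accumulated offset is the total fault time.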
timeNow := time.Now()
timeAfter := timeNow
for _, et := range report.Events {
timeAfter = timeAfter.Add(et.FinishTime.Sub(*et.StartTime))
}
report.TotalFaultTime = timeAfter.Sub(timeNow).String()
c.JSON(http.StatusOK, report)
}
| 1 | 18,706 | Is it more reasonable to return some errors here, such as returning error codes (403 and so on)? | chaos-mesh-chaos-mesh | go
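On the error-code question above, one way to realise the suggestion is for the permission helper to record an explicit 403 on the gin context before the handler bails out. A hedged sketch of that shape (the isAllowed predicate and the package placement are made up for illustration):

package utils // hypothetical placement for the sketch

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
)

// isAllowed stands in for the real RBAC check and is stubbed out here.
var isAllowed = func(c *gin.Context, ns string) bool { return false }

// CanListChaos reports whether listing is permitted and, when it is not,
// writes a 403 plus an error to the gin context so callers can simply return.
func CanListChaos(c *gin.Context, ns string) bool {
	if !isAllowed(c, ns) {
		c.Status(http.StatusForbidden)
		_ = c.Error(fmt.Errorf("listing chaos experiments in namespace %q is forbidden", ns))
		return false
	}
	return true
}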
@@ -68,7 +68,11 @@ export function createVNode(type, props, key, ref) {
_parent: null,
_depth: 0,
_dom: null,
- _lastDomChild: null,
+ // _lastDomChildSibling must be initialized to undefined b/c it will eventually
+ // be set to dom.nextSibling which can return `null` and it is important
+ // to be able to distinguish between an uninitialized _lastDomChildSibling and
+ // a _lastDomChildSibling that has been set to `null`
+ _lastDomChildSibling: undefined,
_component: null,
constructor: undefined
}; | 1 | import options from './options';
/**
* Create a virtual node (used for JSX)
* @param {import('./internal').VNode["type"]} type The node name or Component
* constructor for this virtual node
* @param {object | null | undefined} [props] The properties of the virtual node
* @param {Array<import('.').ComponentChildren>} [children] The children of the virtual node
* @returns {import('./internal').VNode}
*/
export function createElement(type, props, children) {
let normalizedProps = {},
i;
for (i in props) {
if (i !== 'key' && i !== 'ref') normalizedProps[i] = props[i];
}
if (arguments.length > 3) {
children = [children];
// https://github.com/preactjs/preact/issues/1916
for (i = 3; i < arguments.length; i++) {
children.push(arguments[i]);
}
}
if (children != null) {
normalizedProps.children = children;
}
// If a Component VNode, check for and apply defaultProps
// Note: type may be undefined in development, must never error here.
if (typeof type === 'function' && type.defaultProps != null) {
for (i in type.defaultProps) {
if (normalizedProps[i] === undefined) {
normalizedProps[i] = type.defaultProps[i];
}
}
}
return createVNode(
type,
normalizedProps,
props && props.key,
props && props.ref
);
}
/**
* Create a VNode (used internally by Preact)
* @param {import('./internal').VNode["type"]} type The node name or Component
* Constructor for this virtual node
* @param {object | string | number | null} props The properties of this virtual node.
* If this virtual node represents a text node, this is the text of the node (string or number).
* @param {string | number | null} key The key for this virtual node, used when
* diffing it against its children
* @param {import('./internal').VNode["ref"]} ref The ref property that will
* receive a reference to its created child
* @returns {import('./internal').VNode}
*/
export function createVNode(type, props, key, ref) {
// V8 seems to be better at detecting type shapes if the object is allocated from the same call site
// Do not inline into createElement and coerceToVNode!
const vnode = {
type,
props,
key,
ref,
_children: null,
_parent: null,
_depth: 0,
_dom: null,
_lastDomChild: null,
_component: null,
constructor: undefined
};
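// Give registered plugins (such as devtools) a chance to observe the newly created vnode.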
if (options.vnode) options.vnode(vnode);
return vnode;
}
export function createRef() {
return {};
}
export function Fragment(props) {
return props.children;
}
/**
* Check if the argument is a valid Preact VNode.
* @param {*} vnode
* @returns {vnode is import('./internal').VNode}
*/
export const isValidElement = vnode =>
vnode != null && vnode.constructor === undefined;
| 1 | 15,132 | Thinking out loud... would `_nextDom` be a better name for this? | preactjs-preact | js |
@@ -165,11 +165,6 @@ class TestSuperfluousParentheses(CheckerTestCase):
(Message("superfluous-parens", line=1, args="if"), "if (foo):", 0),
(Message("superfluous-parens", line=1, args="if"), "if ((foo, bar)):", 0),
(Message("superfluous-parens", line=1, args="if"), "if (foo(bar)):", 0),
- (
- Message("superfluous-parens", line=1, args="return"),
- "return ((x for x in x))",
- 0,
- ),
(Message("superfluous-parens", line=1, args="not"), "not (foo)", 0),
(Message("superfluous-parens", line=1, args="not"), "if not (foo):", 1),
(Message("superfluous-parens", line=1, args="if"), "if (not (foo)):", 0), | 1 | # Copyright (c) 2009-2011, 2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 buck <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Harut <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Petr Pulc <[email protected]>
# Copyright (c) 2016 Derek Gustafson <[email protected]>
# Copyright (c) 2017 Krzysztof Czapla <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 James M. Allen <[email protected]>
# Copyright (c) 2017 vinnyrose <[email protected]>
# Copyright (c) 2018, 2020 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018, 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Ashley Whetter <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Andreas Finkler <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Check format checker helper functions"""
import os
import tempfile
import tokenize
import astroid
from pylint import lint, reporters
from pylint.checkers.format import FormatChecker
from pylint.testutils import CheckerTestCase, Message, _tokenize_str
class TestMultiStatementLine(CheckerTestCase):
CHECKER_CLASS = FormatChecker
def testSingleLineIfStmts(self):
stmt = astroid.extract_node(
"""
if True: pass #@
"""
)
self.checker.config.single_line_if_stmt = False
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
self.checker.config.single_line_if_stmt = True
with self.assertNoMessages():
self.visitFirst(stmt)
stmt = astroid.extract_node(
"""
if True: pass #@
else:
pass
"""
)
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
def testSingleLineClassStmts(self):
stmt = astroid.extract_node(
"""
class MyError(Exception): pass #@
"""
)
self.checker.config.single_line_class_stmt = False
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
self.checker.config.single_line_class_stmt = True
with self.assertNoMessages():
self.visitFirst(stmt)
stmt = astroid.extract_node(
"""
class MyError(Exception): a='a' #@
"""
)
self.checker.config.single_line_class_stmt = False
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
self.checker.config.single_line_class_stmt = True
with self.assertNoMessages():
self.visitFirst(stmt)
stmt = astroid.extract_node(
"""
class MyError(Exception): a='a'; b='b' #@
"""
)
self.checker.config.single_line_class_stmt = False
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
self.checker.config.single_line_class_stmt = True
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
def testTryExceptFinallyNoMultipleStatement(self):
tree = astroid.extract_node(
"""
try: #@
pass
except:
pass
finally:
pass"""
)
with self.assertNoMessages():
self.visitFirst(tree)
def visitFirst(self, tree):
self.checker.process_tokens([])
self.checker.visit_default(tree.body[0])
def test_ellipsis_is_ignored(self):
code = """
from typing import overload
@overload
def concat2(arg1: str) -> str: ...
"""
tree = astroid.extract_node(code)
with self.assertNoMessages():
self.visitFirst(tree)
code = """
def concat2(arg1: str) -> str: ...
"""
stmt = astroid.extract_node(code)
with self.assertAddsMessages(Message("multiple-statements", node=stmt.body[0])):
self.visitFirst(stmt)
class TestSuperfluousParentheses(CheckerTestCase):
CHECKER_CLASS = FormatChecker
def testCheckKeywordParensHandlesValidCases(self):
cases = [
"if foo:",
"if foo():",
"if (x and y) or z:",
"assert foo()",
"assert ()",
"if (1, 2) in (3, 4):",
"if (a or b) in c:",
"return (x for x in x)",
"if (x for x in x):",
"for x in (x for x in x):",
"not (foo or bar)",
"not (foo or bar) and baz",
"return [x for x in (3 if 1 else [4])]",
"return (x for x in ((3, 4) if 2 > 1 else (5, 6)))",
]
with self.assertNoMessages():
for code in cases:
self.checker._check_keyword_parentheses(_tokenize_str(code), 0)
def testCheckKeywordParensHandlesUnnecessaryParens(self):
cases = [
(Message("superfluous-parens", line=1, args="if"), "if (foo):", 0),
(Message("superfluous-parens", line=1, args="if"), "if ((foo, bar)):", 0),
(Message("superfluous-parens", line=1, args="if"), "if (foo(bar)):", 0),
(
Message("superfluous-parens", line=1, args="return"),
"return ((x for x in x))",
0,
),
(Message("superfluous-parens", line=1, args="not"), "not (foo)", 0),
(Message("superfluous-parens", line=1, args="not"), "if not (foo):", 1),
(Message("superfluous-parens", line=1, args="if"), "if (not (foo)):", 0),
(Message("superfluous-parens", line=1, args="not"), "if (not (foo)):", 2),
(
Message("superfluous-parens", line=1, args="for"),
"for (x) in (1, 2, 3):",
0,
),
(
Message("superfluous-parens", line=1, args="if"),
"if (1) in (1, 2, 3):",
0,
),
]
for msg, code, offset in cases:
with self.assertAddsMessages(msg):
self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
def testNoSuperfluousParensWalrusOperatorIf(self):
"""Parentheses change the meaning of assignment in the walrus operator
and so are not always superfluous."""
cases = [
("if (odd := is_odd(i))\n"),
("not (foo := 5)\n"),
]
for code in cases:
with self.assertNoMessages():
self.checker.process_tokens(_tokenize_str(code))
def testPositiveSuperfluousParensWalrusOperatorIf(self):
"""Test positive superfluous parens cases with the walrus operator"""
cases = [
(Message("superfluous-parens", line=1, args="if"), "if ((x := y)):\n"),
(Message("superfluous-parens", line=1, args="not"), "if not ((x := y)):\n"),
]
for msg, code in cases:
with self.assertAddsMessages(msg):
self.checker.process_tokens(_tokenize_str(code))
def testCheckIfArgsAreNotUnicode(self):
cases = [("if (foo):", 0), ("assert (1 == 1)", 0)]
for code, offset in cases:
self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
got = self.linter.release_messages()
assert isinstance(got[-1].args, str)
def testFuturePrintStatementWithoutParensWarning(self):
code = """from __future__ import print_function
print('Hello world!')
"""
tree = astroid.parse(code)
with self.assertNoMessages():
self.checker.process_module(tree)
self.checker.process_tokens(_tokenize_str(code))
def testKeywordParensFalsePositive(self):
code = "if 'bar' in (DICT or {}):"
with self.assertNoMessages():
self.checker._check_keyword_parentheses(_tokenize_str(code), start=2)
class TestCheckSpace(CheckerTestCase):
CHECKER_CLASS = FormatChecker
def test_encoding_token(self):
"""Make sure the encoding token doesn't change the checker's behavior
_tokenize_str doesn't produce an encoding token, but
reading a file does
"""
with self.assertNoMessages():
encoding_token = tokenize.TokenInfo(
tokenize.ENCODING, "utf-8", (0, 0), (0, 0), ""
)
tokens = [encoding_token] + _tokenize_str(
"if (\n None):\n pass\n"
)
self.checker.process_tokens(tokens)
def test_disable_global_option_end_of_line():
"""
Test for issue with disabling tokenizer messages
that extend beyond the scope of the ast tokens
"""
file_ = tempfile.NamedTemporaryFile( # pylint: disable=consider-using-with
"w", delete=False
)
with file_:
file_.write(
"""
mylist = [
None
]
"""
)
try:
linter = lint.PyLinter()
checker = FormatChecker(linter)
linter.register_checker(checker)
args = linter.load_command_line_configuration(
[file_.name, "-d", "bad-continuation"]
)
myreporter = reporters.CollectingReporter()
linter.set_reporter(myreporter)
linter.check(args)
assert not myreporter.messages
finally:
os.remove(file_.name)
| 1 | 15,503 | Turns out the unittests also had a false positive. | PyCQA-pylint | py |
@@ -24,12 +24,12 @@ namespace OpenTelemetry.Metrics
public abstract class Meter
{
/// <summary>
- /// Creates a counter for long with given name.
+ /// Creates a counter for Int64 with given name.
/// </summary>
/// <param name="name">The name of the counter.</param>
/// <param name="monotonic">indicates if only positive values are expected.</param>
/// <returns>The counter instance.</returns>
- public abstract Counter<long> CreateLongCounter(string name, bool monotonic = true);
+ public Counter<long> CreateInt64Counter(string name, bool monotonic = true) => this.CreateCounter<long>(name, monotonic);
/// <summary>
/// Creates a counter for double with given name. | 1 | // <copyright file="Meter.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Collections.Generic;
namespace OpenTelemetry.Metrics
{
/// <summary>
/// Main interface to obtain metric instruments.
/// </summary>
public abstract class Meter
{
/// <summary>
/// Creates a counter for long with given name.
/// </summary>
/// <param name="name">The name of the counter.</param>
/// <param name="monotonic">indicates if only positive values are expected.</param>
/// <returns>The counter instance.</returns>
public abstract Counter<long> CreateLongCounter(string name, bool monotonic = true);
/// <summary>
/// Creates a counter for double with given name.
/// </summary>
/// <param name="name">The name of the counter.</param>
/// <param name="monotonic">indicates if only positive values are expected.</param>
/// <returns>The counter instance.</returns>
public abstract Counter<double> CreateDoubleCounter(string name, bool monotonic = true);
/// <summary>
/// Creates a Gauge for long with given name.
/// </summary>
/// <param name="name">The name of the gauge.</param>
/// <param name="monotonic">indicates if only positive values are expected.</param>
/// <returns>The Gauge instance.</returns>
public abstract Gauge<long> CreateLongGauge(string name, bool monotonic = false);
/// <summary>
/// Creates a Gauge for double with given name.
/// </summary>
/// <param name="name">The name of the gauge.</param>
/// <param name="monotonic">indicates if only positive values are expected.</param>
/// <returns>The Gauge instance.</returns>
public abstract Gauge<double> CreateDoubleGauge(string name, bool monotonic = false);
/// <summary>
/// Creates a measure for long with given name.
/// </summary>
/// <param name="name">The name of the measure.</param>
/// <param name="absolute">indicates if only positive values are expected.</param>
/// <returns>The measure instance.</returns>
public abstract Measure<long> CreateLongMeasure(string name, bool absolute = true);
/// <summary>
/// Creates a measure for double with given name.
/// </summary>
/// <param name="name">The name of the measure.</param>
/// <param name="absolute">indicates if only positive values are expected.</param>
/// <returns>The measure instance.</returns>
public abstract Measure<double> CreateDoubleMeasure(string name, bool absolute = true);
/// <summary>
/// Constructs or retrieves the <see cref="LabelSet"/> from the given label key-value pairs.
/// </summary>
/// <param name="labels">Label key value pairs.</param>
/// <returns>The <see cref="LabelSet"/> with given label key value pairs.</returns>
public abstract LabelSet GetLabelSet(IEnumerable<KeyValuePair<string, string>> labels);
}
}
| 1 | 12,613 | I think these method names would be clearer if written as "Create a {type} counter|gauge|measure with given name". e.g. "Create an int64 counter with given name", "Create a double gauge with given name" | open-telemetry-opentelemetry-dotnet | .cs
@@ -3530,7 +3530,7 @@ int LuaScriptInterface::luaDoChallengeCreature(lua_State* L)
int LuaScriptInterface::luaIsValidUID(lua_State* L)
{
//isValidUID(uid)
- pushBoolean(L, getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1)) != nullptr);
+ pushBoolean(L, getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1)));
return 1;
}
| 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include <fmt/format.h>
#include "luascript.h"
#include "chat.h"
#include "player.h"
#include "game.h"
#include "protocolstatus.h"
#include "spells.h"
#include "iologindata.h"
#include "iomapserialize.h"
#include "configmanager.h"
#include "teleport.h"
#include "databasemanager.h"
#include "bed.h"
#include "monster.h"
#include "scheduler.h"
#include "databasetasks.h"
#include "events.h"
#include "movement.h"
#include "globalevent.h"
#include "script.h"
#include "weapons.h"
extern Chat* g_chat;
extern Game g_game;
extern Monsters g_monsters;
extern ConfigManager g_config;
extern Vocations g_vocations;
extern Spells* g_spells;
extern Events* g_events;
extern Actions* g_actions;
extern TalkActions* g_talkActions;
extern CreatureEvents* g_creatureEvents;
extern MoveEvents* g_moveEvents;
extern GlobalEvents* g_globalEvents;
extern Scripts* g_scripts;
extern Weapons* g_weapons;
ScriptEnvironment::DBResultMap ScriptEnvironment::tempResults;
uint32_t ScriptEnvironment::lastResultId = 0;
std::multimap<ScriptEnvironment*, Item*> ScriptEnvironment::tempItems;
LuaEnvironment g_luaEnvironment;
ScriptEnvironment::ScriptEnvironment()
{
resetEnv();
}
ScriptEnvironment::~ScriptEnvironment()
{
resetEnv();
}
void ScriptEnvironment::resetEnv()
{
scriptId = 0;
callbackId = 0;
timerEvent = false;
interface = nullptr;
localMap.clear();
tempResults.clear();
auto pair = tempItems.equal_range(this);
auto it = pair.first;
while (it != pair.second) {
Item* item = it->second;
if (item->getParent() == VirtualCylinder::virtualCylinder) {
g_game.ReleaseItem(item);
}
it = tempItems.erase(it);
}
}
bool ScriptEnvironment::setCallbackId(int32_t callbackId, LuaScriptInterface* scriptInterface)
{
if (this->callbackId != 0) {
//nested callbacks are not allowed
if (interface) {
reportErrorFunc(interface->getLuaState(), "Nested callbacks!");
}
return false;
}
this->callbackId = callbackId;
interface = scriptInterface;
return true;
}
void ScriptEnvironment::getEventInfo(int32_t& scriptId, LuaScriptInterface*& scriptInterface, int32_t& callbackId, bool& timerEvent) const
{
scriptId = this->scriptId;
scriptInterface = interface;
callbackId = this->callbackId;
timerEvent = this->timerEvent;
}
uint32_t ScriptEnvironment::addThing(Thing* thing)
{
if (!thing || thing->isRemoved()) {
return 0;
}
Creature* creature = thing->getCreature();
if (creature) {
return creature->getID();
}
Item* item = thing->getItem();
if (item && item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
return item->getUniqueId();
}
for (const auto& it : localMap) {
if (it.second == item) {
return it.first;
}
}
localMap[++lastUID] = item;
return lastUID;
}
void ScriptEnvironment::insertItem(uint32_t uid, Item* item)
{
auto result = localMap.emplace(uid, item);
if (!result.second) {
std::cout << std::endl << "Lua Script Error: Thing uid already taken.";
}
}
Thing* ScriptEnvironment::getThingByUID(uint32_t uid)
{
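// UIDs are partitioned by range: values >= 0x10000000 are creature ids,
// values up to 0xFFFF are items carrying a unique id attribute, and
// anything in between lives in this script environment's local map.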
if (uid >= 0x10000000) {
return g_game.getCreatureByID(uid);
}
if (uid <= std::numeric_limits<uint16_t>::max()) {
Item* item = g_game.getUniqueItem(uid);
if (item && !item->isRemoved()) {
return item;
}
return nullptr;
}
auto it = localMap.find(uid);
if (it != localMap.end()) {
Item* item = it->second;
if (!item->isRemoved()) {
return item;
}
}
return nullptr;
}
Item* ScriptEnvironment::getItemByUID(uint32_t uid)
{
Thing* thing = getThingByUID(uid);
if (!thing) {
return nullptr;
}
return thing->getItem();
}
Container* ScriptEnvironment::getContainerByUID(uint32_t uid)
{
Item* item = getItemByUID(uid);
if (!item) {
return nullptr;
}
return item->getContainer();
}
void ScriptEnvironment::removeItemByUID(uint32_t uid)
{
if (uid <= std::numeric_limits<uint16_t>::max()) {
g_game.removeUniqueItem(uid);
return;
}
auto it = localMap.find(uid);
if (it != localMap.end()) {
localMap.erase(it);
}
}
void ScriptEnvironment::addTempItem(Item* item)
{
tempItems.emplace(this, item);
}
void ScriptEnvironment::removeTempItem(Item* item)
{
for (auto it = tempItems.begin(), end = tempItems.end(); it != end; ++it) {
if (it->second == item) {
tempItems.erase(it);
break;
}
}
}
uint32_t ScriptEnvironment::addResult(DBResult_ptr res)
{
tempResults[++lastResultId] = res;
return lastResultId;
}
bool ScriptEnvironment::removeResult(uint32_t id)
{
auto it = tempResults.find(id);
if (it == tempResults.end()) {
return false;
}
tempResults.erase(it);
return true;
}
DBResult_ptr ScriptEnvironment::getResultByID(uint32_t id)
{
auto it = tempResults.find(id);
if (it == tempResults.end()) {
return nullptr;
}
return it->second;
}
std::string LuaScriptInterface::getErrorDesc(ErrorCode_t code)
{
switch (code) {
case LUA_ERROR_PLAYER_NOT_FOUND: return "Player not found";
case LUA_ERROR_CREATURE_NOT_FOUND: return "Creature not found";
case LUA_ERROR_ITEM_NOT_FOUND: return "Item not found";
case LUA_ERROR_THING_NOT_FOUND: return "Thing not found";
case LUA_ERROR_TILE_NOT_FOUND: return "Tile not found";
case LUA_ERROR_HOUSE_NOT_FOUND: return "House not found";
case LUA_ERROR_COMBAT_NOT_FOUND: return "Combat not found";
case LUA_ERROR_CONDITION_NOT_FOUND: return "Condition not found";
case LUA_ERROR_AREA_NOT_FOUND: return "Area not found";
case LUA_ERROR_CONTAINER_NOT_FOUND: return "Container not found";
case LUA_ERROR_VARIANT_NOT_FOUND: return "Variant not found";
case LUA_ERROR_VARIANT_UNKNOWN: return "Unknown variant type";
case LUA_ERROR_SPELL_NOT_FOUND: return "Spell not found";
default: return "Bad error code";
}
}
ScriptEnvironment LuaScriptInterface::scriptEnv[16];
int32_t LuaScriptInterface::scriptEnvIndex = -1;
LuaScriptInterface::LuaScriptInterface(std::string interfaceName) : interfaceName(std::move(interfaceName))
{
if (!g_luaEnvironment.getLuaState()) {
g_luaEnvironment.initState();
}
}
LuaScriptInterface::~LuaScriptInterface()
{
closeState();
}
bool LuaScriptInterface::reInitState()
{
g_luaEnvironment.clearCombatObjects(this);
g_luaEnvironment.clearAreaObjects(this);
closeState();
return initState();
}
/// Same as lua_pcall, but adds stack trace to error strings in called function.
int LuaScriptInterface::protectedCall(lua_State* L, int nargs, int nresults)
{
int error_index = lua_gettop(L) - nargs;
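// Put the traceback handler below the function and its arguments so lua_pcall can invoke it on errors.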
lua_pushcfunction(L, luaErrorHandler);
lua_insert(L, error_index);
int ret = lua_pcall(L, nargs, nresults, error_index);
lua_remove(L, error_index);
return ret;
}
int32_t LuaScriptInterface::loadFile(const std::string& file, Npc* npc /* = nullptr*/)
{
//loads file as a chunk at stack top
int ret = luaL_loadfile(luaState, file.c_str());
if (ret != 0) {
lastLuaError = popString(luaState);
return -1;
}
//check that it is loaded as a function
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
loadingFile = file;
if (!reserveScriptEnv()) {
lua_pop(luaState, 1);
return -1;
}
ScriptEnvironment* env = getScriptEnv();
env->setScriptId(EVENT_ID_LOADING, this);
env->setNpc(npc);
//execute it
ret = protectedCall(luaState, 0, 0);
if (ret != 0) {
reportError(nullptr, popString(luaState));
resetScriptEnv();
return -1;
}
resetScriptEnv();
return 0;
}
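// Caches the global function named eventName in this interface's private
// event table and returns its event id (-1 if no such function exists).
// The global itself is then cleared so later script loads cannot clash.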
int32_t LuaScriptInterface::getEvent(const std::string& eventName)
{
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//get current event function pointer
lua_getglobal(luaState, eventName.c_str());
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 2);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -1);
lua_rawseti(luaState, -3, runningEventId);
lua_pop(luaState, 2);
//reset global value of this event
lua_pushnil(luaState);
lua_setglobal(luaState, eventName.c_str());
cacheFiles[runningEventId] = loadingFile + ":" + eventName;
return runningEventId++;
}
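// Variant used for anonymous callbacks: caches the function currently on
// top of the stack instead of a named global.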
int32_t LuaScriptInterface::getEvent()
{
//check if function is on the stack
if (!isFunction(luaState, -1)) {
return -1;
}
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -2);
lua_rawseti(luaState, -2, runningEventId);
lua_pop(luaState, 2);
cacheFiles[runningEventId] = loadingFile + ":callback";
return runningEventId++;
}
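// Like getEvent, but for a handler stored as a field of a global table
// (globalName.eventName); the field is cleared once cached.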
int32_t LuaScriptInterface::getMetaEvent(const std::string& globalName, const std::string& eventName)
{
//get our events table
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
lua_pop(luaState, 1);
return -1;
}
//get current event function pointer
lua_getglobal(luaState, globalName.c_str());
lua_getfield(luaState, -1, eventName.c_str());
if (!isFunction(luaState, -1)) {
lua_pop(luaState, 3);
return -1;
}
//save in our events table
lua_pushvalue(luaState, -1);
lua_rawseti(luaState, -4, runningEventId);
lua_pop(luaState, 1);
//reset global value of this event
lua_pushnil(luaState);
lua_setfield(luaState, -2, eventName.c_str());
lua_pop(luaState, 2);
cacheFiles[runningEventId] = loadingFile + ":" + globalName + "@" + eventName;
return runningEventId++;
}
const std::string& LuaScriptInterface::getFileById(int32_t scriptId)
{
if (scriptId == EVENT_ID_LOADING) {
return loadingFile;
}
auto it = cacheFiles.find(scriptId);
if (it == cacheFiles.end()) {
static const std::string unk = "(Unknown scriptfile)";
return unk;
}
return it->second;
}
std::string LuaScriptInterface::getStackTrace(lua_State* L, const std::string& error_desc)
{
lua_getglobal(L, "debug");
if (!isTable(L, -1)) {
lua_pop(L, 1);
return error_desc;
}
lua_getfield(L, -1, "traceback");
if (!isFunction(L, -1)) {
lua_pop(L, 2);
return error_desc;
}
lua_replace(L, -2);
pushString(L, error_desc);
lua_call(L, 1, 1);
return popString(L);
}
void LuaScriptInterface::reportError(const char* function, const std::string& error_desc, lua_State* L /*= nullptr*/, bool stack_trace /*= false*/)
{
int32_t scriptId;
int32_t callbackId;
bool timerEvent;
LuaScriptInterface* scriptInterface;
getScriptEnv()->getEventInfo(scriptId, scriptInterface, callbackId, timerEvent);
std::cout << std::endl << "Lua Script Error: ";
if (scriptInterface) {
std::cout << '[' << scriptInterface->getInterfaceName() << "] " << std::endl;
if (timerEvent) {
std::cout << "in a timer event called from: " << std::endl;
}
if (callbackId) {
std::cout << "in callback: " << scriptInterface->getFileById(callbackId) << std::endl;
}
std::cout << scriptInterface->getFileById(scriptId) << std::endl;
}
if (function) {
std::cout << function << "(). ";
}
if (L && stack_trace) {
std::cout << getStackTrace(L, error_desc) << std::endl;
} else {
std::cout << error_desc << std::endl;
}
}
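// Pushes the event function cached under functionId from this interface's
// event table; returns false if no function is stored there.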
bool LuaScriptInterface::pushFunction(int32_t functionId)
{
lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef);
if (!isTable(luaState, -1)) {
return false;
}
lua_rawgeti(luaState, -1, functionId);
lua_replace(luaState, -2);
return isFunction(luaState, -1);
}
bool LuaScriptInterface::initState()
{
luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return false;
}
lua_newtable(luaState);
eventTableRef = luaL_ref(luaState, LUA_REGISTRYINDEX);
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaScriptInterface::closeState()
{
if (!g_luaEnvironment.getLuaState() || !luaState) {
return false;
}
cacheFiles.clear();
if (eventTableRef != -1) {
luaL_unref(luaState, LUA_REGISTRYINDEX, eventTableRef);
eventTableRef = -1;
}
luaState = nullptr;
return true;
}
int LuaScriptInterface::luaErrorHandler(lua_State* L)
{
const std::string& errorMessage = popString(L);
pushString(L, LuaScriptInterface::getStackTrace(L, errorMessage));
return 1;
}
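// Invokes the prepared call with `params` arguments and a single boolean
// result, then verifies the Lua stack returned to its pre-call size.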
bool LuaScriptInterface::callFunction(int params)
{
bool result = false;
int size = lua_gettop(luaState);
if (protectedCall(luaState, params, 1) != 0) {
LuaScriptInterface::reportError(nullptr, LuaScriptInterface::getString(luaState, -1));
} else {
result = LuaScriptInterface::getBoolean(luaState, -1);
}
lua_pop(luaState, 1);
if ((lua_gettop(luaState) + params + 1) != size) {
LuaScriptInterface::reportError(nullptr, "Stack size changed!");
}
resetScriptEnv();
return result;
}
void LuaScriptInterface::callVoidFunction(int params)
{
int size = lua_gettop(luaState);
if (protectedCall(luaState, params, 0) != 0) {
LuaScriptInterface::reportError(nullptr, LuaScriptInterface::popString(luaState));
}
if ((lua_gettop(luaState) + params + 1) != size) {
LuaScriptInterface::reportError(nullptr, "Stack size changed!");
}
resetScriptEnv();
}
void LuaScriptInterface::pushVariant(lua_State* L, const LuaVariant& var)
{
lua_createtable(L, 0, 2);
setField(L, "type", var.type);
switch (var.type) {
case VARIANT_NUMBER:
setField(L, "number", var.number);
break;
case VARIANT_STRING:
setField(L, "string", var.text);
break;
case VARIANT_TARGETPOSITION:
case VARIANT_POSITION: {
pushPosition(L, var.pos);
lua_setfield(L, -2, "pos");
break;
}
default:
break;
}
setMetatable(L, -1, "Variant");
}
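// A null thing yields the legacy zero-filled table; a thing that is neither
// an item nor a creature yields nil.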
void LuaScriptInterface::pushThing(lua_State* L, Thing* thing)
{
if (!thing) {
lua_createtable(L, 0, 4);
setField(L, "uid", 0);
setField(L, "itemid", 0);
setField(L, "actionid", 0);
setField(L, "type", 0);
return;
}
if (Item* item = thing->getItem()) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else if (Creature* creature = thing->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else {
lua_pushnil(L);
}
}
void LuaScriptInterface::pushCylinder(lua_State* L, Cylinder* cylinder)
{
if (Creature* creature = cylinder->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else if (Item* parentItem = cylinder->getItem()) {
pushUserdata<Item>(L, parentItem);
setItemMetatable(L, -1, parentItem);
} else if (Tile* tile = cylinder->getTile()) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else if (cylinder == VirtualCylinder::virtualCylinder) {
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
}
void LuaScriptInterface::pushString(lua_State* L, const std::string& value)
{
lua_pushlstring(L, value.c_str(), value.length());
}
void LuaScriptInterface::pushCallback(lua_State* L, int32_t callback)
{
lua_rawgeti(L, LUA_REGISTRYINDEX, callback);
}
std::string LuaScriptInterface::popString(lua_State* L)
{
if (lua_gettop(L) == 0) {
return std::string();
}
std::string str(getString(L, -1));
lua_pop(L, 1);
return str;
}
int32_t LuaScriptInterface::popCallback(lua_State* L)
{
return luaL_ref(L, LUA_REGISTRYINDEX);
}
// Metatables
void LuaScriptInterface::setMetatable(lua_State* L, int32_t index, const std::string& name)
{
luaL_getmetatable(L, name.c_str());
lua_setmetatable(L, index - 1);
}
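// Lazily builds and caches a "<name>_weak" metatable: a copy of the class
// metatable with __gc removed, so Lua never collects engine-owned objects.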
void LuaScriptInterface::setWeakMetatable(lua_State* L, int32_t index, const std::string& name)
{
static std::set<std::string> weakObjectTypes;
const std::string& weakName = name + "_weak";
auto result = weakObjectTypes.emplace(name);
if (result.second) {
luaL_getmetatable(L, name.c_str());
int childMetatable = lua_gettop(L);
luaL_newmetatable(L, weakName.c_str());
int metatable = lua_gettop(L);
static const std::vector<std::string> methodKeys = {"__index", "__metatable", "__eq"};
for (const std::string& metaKey : methodKeys) {
lua_getfield(L, childMetatable, metaKey.c_str());
lua_setfield(L, metatable, metaKey.c_str());
}
static const std::vector<int> methodIndexes = {'h', 'p', 't'};
for (int metaIndex : methodIndexes) {
lua_rawgeti(L, childMetatable, metaIndex);
lua_rawseti(L, metatable, metaIndex);
}
lua_pushnil(L);
lua_setfield(L, metatable, "__gc");
lua_remove(L, childMetatable);
} else {
luaL_getmetatable(L, weakName.c_str());
}
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setItemMetatable(lua_State* L, int32_t index, const Item* item)
{
if (item->getContainer()) {
luaL_getmetatable(L, "Container");
} else if (item->getTeleport()) {
luaL_getmetatable(L, "Teleport");
} else {
luaL_getmetatable(L, "Item");
}
lua_setmetatable(L, index - 1);
}
void LuaScriptInterface::setCreatureMetatable(lua_State* L, int32_t index, const Creature* creature)
{
if (creature->getPlayer()) {
luaL_getmetatable(L, "Player");
} else if (creature->getMonster()) {
luaL_getmetatable(L, "Monster");
} else {
luaL_getmetatable(L, "Npc");
}
lua_setmetatable(L, index - 1);
}
// Get
std::string LuaScriptInterface::getString(lua_State* L, int32_t arg)
{
size_t len;
const char* c_str = lua_tolstring(L, arg, &len);
if (!c_str || len == 0) {
return std::string();
}
return std::string(c_str, len);
}
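// getField/getFieldString leave the fetched value on the Lua stack, hence
// the explicit lua_pop calls that follow in the readers below.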
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg, int32_t& stackpos)
{
Position position;
position.x = getField<uint16_t>(L, arg, "x");
position.y = getField<uint16_t>(L, arg, "y");
position.z = getField<uint8_t>(L, arg, "z");
lua_getfield(L, arg, "stackpos");
if (lua_isnil(L, -1) == 1) {
stackpos = 0;
} else {
stackpos = getNumber<int32_t>(L, -1);
}
lua_pop(L, 4);
return position;
}
Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg)
{
Position position;
position.x = getField<uint16_t>(L, arg, "x");
position.y = getField<uint16_t>(L, arg, "y");
position.z = getField<uint8_t>(L, arg, "z");
lua_pop(L, 3);
return position;
}
Outfit_t LuaScriptInterface::getOutfit(lua_State* L, int32_t arg)
{
Outfit_t outfit;
outfit.lookMount = getField<uint16_t>(L, arg, "lookMount");
outfit.lookAddons = getField<uint8_t>(L, arg, "lookAddons");
outfit.lookFeet = getField<uint8_t>(L, arg, "lookFeet");
outfit.lookLegs = getField<uint8_t>(L, arg, "lookLegs");
outfit.lookBody = getField<uint8_t>(L, arg, "lookBody");
outfit.lookHead = getField<uint8_t>(L, arg, "lookHead");
outfit.lookTypeEx = getField<uint16_t>(L, arg, "lookTypeEx");
outfit.lookType = getField<uint16_t>(L, arg, "lookType");
lua_pop(L, 8);
return outfit;
}
Outfit LuaScriptInterface::getOutfitClass(lua_State* L, int32_t arg)
{
uint16_t lookType = getField<uint16_t>(L, arg, "lookType");
const std::string& name = getFieldString(L, arg, "name");
bool premium = getField<uint8_t>(L, arg, "premium") == 1;
bool unlocked = getField<uint8_t>(L, arg, "unlocked") == 1;
lua_pop(L, 4);
return Outfit(name, lookType, premium, unlocked);
}
LuaVariant LuaScriptInterface::getVariant(lua_State* L, int32_t arg)
{
LuaVariant var;
switch (var.type = getField<LuaVariantType_t>(L, arg, "type")) {
case VARIANT_NUMBER: {
var.number = getField<uint32_t>(L, arg, "number");
lua_pop(L, 2);
break;
}
case VARIANT_STRING: {
var.text = getFieldString(L, arg, "string");
lua_pop(L, 2);
break;
}
case VARIANT_POSITION:
case VARIANT_TARGETPOSITION: {
lua_getfield(L, arg, "pos");
var.pos = getPosition(L, lua_gettop(L));
lua_pop(L, 2);
break;
}
default: {
var.type = VARIANT_NONE;
lua_pop(L, 1);
break;
}
}
return var;
}
InstantSpell* LuaScriptInterface::getInstantSpell(lua_State* L, int32_t arg)
{
InstantSpell* spell = g_spells->getInstantSpellByName(getFieldString(L, arg, "name"));
lua_pop(L, 1);
return spell;
}
Thing* LuaScriptInterface::getThing(lua_State* L, int32_t arg)
{
Thing* thing;
if (lua_getmetatable(L, arg) != 0) {
lua_rawgeti(L, -1, 't');
switch (getNumber<uint32_t>(L, -1)) {
case LuaData_Item:
thing = getUserdata<Item>(L, arg);
break;
case LuaData_Container:
thing = getUserdata<Container>(L, arg);
break;
case LuaData_Teleport:
thing = getUserdata<Teleport>(L, arg);
break;
case LuaData_Player:
thing = getUserdata<Player>(L, arg);
break;
case LuaData_Monster:
thing = getUserdata<Monster>(L, arg);
break;
case LuaData_Npc:
thing = getUserdata<Npc>(L, arg);
break;
default:
thing = nullptr;
break;
}
lua_pop(L, 2);
} else {
thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, arg));
}
return thing;
}
Creature* LuaScriptInterface::getCreature(lua_State* L, int32_t arg)
{
if (isUserdata(L, arg)) {
return getUserdata<Creature>(L, arg);
}
return g_game.getCreatureByID(getNumber<uint32_t>(L, arg));
}
Player* LuaScriptInterface::getPlayer(lua_State* L, int32_t arg)
{
if (isUserdata(L, arg)) {
return getUserdata<Player>(L, arg);
}
return g_game.getPlayerByID(getNumber<uint32_t>(L, arg));
}
std::string LuaScriptInterface::getFieldString(lua_State* L, int32_t arg, const std::string& key)
{
lua_getfield(L, arg, key.c_str());
return getString(L, -1);
}
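// Each registered class stores its LuaDataType tag under the 't' slot of
// its metatable (compare getThing above), which is what is read back here.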
LuaDataType LuaScriptInterface::getUserdataType(lua_State* L, int32_t arg)
{
if (lua_getmetatable(L, arg) == 0) {
return LuaData_Unknown;
}
lua_rawgeti(L, -1, 't');
LuaDataType type = getNumber<LuaDataType>(L, -1);
lua_pop(L, 2);
return type;
}
// Push
void LuaScriptInterface::pushBoolean(lua_State* L, bool value)
{
lua_pushboolean(L, value ? 1 : 0);
}
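// Pushes five separate return values: primary value/type, secondary
// value/type and the damage origin.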
void LuaScriptInterface::pushCombatDamage(lua_State* L, const CombatDamage& damage)
{
lua_pushnumber(L, damage.primary.value);
lua_pushnumber(L, damage.primary.type);
lua_pushnumber(L, damage.secondary.value);
lua_pushnumber(L, damage.secondary.type);
lua_pushnumber(L, damage.origin);
}
void LuaScriptInterface::pushInstantSpell(lua_State* L, const InstantSpell& spell)
{
lua_createtable(L, 0, 7);
setField(L, "name", spell.getName());
setField(L, "words", spell.getWords());
setField(L, "level", spell.getLevel());
setField(L, "mlevel", spell.getMagicLevel());
setField(L, "mana", spell.getMana());
setField(L, "manapercent", spell.getManaPercent());
setField(L, "params", spell.getHasParam());
setMetatable(L, -1, "Spell");
}
void LuaScriptInterface::pushPosition(lua_State* L, const Position& position, int32_t stackpos/* = 0*/)
{
lua_createtable(L, 0, 4);
setField(L, "x", position.x);
setField(L, "y", position.y);
setField(L, "z", position.z);
setField(L, "stackpos", stackpos);
setMetatable(L, -1, "Position");
}
void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit_t& outfit)
{
lua_createtable(L, 0, 8);
setField(L, "lookType", outfit.lookType);
setField(L, "lookTypeEx", outfit.lookTypeEx);
setField(L, "lookHead", outfit.lookHead);
setField(L, "lookBody", outfit.lookBody);
setField(L, "lookLegs", outfit.lookLegs);
setField(L, "lookFeet", outfit.lookFeet);
setField(L, "lookAddons", outfit.lookAddons);
setField(L, "lookMount", outfit.lookMount);
}
void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit* outfit)
{
lua_createtable(L, 0, 4);
setField(L, "lookType", outfit->lookType);
setField(L, "name", outfit->name);
setField(L, "premium", outfit->premium);
setField(L, "unlocked", outfit->unlocked);
setMetatable(L, -1, "Outfit");
}
void LuaScriptInterface::pushLoot(lua_State* L, const std::vector<LootBlock>& lootList)
{
lua_createtable(L, lootList.size(), 0);
int index = 0;
for (const auto& lootBlock : lootList) {
lua_createtable(L, 0, 7);
setField(L, "itemId", lootBlock.id);
setField(L, "chance", lootBlock.chance);
setField(L, "subType", lootBlock.subType);
setField(L, "maxCount", lootBlock.countmax);
setField(L, "actionId", lootBlock.actionId);
setField(L, "text", lootBlock.text);
pushLoot(L, lootBlock.childLoot);
lua_setfield(L, -2, "childLoot");
lua_rawseti(L, -2, ++index);
}
}
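// #value stringizes the enum and substr strips any qualifier, so e.g.
// registerEnumIn("configKeys", ConfigManager::MAP_NAME) exposes the value
// to Lua as configKeys.MAP_NAME.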
#define registerEnum(value) { std::string enumName = #value; registerGlobalVariable(enumName.substr(enumName.find_last_of(':') + 1), value); }
#define registerEnumIn(tableName, value) { std::string enumName = #value; registerVariable(tableName, enumName.substr(enumName.find_last_of(':') + 1), value); }
void LuaScriptInterface::registerFunctions()
{
//doPlayerAddItem(uid, itemid, <optional: default: 1> count/subtype)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1> subtype)
//Returns uid of the created item
lua_register(luaState, "doPlayerAddItem", LuaScriptInterface::luaDoPlayerAddItem);
//isValidUID(uid)
lua_register(luaState, "isValidUID", LuaScriptInterface::luaIsValidUID);
//isDepot(uid)
lua_register(luaState, "isDepot", LuaScriptInterface::luaIsDepot);
//isMovable(uid)
lua_register(luaState, "isMovable", LuaScriptInterface::luaIsMoveable);
//doAddContainerItem(uid, itemid, <optional> count/subtype)
lua_register(luaState, "doAddContainerItem", LuaScriptInterface::luaDoAddContainerItem);
//getDepotId(uid)
lua_register(luaState, "getDepotId", LuaScriptInterface::luaGetDepotId);
//getWorldTime()
lua_register(luaState, "getWorldTime", LuaScriptInterface::luaGetWorldTime);
//getWorldLight()
lua_register(luaState, "getWorldLight", LuaScriptInterface::luaGetWorldLight);
//setWorldLight(level, color)
lua_register(luaState, "setWorldLight", LuaScriptInterface::luaSetWorldLight);
//getWorldUpTime()
lua_register(luaState, "getWorldUpTime", LuaScriptInterface::luaGetWorldUpTime);
//getSubTypeName(subType)
lua_register(luaState, "getSubTypeName", LuaScriptInterface::luaGetSubTypeName);
//createCombatArea( {area}, <optional> {extArea} )
lua_register(luaState, "createCombatArea", LuaScriptInterface::luaCreateCombatArea);
//doAreaCombat(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
lua_register(luaState, "doAreaCombat", LuaScriptInterface::luaDoAreaCombat);
//doTargetCombat(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
lua_register(luaState, "doTargetCombat", LuaScriptInterface::luaDoTargetCombat);
//doChallengeCreature(cid, target[, force = false])
lua_register(luaState, "doChallengeCreature", LuaScriptInterface::luaDoChallengeCreature);
//addEvent(callback, delay, ...)
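//e.g. (Lua): addEvent(function(msg) print(msg) end, 1000, "hi") -- runs ~1000ms later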
lua_register(luaState, "addEvent", LuaScriptInterface::luaAddEvent);
//stopEvent(eventid)
lua_register(luaState, "stopEvent", LuaScriptInterface::luaStopEvent);
//saveServer()
lua_register(luaState, "saveServer", LuaScriptInterface::luaSaveServer);
//cleanMap()
lua_register(luaState, "cleanMap", LuaScriptInterface::luaCleanMap);
//debugPrint(text)
lua_register(luaState, "debugPrint", LuaScriptInterface::luaDebugPrint);
//isInWar(cid, target)
lua_register(luaState, "isInWar", LuaScriptInterface::luaIsInWar);
//getWaypointPosition(name)
lua_register(luaState, "getWaypointPositionByName", LuaScriptInterface::luaGetWaypointPositionByName);
//sendChannelMessage(channelId, type, message)
lua_register(luaState, "sendChannelMessage", LuaScriptInterface::luaSendChannelMessage);
//sendGuildChannelMessage(guildId, type, message)
lua_register(luaState, "sendGuildChannelMessage", LuaScriptInterface::luaSendGuildChannelMessage);
//isScriptsInterface()
lua_register(luaState, "isScriptsInterface", LuaScriptInterface::luaIsScriptsInterface);
#ifndef LUAJIT_VERSION
//bit operations for Lua, based on bitlib project release 24
//bit.bnot, bit.band, bit.bor, bit.bxor, bit.lshift, bit.rshift
luaL_register(luaState, "bit", LuaScriptInterface::luaBitReg);
lua_pop(luaState, 1);
#endif
//configManager table
luaL_register(luaState, "configManager", LuaScriptInterface::luaConfigManagerTable);
lua_pop(luaState, 1);
//db table
luaL_register(luaState, "db", LuaScriptInterface::luaDatabaseTable);
lua_pop(luaState, 1);
//result table
luaL_register(luaState, "result", LuaScriptInterface::luaResultTable);
lua_pop(luaState, 1);
/* New functions */
//registerClass(className, baseClass, newFunction)
//registerTable(tableName)
//registerMethod(className, functionName, function)
//registerMetaMethod(className, functionName, function)
//registerGlobalMethod(functionName, function)
//registerVariable(tableName, name, value)
//registerGlobalVariable(name, value)
//registerEnum(value)
//registerEnumIn(tableName, value)
// Enums
registerEnum(ACCOUNT_TYPE_NORMAL)
registerEnum(ACCOUNT_TYPE_TUTOR)
registerEnum(ACCOUNT_TYPE_SENIORTUTOR)
registerEnum(ACCOUNT_TYPE_GAMEMASTER)
registerEnum(ACCOUNT_TYPE_COMMUNITYMANAGER)
registerEnum(ACCOUNT_TYPE_GOD)
registerEnum(AMMO_NONE)
registerEnum(AMMO_BOLT)
registerEnum(AMMO_ARROW)
registerEnum(AMMO_SPEAR)
registerEnum(AMMO_THROWINGSTAR)
registerEnum(AMMO_THROWINGKNIFE)
registerEnum(AMMO_STONE)
registerEnum(AMMO_SNOWBALL)
registerEnum(BUG_CATEGORY_MAP)
registerEnum(BUG_CATEGORY_TYPO)
registerEnum(BUG_CATEGORY_TECHNICAL)
registerEnum(BUG_CATEGORY_OTHER)
registerEnum(CALLBACK_PARAM_LEVELMAGICVALUE)
registerEnum(CALLBACK_PARAM_SKILLVALUE)
registerEnum(CALLBACK_PARAM_TARGETTILE)
registerEnum(CALLBACK_PARAM_TARGETCREATURE)
registerEnum(COMBAT_FORMULA_UNDEFINED)
registerEnum(COMBAT_FORMULA_LEVELMAGIC)
registerEnum(COMBAT_FORMULA_SKILL)
registerEnum(COMBAT_FORMULA_DAMAGE)
registerEnum(DIRECTION_NORTH)
registerEnum(DIRECTION_EAST)
registerEnum(DIRECTION_SOUTH)
registerEnum(DIRECTION_WEST)
registerEnum(DIRECTION_SOUTHWEST)
registerEnum(DIRECTION_SOUTHEAST)
registerEnum(DIRECTION_NORTHWEST)
registerEnum(DIRECTION_NORTHEAST)
registerEnum(COMBAT_NONE)
registerEnum(COMBAT_PHYSICALDAMAGE)
registerEnum(COMBAT_ENERGYDAMAGE)
registerEnum(COMBAT_EARTHDAMAGE)
registerEnum(COMBAT_FIREDAMAGE)
registerEnum(COMBAT_UNDEFINEDDAMAGE)
registerEnum(COMBAT_LIFEDRAIN)
registerEnum(COMBAT_MANADRAIN)
registerEnum(COMBAT_HEALING)
registerEnum(COMBAT_DROWNDAMAGE)
registerEnum(COMBAT_ICEDAMAGE)
registerEnum(COMBAT_HOLYDAMAGE)
registerEnum(COMBAT_DEATHDAMAGE)
registerEnum(COMBAT_PARAM_TYPE)
registerEnum(COMBAT_PARAM_EFFECT)
registerEnum(COMBAT_PARAM_DISTANCEEFFECT)
registerEnum(COMBAT_PARAM_BLOCKSHIELD)
registerEnum(COMBAT_PARAM_BLOCKARMOR)
registerEnum(COMBAT_PARAM_TARGETCASTERORTOPMOST)
registerEnum(COMBAT_PARAM_CREATEITEM)
registerEnum(COMBAT_PARAM_AGGRESSIVE)
registerEnum(COMBAT_PARAM_DISPEL)
registerEnum(COMBAT_PARAM_USECHARGES)
registerEnum(CONDITION_NONE)
registerEnum(CONDITION_POISON)
registerEnum(CONDITION_FIRE)
registerEnum(CONDITION_ENERGY)
registerEnum(CONDITION_BLEEDING)
registerEnum(CONDITION_HASTE)
registerEnum(CONDITION_PARALYZE)
registerEnum(CONDITION_OUTFIT)
registerEnum(CONDITION_INVISIBLE)
registerEnum(CONDITION_LIGHT)
registerEnum(CONDITION_MANASHIELD)
registerEnum(CONDITION_INFIGHT)
registerEnum(CONDITION_DRUNK)
registerEnum(CONDITION_EXHAUST_WEAPON)
registerEnum(CONDITION_REGENERATION)
registerEnum(CONDITION_SOUL)
registerEnum(CONDITION_DROWN)
registerEnum(CONDITION_MUTED)
registerEnum(CONDITION_CHANNELMUTEDTICKS)
registerEnum(CONDITION_YELLTICKS)
registerEnum(CONDITION_ATTRIBUTES)
registerEnum(CONDITION_FREEZING)
registerEnum(CONDITION_DAZZLED)
registerEnum(CONDITION_CURSED)
registerEnum(CONDITION_EXHAUST_COMBAT)
registerEnum(CONDITION_EXHAUST_HEAL)
registerEnum(CONDITION_PACIFIED)
registerEnum(CONDITION_SPELLCOOLDOWN)
registerEnum(CONDITION_SPELLGROUPCOOLDOWN)
registerEnum(CONDITIONID_DEFAULT)
registerEnum(CONDITIONID_COMBAT)
registerEnum(CONDITIONID_HEAD)
registerEnum(CONDITIONID_NECKLACE)
registerEnum(CONDITIONID_BACKPACK)
registerEnum(CONDITIONID_ARMOR)
registerEnum(CONDITIONID_RIGHT)
registerEnum(CONDITIONID_LEFT)
registerEnum(CONDITIONID_LEGS)
registerEnum(CONDITIONID_FEET)
registerEnum(CONDITIONID_RING)
registerEnum(CONDITIONID_AMMO)
registerEnum(CONDITION_PARAM_OWNER)
registerEnum(CONDITION_PARAM_TICKS)
registerEnum(CONDITION_PARAM_DRUNKENNESS)
registerEnum(CONDITION_PARAM_HEALTHGAIN)
registerEnum(CONDITION_PARAM_HEALTHTICKS)
registerEnum(CONDITION_PARAM_MANAGAIN)
registerEnum(CONDITION_PARAM_MANATICKS)
registerEnum(CONDITION_PARAM_DELAYED)
registerEnum(CONDITION_PARAM_SPEED)
registerEnum(CONDITION_PARAM_LIGHT_LEVEL)
registerEnum(CONDITION_PARAM_LIGHT_COLOR)
registerEnum(CONDITION_PARAM_SOULGAIN)
registerEnum(CONDITION_PARAM_SOULTICKS)
registerEnum(CONDITION_PARAM_MINVALUE)
registerEnum(CONDITION_PARAM_MAXVALUE)
registerEnum(CONDITION_PARAM_STARTVALUE)
registerEnum(CONDITION_PARAM_TICKINTERVAL)
registerEnum(CONDITION_PARAM_FORCEUPDATE)
registerEnum(CONDITION_PARAM_SKILL_MELEE)
registerEnum(CONDITION_PARAM_SKILL_FIST)
registerEnum(CONDITION_PARAM_SKILL_CLUB)
registerEnum(CONDITION_PARAM_SKILL_SWORD)
registerEnum(CONDITION_PARAM_SKILL_AXE)
registerEnum(CONDITION_PARAM_SKILL_DISTANCE)
registerEnum(CONDITION_PARAM_SKILL_SHIELD)
registerEnum(CONDITION_PARAM_SKILL_FISHING)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTS)
registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTSPERCENT)
registerEnum(CONDITION_PARAM_STAT_MAGICPOINTSPERCENT)
registerEnum(CONDITION_PARAM_PERIODICDAMAGE)
registerEnum(CONDITION_PARAM_SKILL_MELEEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISTPERCENT)
registerEnum(CONDITION_PARAM_SKILL_CLUBPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SWORDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_AXEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_DISTANCEPERCENT)
registerEnum(CONDITION_PARAM_SKILL_SHIELDPERCENT)
registerEnum(CONDITION_PARAM_SKILL_FISHINGPERCENT)
registerEnum(CONDITION_PARAM_BUFF_SPELL)
registerEnum(CONDITION_PARAM_SUBID)
registerEnum(CONDITION_PARAM_FIELD)
registerEnum(CONDITION_PARAM_DISABLE_DEFENSE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_CRITICALHITCHANCE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_CRITICALHITAMOUNT)
registerEnum(CONDITION_PARAM_SPECIALSKILL_LIFELEECHCHANCE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_LIFELEECHAMOUNT)
registerEnum(CONDITION_PARAM_SPECIALSKILL_MANALEECHCHANCE)
registerEnum(CONDITION_PARAM_SPECIALSKILL_MANALEECHAMOUNT)
registerEnum(CONDITION_PARAM_AGGRESSIVE)
registerEnum(CONST_ME_NONE)
registerEnum(CONST_ME_DRAWBLOOD)
registerEnum(CONST_ME_LOSEENERGY)
registerEnum(CONST_ME_POFF)
registerEnum(CONST_ME_BLOCKHIT)
registerEnum(CONST_ME_EXPLOSIONAREA)
registerEnum(CONST_ME_EXPLOSIONHIT)
registerEnum(CONST_ME_FIREAREA)
registerEnum(CONST_ME_YELLOW_RINGS)
registerEnum(CONST_ME_GREEN_RINGS)
registerEnum(CONST_ME_HITAREA)
registerEnum(CONST_ME_TELEPORT)
registerEnum(CONST_ME_ENERGYHIT)
registerEnum(CONST_ME_MAGIC_BLUE)
registerEnum(CONST_ME_MAGIC_RED)
registerEnum(CONST_ME_MAGIC_GREEN)
registerEnum(CONST_ME_HITBYFIRE)
registerEnum(CONST_ME_HITBYPOISON)
registerEnum(CONST_ME_MORTAREA)
registerEnum(CONST_ME_SOUND_GREEN)
registerEnum(CONST_ME_SOUND_RED)
registerEnum(CONST_ME_POISONAREA)
registerEnum(CONST_ME_SOUND_YELLOW)
registerEnum(CONST_ME_SOUND_PURPLE)
registerEnum(CONST_ME_SOUND_BLUE)
registerEnum(CONST_ME_SOUND_WHITE)
registerEnum(CONST_ME_BUBBLES)
registerEnum(CONST_ME_CRAPS)
registerEnum(CONST_ME_GIFT_WRAPS)
registerEnum(CONST_ME_FIREWORK_YELLOW)
registerEnum(CONST_ME_FIREWORK_RED)
registerEnum(CONST_ME_FIREWORK_BLUE)
registerEnum(CONST_ME_STUN)
registerEnum(CONST_ME_SLEEP)
registerEnum(CONST_ME_WATERCREATURE)
registerEnum(CONST_ME_GROUNDSHAKER)
registerEnum(CONST_ME_HEARTS)
registerEnum(CONST_ME_FIREATTACK)
registerEnum(CONST_ME_ENERGYAREA)
registerEnum(CONST_ME_SMALLCLOUDS)
registerEnum(CONST_ME_HOLYDAMAGE)
registerEnum(CONST_ME_BIGCLOUDS)
registerEnum(CONST_ME_ICEAREA)
registerEnum(CONST_ME_ICETORNADO)
registerEnum(CONST_ME_ICEATTACK)
registerEnum(CONST_ME_STONES)
registerEnum(CONST_ME_SMALLPLANTS)
registerEnum(CONST_ME_CARNIPHILA)
registerEnum(CONST_ME_PURPLEENERGY)
registerEnum(CONST_ME_YELLOWENERGY)
registerEnum(CONST_ME_HOLYAREA)
registerEnum(CONST_ME_BIGPLANTS)
registerEnum(CONST_ME_CAKE)
registerEnum(CONST_ME_GIANTICE)
registerEnum(CONST_ME_WATERSPLASH)
registerEnum(CONST_ME_PLANTATTACK)
registerEnum(CONST_ME_TUTORIALARROW)
registerEnum(CONST_ME_TUTORIALSQUARE)
registerEnum(CONST_ME_MIRRORHORIZONTAL)
registerEnum(CONST_ME_MIRRORVERTICAL)
registerEnum(CONST_ME_SKULLHORIZONTAL)
registerEnum(CONST_ME_SKULLVERTICAL)
registerEnum(CONST_ME_ASSASSIN)
registerEnum(CONST_ME_STEPSHORIZONTAL)
registerEnum(CONST_ME_BLOODYSTEPS)
registerEnum(CONST_ME_STEPSVERTICAL)
registerEnum(CONST_ME_YALAHARIGHOST)
registerEnum(CONST_ME_BATS)
registerEnum(CONST_ME_SMOKE)
registerEnum(CONST_ME_INSECTS)
registerEnum(CONST_ME_DRAGONHEAD)
registerEnum(CONST_ME_ORCSHAMAN)
registerEnum(CONST_ME_ORCSHAMAN_FIRE)
registerEnum(CONST_ME_THUNDER)
registerEnum(CONST_ME_FERUMBRAS)
registerEnum(CONST_ME_CONFETTI_HORIZONTAL)
registerEnum(CONST_ME_CONFETTI_VERTICAL)
registerEnum(CONST_ME_BLACKSMOKE)
registerEnum(CONST_ME_REDSMOKE)
registerEnum(CONST_ME_YELLOWSMOKE)
registerEnum(CONST_ME_GREENSMOKE)
registerEnum(CONST_ME_PURPLESMOKE)
registerEnum(CONST_ME_EARLY_THUNDER)
registerEnum(CONST_ME_RAGIAZ_BONECAPSULE)
registerEnum(CONST_ME_CRITICAL_DAMAGE)
registerEnum(CONST_ME_PLUNGING_FISH)
registerEnum(CONST_ANI_NONE)
registerEnum(CONST_ANI_SPEAR)
registerEnum(CONST_ANI_BOLT)
registerEnum(CONST_ANI_ARROW)
registerEnum(CONST_ANI_FIRE)
registerEnum(CONST_ANI_ENERGY)
registerEnum(CONST_ANI_POISONARROW)
registerEnum(CONST_ANI_BURSTARROW)
registerEnum(CONST_ANI_THROWINGSTAR)
registerEnum(CONST_ANI_THROWINGKNIFE)
registerEnum(CONST_ANI_SMALLSTONE)
registerEnum(CONST_ANI_DEATH)
registerEnum(CONST_ANI_LARGEROCK)
registerEnum(CONST_ANI_SNOWBALL)
registerEnum(CONST_ANI_POWERBOLT)
registerEnum(CONST_ANI_POISON)
registerEnum(CONST_ANI_INFERNALBOLT)
registerEnum(CONST_ANI_HUNTINGSPEAR)
registerEnum(CONST_ANI_ENCHANTEDSPEAR)
registerEnum(CONST_ANI_REDSTAR)
registerEnum(CONST_ANI_GREENSTAR)
registerEnum(CONST_ANI_ROYALSPEAR)
registerEnum(CONST_ANI_SNIPERARROW)
registerEnum(CONST_ANI_ONYXARROW)
registerEnum(CONST_ANI_PIERCINGBOLT)
registerEnum(CONST_ANI_WHIRLWINDSWORD)
registerEnum(CONST_ANI_WHIRLWINDAXE)
registerEnum(CONST_ANI_WHIRLWINDCLUB)
registerEnum(CONST_ANI_ETHEREALSPEAR)
registerEnum(CONST_ANI_ICE)
registerEnum(CONST_ANI_EARTH)
registerEnum(CONST_ANI_HOLY)
registerEnum(CONST_ANI_SUDDENDEATH)
registerEnum(CONST_ANI_FLASHARROW)
registerEnum(CONST_ANI_FLAMMINGARROW)
registerEnum(CONST_ANI_SHIVERARROW)
registerEnum(CONST_ANI_ENERGYBALL)
registerEnum(CONST_ANI_SMALLICE)
registerEnum(CONST_ANI_SMALLHOLY)
registerEnum(CONST_ANI_SMALLEARTH)
registerEnum(CONST_ANI_EARTHARROW)
registerEnum(CONST_ANI_EXPLOSION)
registerEnum(CONST_ANI_CAKE)
registerEnum(CONST_ANI_TARSALARROW)
registerEnum(CONST_ANI_VORTEXBOLT)
registerEnum(CONST_ANI_PRISMATICBOLT)
registerEnum(CONST_ANI_CRYSTALLINEARROW)
registerEnum(CONST_ANI_DRILLBOLT)
registerEnum(CONST_ANI_ENVENOMEDARROW)
registerEnum(CONST_ANI_GLOOTHSPEAR)
registerEnum(CONST_ANI_SIMPLEARROW)
registerEnum(CONST_ANI_WEAPONTYPE)
registerEnum(CONST_PROP_BLOCKSOLID)
registerEnum(CONST_PROP_HASHEIGHT)
registerEnum(CONST_PROP_BLOCKPROJECTILE)
registerEnum(CONST_PROP_BLOCKPATH)
registerEnum(CONST_PROP_ISVERTICAL)
registerEnum(CONST_PROP_ISHORIZONTAL)
registerEnum(CONST_PROP_MOVEABLE)
registerEnum(CONST_PROP_IMMOVABLEBLOCKSOLID)
registerEnum(CONST_PROP_IMMOVABLEBLOCKPATH)
registerEnum(CONST_PROP_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(CONST_PROP_NOFIELDBLOCKPATH)
registerEnum(CONST_PROP_SUPPORTHANGABLE)
registerEnum(CONST_SLOT_HEAD)
registerEnum(CONST_SLOT_NECKLACE)
registerEnum(CONST_SLOT_BACKPACK)
registerEnum(CONST_SLOT_ARMOR)
registerEnum(CONST_SLOT_RIGHT)
registerEnum(CONST_SLOT_LEFT)
registerEnum(CONST_SLOT_LEGS)
registerEnum(CONST_SLOT_FEET)
registerEnum(CONST_SLOT_RING)
registerEnum(CONST_SLOT_AMMO)
registerEnum(CREATURE_EVENT_NONE)
registerEnum(CREATURE_EVENT_LOGIN)
registerEnum(CREATURE_EVENT_LOGOUT)
registerEnum(CREATURE_EVENT_THINK)
registerEnum(CREATURE_EVENT_PREPAREDEATH)
registerEnum(CREATURE_EVENT_DEATH)
registerEnum(CREATURE_EVENT_KILL)
registerEnum(CREATURE_EVENT_ADVANCE)
registerEnum(CREATURE_EVENT_MODALWINDOW)
registerEnum(CREATURE_EVENT_TEXTEDIT)
registerEnum(CREATURE_EVENT_HEALTHCHANGE)
registerEnum(CREATURE_EVENT_MANACHANGE)
registerEnum(CREATURE_EVENT_EXTENDED_OPCODE)
registerEnum(GAME_STATE_STARTUP)
registerEnum(GAME_STATE_INIT)
registerEnum(GAME_STATE_NORMAL)
registerEnum(GAME_STATE_CLOSED)
registerEnum(GAME_STATE_SHUTDOWN)
registerEnum(GAME_STATE_CLOSING)
registerEnum(GAME_STATE_MAINTAIN)
registerEnum(MESSAGE_STATUS_CONSOLE_BLUE)
registerEnum(MESSAGE_STATUS_CONSOLE_RED)
registerEnum(MESSAGE_STATUS_DEFAULT)
registerEnum(MESSAGE_STATUS_WARNING)
registerEnum(MESSAGE_EVENT_ADVANCE)
registerEnum(MESSAGE_STATUS_SMALL)
registerEnum(MESSAGE_INFO_DESCR)
registerEnum(MESSAGE_DAMAGE_DEALT)
registerEnum(MESSAGE_DAMAGE_RECEIVED)
registerEnum(MESSAGE_HEALED)
registerEnum(MESSAGE_EXPERIENCE)
registerEnum(MESSAGE_DAMAGE_OTHERS)
registerEnum(MESSAGE_HEALED_OTHERS)
registerEnum(MESSAGE_EXPERIENCE_OTHERS)
registerEnum(MESSAGE_EVENT_DEFAULT)
registerEnum(MESSAGE_GUILD)
registerEnum(MESSAGE_PARTY_MANAGEMENT)
registerEnum(MESSAGE_PARTY)
registerEnum(MESSAGE_EVENT_ORANGE)
registerEnum(MESSAGE_STATUS_CONSOLE_ORANGE)
registerEnum(MESSAGE_LOOT)
registerEnum(CREATURETYPE_PLAYER)
registerEnum(CREATURETYPE_MONSTER)
registerEnum(CREATURETYPE_NPC)
registerEnum(CREATURETYPE_SUMMON_OWN)
registerEnum(CREATURETYPE_SUMMON_OTHERS)
registerEnum(CLIENTOS_LINUX)
registerEnum(CLIENTOS_WINDOWS)
registerEnum(CLIENTOS_FLASH)
registerEnum(CLIENTOS_OTCLIENT_LINUX)
registerEnum(CLIENTOS_OTCLIENT_WINDOWS)
registerEnum(CLIENTOS_OTCLIENT_MAC)
registerEnum(FIGHTMODE_ATTACK)
registerEnum(FIGHTMODE_BALANCED)
registerEnum(FIGHTMODE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_NONE)
registerEnum(ITEM_ATTRIBUTE_ACTIONID)
registerEnum(ITEM_ATTRIBUTE_UNIQUEID)
registerEnum(ITEM_ATTRIBUTE_DESCRIPTION)
registerEnum(ITEM_ATTRIBUTE_TEXT)
registerEnum(ITEM_ATTRIBUTE_DATE)
registerEnum(ITEM_ATTRIBUTE_WRITER)
registerEnum(ITEM_ATTRIBUTE_NAME)
registerEnum(ITEM_ATTRIBUTE_ARTICLE)
registerEnum(ITEM_ATTRIBUTE_PLURALNAME)
registerEnum(ITEM_ATTRIBUTE_WEIGHT)
registerEnum(ITEM_ATTRIBUTE_ATTACK)
registerEnum(ITEM_ATTRIBUTE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_EXTRADEFENSE)
registerEnum(ITEM_ATTRIBUTE_ARMOR)
registerEnum(ITEM_ATTRIBUTE_HITCHANCE)
registerEnum(ITEM_ATTRIBUTE_SHOOTRANGE)
registerEnum(ITEM_ATTRIBUTE_OWNER)
registerEnum(ITEM_ATTRIBUTE_DURATION)
registerEnum(ITEM_ATTRIBUTE_DECAYSTATE)
registerEnum(ITEM_ATTRIBUTE_CORPSEOWNER)
registerEnum(ITEM_ATTRIBUTE_CHARGES)
registerEnum(ITEM_ATTRIBUTE_FLUIDTYPE)
registerEnum(ITEM_ATTRIBUTE_DOORID)
registerEnum(ITEM_ATTRIBUTE_DECAYTO)
registerEnum(ITEM_ATTRIBUTE_WRAPID)
registerEnum(ITEM_ATTRIBUTE_STOREITEM)
registerEnum(ITEM_ATTRIBUTE_ATTACK_SPEED)
registerEnum(ITEM_TYPE_DEPOT)
registerEnum(ITEM_TYPE_MAILBOX)
registerEnum(ITEM_TYPE_TRASHHOLDER)
registerEnum(ITEM_TYPE_CONTAINER)
registerEnum(ITEM_TYPE_DOOR)
registerEnum(ITEM_TYPE_MAGICFIELD)
registerEnum(ITEM_TYPE_TELEPORT)
registerEnum(ITEM_TYPE_BED)
registerEnum(ITEM_TYPE_KEY)
registerEnum(ITEM_TYPE_RUNE)
registerEnum(ITEM_GROUP_GROUND)
registerEnum(ITEM_GROUP_CONTAINER)
registerEnum(ITEM_GROUP_WEAPON)
registerEnum(ITEM_GROUP_AMMUNITION)
registerEnum(ITEM_GROUP_ARMOR)
registerEnum(ITEM_GROUP_CHARGES)
registerEnum(ITEM_GROUP_TELEPORT)
registerEnum(ITEM_GROUP_MAGICFIELD)
registerEnum(ITEM_GROUP_WRITEABLE)
registerEnum(ITEM_GROUP_KEY)
registerEnum(ITEM_GROUP_SPLASH)
registerEnum(ITEM_GROUP_FLUID)
registerEnum(ITEM_GROUP_DOOR)
registerEnum(ITEM_GROUP_DEPRECATED)
registerEnum(ITEM_BROWSEFIELD)
registerEnum(ITEM_BAG)
registerEnum(ITEM_SHOPPING_BAG)
registerEnum(ITEM_GOLD_COIN)
registerEnum(ITEM_PLATINUM_COIN)
registerEnum(ITEM_CRYSTAL_COIN)
registerEnum(ITEM_AMULETOFLOSS)
registerEnum(ITEM_PARCEL)
registerEnum(ITEM_LABEL)
registerEnum(ITEM_FIREFIELD_PVP_FULL)
registerEnum(ITEM_FIREFIELD_PVP_MEDIUM)
registerEnum(ITEM_FIREFIELD_PVP_SMALL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_FULL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_MEDIUM)
registerEnum(ITEM_FIREFIELD_PERSISTENT_SMALL)
registerEnum(ITEM_FIREFIELD_NOPVP)
registerEnum(ITEM_POISONFIELD_PVP)
registerEnum(ITEM_POISONFIELD_PERSISTENT)
registerEnum(ITEM_POISONFIELD_NOPVP)
registerEnum(ITEM_ENERGYFIELD_PVP)
registerEnum(ITEM_ENERGYFIELD_PERSISTENT)
registerEnum(ITEM_ENERGYFIELD_NOPVP)
registerEnum(ITEM_MAGICWALL)
registerEnum(ITEM_MAGICWALL_PERSISTENT)
registerEnum(ITEM_MAGICWALL_SAFE)
registerEnum(ITEM_WILDGROWTH)
registerEnum(ITEM_WILDGROWTH_PERSISTENT)
registerEnum(ITEM_WILDGROWTH_SAFE)
registerEnum(WIELDINFO_NONE)
registerEnum(WIELDINFO_LEVEL)
registerEnum(WIELDINFO_MAGLV)
registerEnum(WIELDINFO_VOCREQ)
registerEnum(WIELDINFO_PREMIUM)
registerEnum(PlayerFlag_CannotUseCombat)
registerEnum(PlayerFlag_CannotAttackPlayer)
registerEnum(PlayerFlag_CannotAttackMonster)
registerEnum(PlayerFlag_CannotBeAttacked)
registerEnum(PlayerFlag_CanConvinceAll)
registerEnum(PlayerFlag_CanSummonAll)
registerEnum(PlayerFlag_CanIllusionAll)
registerEnum(PlayerFlag_CanSenseInvisibility)
registerEnum(PlayerFlag_IgnoredByMonsters)
registerEnum(PlayerFlag_NotGainInFight)
registerEnum(PlayerFlag_HasInfiniteMana)
registerEnum(PlayerFlag_HasInfiniteSoul)
registerEnum(PlayerFlag_HasNoExhaustion)
registerEnum(PlayerFlag_CannotUseSpells)
registerEnum(PlayerFlag_CannotPickupItem)
registerEnum(PlayerFlag_CanAlwaysLogin)
registerEnum(PlayerFlag_CanBroadcast)
registerEnum(PlayerFlag_CanEditHouses)
registerEnum(PlayerFlag_CannotBeBanned)
registerEnum(PlayerFlag_CannotBePushed)
registerEnum(PlayerFlag_HasInfiniteCapacity)
registerEnum(PlayerFlag_CanPushAllCreatures)
registerEnum(PlayerFlag_CanTalkRedPrivate)
registerEnum(PlayerFlag_CanTalkRedChannel)
registerEnum(PlayerFlag_TalkOrangeHelpChannel)
registerEnum(PlayerFlag_NotGainExperience)
registerEnum(PlayerFlag_NotGainMana)
registerEnum(PlayerFlag_NotGainHealth)
registerEnum(PlayerFlag_NotGainSkill)
registerEnum(PlayerFlag_SetMaxSpeed)
registerEnum(PlayerFlag_SpecialVIP)
registerEnum(PlayerFlag_NotGenerateLoot)
registerEnum(PlayerFlag_IgnoreProtectionZone)
registerEnum(PlayerFlag_IgnoreSpellCheck)
registerEnum(PlayerFlag_IgnoreWeaponCheck)
registerEnum(PlayerFlag_CannotBeMuted)
registerEnum(PlayerFlag_IsAlwaysPremium)
registerEnum(PlayerFlag_IgnoreYellCheck)
registerEnum(PlayerFlag_IgnoreSendPrivateCheck)
registerEnum(PLAYERSEX_FEMALE)
registerEnum(PLAYERSEX_MALE)
registerEnum(REPORT_REASON_NAMEINAPPROPRIATE)
registerEnum(REPORT_REASON_NAMEPOORFORMATTED)
registerEnum(REPORT_REASON_NAMEADVERTISING)
registerEnum(REPORT_REASON_NAMEUNFITTING)
registerEnum(REPORT_REASON_NAMERULEVIOLATION)
registerEnum(REPORT_REASON_INSULTINGSTATEMENT)
registerEnum(REPORT_REASON_SPAMMING)
registerEnum(REPORT_REASON_ADVERTISINGSTATEMENT)
registerEnum(REPORT_REASON_UNFITTINGSTATEMENT)
registerEnum(REPORT_REASON_LANGUAGESTATEMENT)
registerEnum(REPORT_REASON_DISCLOSURE)
registerEnum(REPORT_REASON_RULEVIOLATION)
registerEnum(REPORT_REASON_STATEMENT_BUGABUSE)
registerEnum(REPORT_REASON_UNOFFICIALSOFTWARE)
registerEnum(REPORT_REASON_PRETENDING)
registerEnum(REPORT_REASON_HARASSINGOWNERS)
registerEnum(REPORT_REASON_FALSEINFO)
registerEnum(REPORT_REASON_ACCOUNTSHARING)
registerEnum(REPORT_REASON_STEALINGDATA)
registerEnum(REPORT_REASON_SERVICEATTACKING)
registerEnum(REPORT_REASON_SERVICEAGREEMENT)
registerEnum(REPORT_TYPE_NAME)
registerEnum(REPORT_TYPE_STATEMENT)
registerEnum(REPORT_TYPE_BOT)
registerEnum(VOCATION_NONE)
registerEnum(SKILL_FIST)
registerEnum(SKILL_CLUB)
registerEnum(SKILL_SWORD)
registerEnum(SKILL_AXE)
registerEnum(SKILL_DISTANCE)
registerEnum(SKILL_SHIELD)
registerEnum(SKILL_FISHING)
registerEnum(SKILL_MAGLEVEL)
registerEnum(SKILL_LEVEL)
registerEnum(SPECIALSKILL_CRITICALHITCHANCE)
registerEnum(SPECIALSKILL_CRITICALHITAMOUNT)
registerEnum(SPECIALSKILL_LIFELEECHCHANCE)
registerEnum(SPECIALSKILL_LIFELEECHAMOUNT)
registerEnum(SPECIALSKILL_MANALEECHCHANCE)
registerEnum(SPECIALSKILL_MANALEECHAMOUNT)
registerEnum(SKULL_NONE)
registerEnum(SKULL_YELLOW)
registerEnum(SKULL_GREEN)
registerEnum(SKULL_WHITE)
registerEnum(SKULL_RED)
registerEnum(SKULL_BLACK)
registerEnum(SKULL_ORANGE)
registerEnum(TALKTYPE_SAY)
registerEnum(TALKTYPE_WHISPER)
registerEnum(TALKTYPE_YELL)
registerEnum(TALKTYPE_PRIVATE_FROM)
registerEnum(TALKTYPE_PRIVATE_TO)
registerEnum(TALKTYPE_CHANNEL_Y)
registerEnum(TALKTYPE_CHANNEL_O)
registerEnum(TALKTYPE_PRIVATE_NP)
registerEnum(TALKTYPE_PRIVATE_PN)
registerEnum(TALKTYPE_BROADCAST)
registerEnum(TALKTYPE_CHANNEL_R1)
registerEnum(TALKTYPE_PRIVATE_RED_FROM)
registerEnum(TALKTYPE_PRIVATE_RED_TO)
registerEnum(TALKTYPE_MONSTER_SAY)
registerEnum(TALKTYPE_MONSTER_YELL)
registerEnum(TEXTCOLOR_BLUE)
registerEnum(TEXTCOLOR_LIGHTGREEN)
registerEnum(TEXTCOLOR_LIGHTBLUE)
registerEnum(TEXTCOLOR_MAYABLUE)
registerEnum(TEXTCOLOR_DARKRED)
registerEnum(TEXTCOLOR_LIGHTGREY)
registerEnum(TEXTCOLOR_SKYBLUE)
registerEnum(TEXTCOLOR_PURPLE)
registerEnum(TEXTCOLOR_ELECTRICPURPLE)
registerEnum(TEXTCOLOR_RED)
registerEnum(TEXTCOLOR_PASTELRED)
registerEnum(TEXTCOLOR_ORANGE)
registerEnum(TEXTCOLOR_YELLOW)
registerEnum(TEXTCOLOR_WHITE_EXP)
registerEnum(TEXTCOLOR_NONE)
registerEnum(TILESTATE_NONE)
registerEnum(TILESTATE_PROTECTIONZONE)
registerEnum(TILESTATE_NOPVPZONE)
registerEnum(TILESTATE_NOLOGOUT)
registerEnum(TILESTATE_PVPZONE)
registerEnum(TILESTATE_FLOORCHANGE)
registerEnum(TILESTATE_FLOORCHANGE_DOWN)
registerEnum(TILESTATE_FLOORCHANGE_NORTH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH)
registerEnum(TILESTATE_FLOORCHANGE_EAST)
registerEnum(TILESTATE_FLOORCHANGE_WEST)
registerEnum(TILESTATE_TELEPORT)
registerEnum(TILESTATE_MAGICFIELD)
registerEnum(TILESTATE_MAILBOX)
registerEnum(TILESTATE_TRASHHOLDER)
registerEnum(TILESTATE_BED)
registerEnum(TILESTATE_DEPOT)
registerEnum(TILESTATE_BLOCKSOLID)
registerEnum(TILESTATE_BLOCKPATH)
registerEnum(TILESTATE_IMMOVABLEBLOCKSOLID)
registerEnum(TILESTATE_IMMOVABLEBLOCKPATH)
registerEnum(TILESTATE_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(TILESTATE_NOFIELDBLOCKPATH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH_ALT)
registerEnum(TILESTATE_FLOORCHANGE_EAST_ALT)
registerEnum(TILESTATE_SUPPORTS_HANGABLE)
registerEnum(WEAPON_NONE)
registerEnum(WEAPON_SWORD)
registerEnum(WEAPON_CLUB)
registerEnum(WEAPON_AXE)
registerEnum(WEAPON_SHIELD)
registerEnum(WEAPON_DISTANCE)
registerEnum(WEAPON_WAND)
registerEnum(WEAPON_AMMO)
registerEnum(WORLD_TYPE_NO_PVP)
registerEnum(WORLD_TYPE_PVP)
registerEnum(WORLD_TYPE_PVP_ENFORCED)
// Use with container:addItem, container:addItemEx and possibly other functions.
registerEnum(FLAG_NOLIMIT)
registerEnum(FLAG_IGNOREBLOCKITEM)
registerEnum(FLAG_IGNOREBLOCKCREATURE)
registerEnum(FLAG_CHILDISOWNER)
registerEnum(FLAG_PATHFINDING)
registerEnum(FLAG_IGNOREFIELDDAMAGE)
registerEnum(FLAG_IGNORENOTMOVEABLE)
registerEnum(FLAG_IGNOREAUTOSTACK)
// Use with itemType:getSlotPosition
registerEnum(SLOTP_WHEREEVER)
registerEnum(SLOTP_HEAD)
registerEnum(SLOTP_NECKLACE)
registerEnum(SLOTP_BACKPACK)
registerEnum(SLOTP_ARMOR)
registerEnum(SLOTP_RIGHT)
registerEnum(SLOTP_LEFT)
registerEnum(SLOTP_LEGS)
registerEnum(SLOTP_FEET)
registerEnum(SLOTP_RING)
registerEnum(SLOTP_AMMO)
registerEnum(SLOTP_DEPOT)
registerEnum(SLOTP_TWO_HAND)
// Use with combat functions
registerEnum(ORIGIN_NONE)
registerEnum(ORIGIN_CONDITION)
registerEnum(ORIGIN_SPELL)
registerEnum(ORIGIN_MELEE)
registerEnum(ORIGIN_RANGED)
registerEnum(ORIGIN_WAND)
// Use with house:getAccessList, house:setAccessList
registerEnum(GUEST_LIST)
registerEnum(SUBOWNER_LIST)
// Use with npc:setSpeechBubble
registerEnum(SPEECHBUBBLE_NONE)
registerEnum(SPEECHBUBBLE_NORMAL)
registerEnum(SPEECHBUBBLE_TRADE)
registerEnum(SPEECHBUBBLE_QUEST)
registerEnum(SPEECHBUBBLE_QUESTTRADER)
// Use with player:addMapMark
registerEnum(MAPMARK_TICK)
registerEnum(MAPMARK_QUESTION)
registerEnum(MAPMARK_EXCLAMATION)
registerEnum(MAPMARK_STAR)
registerEnum(MAPMARK_CROSS)
registerEnum(MAPMARK_TEMPLE)
registerEnum(MAPMARK_KISS)
registerEnum(MAPMARK_SHOVEL)
registerEnum(MAPMARK_SWORD)
registerEnum(MAPMARK_FLAG)
registerEnum(MAPMARK_LOCK)
registerEnum(MAPMARK_BAG)
registerEnum(MAPMARK_SKULL)
registerEnum(MAPMARK_DOLLAR)
registerEnum(MAPMARK_REDNORTH)
registerEnum(MAPMARK_REDSOUTH)
registerEnum(MAPMARK_REDEAST)
registerEnum(MAPMARK_REDWEST)
registerEnum(MAPMARK_GREENNORTH)
registerEnum(MAPMARK_GREENSOUTH)
// Use with Game.getReturnMessage
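//e.g. (Lua): Game.getReturnMessage(RETURNVALUE_NOTPOSSIBLE) --> "Sorry, not possible."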
registerEnum(RETURNVALUE_NOERROR)
registerEnum(RETURNVALUE_NOTPOSSIBLE)
registerEnum(RETURNVALUE_NOTENOUGHROOM)
registerEnum(RETURNVALUE_PLAYERISPZLOCKED)
registerEnum(RETURNVALUE_PLAYERISNOTINVITED)
registerEnum(RETURNVALUE_CANNOTTHROW)
registerEnum(RETURNVALUE_THEREISNOWAY)
registerEnum(RETURNVALUE_DESTINATIONOUTOFREACH)
registerEnum(RETURNVALUE_CREATUREBLOCK)
registerEnum(RETURNVALUE_NOTMOVEABLE)
registerEnum(RETURNVALUE_DROPTWOHANDEDITEM)
registerEnum(RETURNVALUE_BOTHHANDSNEEDTOBEFREE)
registerEnum(RETURNVALUE_CANONLYUSEONEWEAPON)
registerEnum(RETURNVALUE_NEEDEXCHANGE)
registerEnum(RETURNVALUE_CANNOTBEDRESSED)
registerEnum(RETURNVALUE_PUTTHISOBJECTINYOURHAND)
registerEnum(RETURNVALUE_PUTTHISOBJECTINBOTHHANDS)
registerEnum(RETURNVALUE_TOOFARAWAY)
registerEnum(RETURNVALUE_FIRSTGODOWNSTAIRS)
registerEnum(RETURNVALUE_FIRSTGOUPSTAIRS)
registerEnum(RETURNVALUE_CONTAINERNOTENOUGHROOM)
registerEnum(RETURNVALUE_NOTENOUGHCAPACITY)
registerEnum(RETURNVALUE_CANNOTPICKUP)
registerEnum(RETURNVALUE_THISISIMPOSSIBLE)
registerEnum(RETURNVALUE_DEPOTISFULL)
registerEnum(RETURNVALUE_CREATUREDOESNOTEXIST)
registerEnum(RETURNVALUE_CANNOTUSETHISOBJECT)
registerEnum(RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE)
registerEnum(RETURNVALUE_NOTREQUIREDLEVELTOUSERUNE)
registerEnum(RETURNVALUE_YOUAREALREADYTRADING)
registerEnum(RETURNVALUE_THISPLAYERISALREADYTRADING)
registerEnum(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT)
registerEnum(RETURNVALUE_DIRECTPLAYERSHOOT)
registerEnum(RETURNVALUE_NOTENOUGHLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMAGICLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMANA)
registerEnum(RETURNVALUE_NOTENOUGHSOUL)
registerEnum(RETURNVALUE_YOUAREEXHAUSTED)
registerEnum(RETURNVALUE_YOUCANNOTUSEOBJECTSTHATFAST)
registerEnum(RETURNVALUE_PLAYERISNOTREACHABLE)
registerEnum(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISPLAYER)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONWHILEINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISCREATURE)
registerEnum(RETURNVALUE_YOUCANONLYUSEITONCREATURES)
registerEnum(RETURNVALUE_CREATUREISNOTREACHABLE)
registerEnum(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS)
registerEnum(RETURNVALUE_YOUNEEDPREMIUMACCOUNT)
registerEnum(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL)
registerEnum(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL)
registerEnum(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDLEAVEPVPZONE)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDENTERPVPZONE)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINANOPVPZONE)
registerEnum(RETURNVALUE_YOUCANNOTLOGOUTHERE)
registerEnum(RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL)
registerEnum(RETURNVALUE_CANNOTCONJUREITEMHERE)
registerEnum(RETURNVALUE_YOUNEEDTOSPLITYOURSPEARS)
registerEnum(RETURNVALUE_NAMEISTOOAMBIGUOUS)
registerEnum(RETURNVALUE_CANONLYUSEONESHIELD)
registerEnum(RETURNVALUE_NOPARTYMEMBERSINRANGE)
registerEnum(RETURNVALUE_YOUARENOTTHEOWNER)
registerEnum(RETURNVALUE_TRADEPLAYERFARAWAY)
registerEnum(RETURNVALUE_YOUDONTOWNTHISHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE)
registerEnum(RETURNVALUE_TRADEPLAYERHIGHESTBIDDER)
registerEnum(RETURNVALUE_YOUCANNOTTRADETHISHOUSE)
registerEnum(RETURNVALUE_YOUDONTHAVEREQUIREDPROFESSION)
registerEnum(RETURNVALUE_YOUCANNOTUSETHISBED)
registerEnum(RELOAD_TYPE_ALL)
registerEnum(RELOAD_TYPE_ACTIONS)
registerEnum(RELOAD_TYPE_CHAT)
registerEnum(RELOAD_TYPE_CONFIG)
registerEnum(RELOAD_TYPE_CREATURESCRIPTS)
registerEnum(RELOAD_TYPE_EVENTS)
registerEnum(RELOAD_TYPE_GLOBAL)
registerEnum(RELOAD_TYPE_GLOBALEVENTS)
registerEnum(RELOAD_TYPE_ITEMS)
registerEnum(RELOAD_TYPE_MONSTERS)
registerEnum(RELOAD_TYPE_MOUNTS)
registerEnum(RELOAD_TYPE_MOVEMENTS)
registerEnum(RELOAD_TYPE_NPCS)
registerEnum(RELOAD_TYPE_QUESTS)
registerEnum(RELOAD_TYPE_RAIDS)
registerEnum(RELOAD_TYPE_SCRIPTS)
registerEnum(RELOAD_TYPE_SPELLS)
registerEnum(RELOAD_TYPE_TALKACTIONS)
registerEnum(RELOAD_TYPE_WEAPONS)
registerEnum(ZONE_PROTECTION)
registerEnum(ZONE_NOPVP)
registerEnum(ZONE_PVP)
registerEnum(ZONE_NOLOGOUT)
registerEnum(ZONE_NORMAL)
registerEnum(MAX_LOOTCHANCE)
registerEnum(SPELL_INSTANT)
registerEnum(SPELL_RUNE)
registerEnum(MONSTERS_EVENT_THINK)
registerEnum(MONSTERS_EVENT_APPEAR)
registerEnum(MONSTERS_EVENT_DISAPPEAR)
registerEnum(MONSTERS_EVENT_MOVE)
registerEnum(MONSTERS_EVENT_SAY)
// _G
registerGlobalVariable("INDEX_WHEREEVER", INDEX_WHEREEVER);
registerGlobalBoolean("VIRTUAL_PARENT", true);
registerGlobalMethod("isType", LuaScriptInterface::luaIsType);
registerGlobalMethod("rawgetmetatable", LuaScriptInterface::luaRawGetMetatable);
// configKeys
registerTable("configKeys");
registerEnumIn("configKeys", ConfigManager::ALLOW_CHANGEOUTFIT)
registerEnumIn("configKeys", ConfigManager::ONE_PLAYER_ON_ACCOUNT)
registerEnumIn("configKeys", ConfigManager::AIMBOT_HOTKEY_ENABLED)
registerEnumIn("configKeys", ConfigManager::REMOVE_RUNE_CHARGES)
registerEnumIn("configKeys", ConfigManager::REMOVE_WEAPON_AMMO)
registerEnumIn("configKeys", ConfigManager::REMOVE_WEAPON_CHARGES)
registerEnumIn("configKeys", ConfigManager::REMOVE_POTION_CHARGES)
registerEnumIn("configKeys", ConfigManager::EXPERIENCE_FROM_PLAYERS)
registerEnumIn("configKeys", ConfigManager::FREE_PREMIUM)
registerEnumIn("configKeys", ConfigManager::REPLACE_KICK_ON_LOGIN)
registerEnumIn("configKeys", ConfigManager::ALLOW_CLONES)
registerEnumIn("configKeys", ConfigManager::BIND_ONLY_GLOBAL_ADDRESS)
registerEnumIn("configKeys", ConfigManager::OPTIMIZE_DATABASE)
registerEnumIn("configKeys", ConfigManager::MARKET_PREMIUM)
registerEnumIn("configKeys", ConfigManager::EMOTE_SPELLS)
registerEnumIn("configKeys", ConfigManager::STAMINA_SYSTEM)
registerEnumIn("configKeys", ConfigManager::WARN_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CONVERT_UNSAFE_SCRIPTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_EQUIPMENT_SLOTS)
registerEnumIn("configKeys", ConfigManager::CLASSIC_ATTACK_SPEED)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_NOTIFY_MESSAGE)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_NOTIFY_DURATION)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_CLEAN_MAP)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_CLOSE)
registerEnumIn("configKeys", ConfigManager::SERVER_SAVE_SHUTDOWN)
registerEnumIn("configKeys", ConfigManager::ONLINE_OFFLINE_CHARLIST)
registerEnumIn("configKeys", ConfigManager::LUA_ITEM_DESC)
registerEnumIn("configKeys", ConfigManager::MAP_NAME)
registerEnumIn("configKeys", ConfigManager::HOUSE_RENT_PERIOD)
registerEnumIn("configKeys", ConfigManager::SERVER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_NAME)
registerEnumIn("configKeys", ConfigManager::OWNER_EMAIL)
registerEnumIn("configKeys", ConfigManager::URL)
registerEnumIn("configKeys", ConfigManager::LOCATION)
registerEnumIn("configKeys", ConfigManager::IP)
registerEnumIn("configKeys", ConfigManager::MOTD)
registerEnumIn("configKeys", ConfigManager::WORLD_TYPE)
registerEnumIn("configKeys", ConfigManager::MYSQL_HOST)
registerEnumIn("configKeys", ConfigManager::MYSQL_USER)
registerEnumIn("configKeys", ConfigManager::MYSQL_PASS)
registerEnumIn("configKeys", ConfigManager::MYSQL_DB)
registerEnumIn("configKeys", ConfigManager::MYSQL_SOCK)
registerEnumIn("configKeys", ConfigManager::DEFAULT_PRIORITY)
registerEnumIn("configKeys", ConfigManager::MAP_AUTHOR)
registerEnumIn("configKeys", ConfigManager::SQL_PORT)
registerEnumIn("configKeys", ConfigManager::MAX_PLAYERS)
registerEnumIn("configKeys", ConfigManager::PZ_LOCKED)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRANGE)
registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRADIUS)
registerEnumIn("configKeys", ConfigManager::DEFAULT_WALKTOSPAWNRADIUS)
registerEnumIn("configKeys", ConfigManager::REMOVE_ON_DESPAWN)
registerEnumIn("configKeys", ConfigManager::RATE_EXPERIENCE)
registerEnumIn("configKeys", ConfigManager::RATE_SKILL)
registerEnumIn("configKeys", ConfigManager::RATE_LOOT)
registerEnumIn("configKeys", ConfigManager::RATE_MAGIC)
registerEnumIn("configKeys", ConfigManager::RATE_SPAWN)
registerEnumIn("configKeys", ConfigManager::HOUSE_PRICE)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_RED)
registerEnumIn("configKeys", ConfigManager::KILLS_TO_BLACK)
registerEnumIn("configKeys", ConfigManager::MAX_MESSAGEBUFFER)
registerEnumIn("configKeys", ConfigManager::ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::EX_ACTIONS_DELAY_INTERVAL)
registerEnumIn("configKeys", ConfigManager::KICK_AFTER_MINUTES)
registerEnumIn("configKeys", ConfigManager::PROTECTION_LEVEL)
registerEnumIn("configKeys", ConfigManager::DEATH_LOSE_PERCENT)
registerEnumIn("configKeys", ConfigManager::STATUSQUERY_TIMEOUT)
registerEnumIn("configKeys", ConfigManager::FRAG_TIME)
registerEnumIn("configKeys", ConfigManager::WHITE_SKULL_TIME)
registerEnumIn("configKeys", ConfigManager::GAME_PORT)
registerEnumIn("configKeys", ConfigManager::LOGIN_PORT)
registerEnumIn("configKeys", ConfigManager::STATUS_PORT)
registerEnumIn("configKeys", ConfigManager::STAIRHOP_DELAY)
registerEnumIn("configKeys", ConfigManager::MARKET_OFFER_DURATION)
registerEnumIn("configKeys", ConfigManager::CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES)
registerEnumIn("configKeys", ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER)
registerEnumIn("configKeys", ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)
registerEnumIn("configKeys", ConfigManager::MAX_PACKETS_PER_SECOND)
registerEnumIn("configKeys", ConfigManager::PLAYER_CONSOLE_LOGS)
// os
registerMethod("os", "mtime", LuaScriptInterface::luaSystemTime);
// table
registerMethod("table", "create", LuaScriptInterface::luaTableCreate);
registerMethod("table", "pack", LuaScriptInterface::luaTablePack);
// Game
registerTable("Game");
registerMethod("Game", "getSpectators", LuaScriptInterface::luaGameGetSpectators);
registerMethod("Game", "getPlayers", LuaScriptInterface::luaGameGetPlayers);
registerMethod("Game", "loadMap", LuaScriptInterface::luaGameLoadMap);
registerMethod("Game", "getExperienceStage", LuaScriptInterface::luaGameGetExperienceStage);
registerMethod("Game", "getExperienceForLevel", LuaScriptInterface::luaGameGetExperienceForLevel);
registerMethod("Game", "getMonsterCount", LuaScriptInterface::luaGameGetMonsterCount);
registerMethod("Game", "getPlayerCount", LuaScriptInterface::luaGameGetPlayerCount);
registerMethod("Game", "getNpcCount", LuaScriptInterface::luaGameGetNpcCount);
registerMethod("Game", "getMonsterTypes", LuaScriptInterface::luaGameGetMonsterTypes);
registerMethod("Game", "getTowns", LuaScriptInterface::luaGameGetTowns);
registerMethod("Game", "getHouses", LuaScriptInterface::luaGameGetHouses);
registerMethod("Game", "getGameState", LuaScriptInterface::luaGameGetGameState);
registerMethod("Game", "setGameState", LuaScriptInterface::luaGameSetGameState);
registerMethod("Game", "getWorldType", LuaScriptInterface::luaGameGetWorldType);
registerMethod("Game", "setWorldType", LuaScriptInterface::luaGameSetWorldType);
registerMethod("Game", "getItemAttributeByName", LuaScriptInterface::luaGameGetItemAttributeByName);
registerMethod("Game", "getReturnMessage", LuaScriptInterface::luaGameGetReturnMessage);
registerMethod("Game", "createItem", LuaScriptInterface::luaGameCreateItem);
registerMethod("Game", "createContainer", LuaScriptInterface::luaGameCreateContainer);
registerMethod("Game", "createMonster", LuaScriptInterface::luaGameCreateMonster);
registerMethod("Game", "createNpc", LuaScriptInterface::luaGameCreateNpc);
registerMethod("Game", "createTile", LuaScriptInterface::luaGameCreateTile);
registerMethod("Game", "createMonsterType", LuaScriptInterface::luaGameCreateMonsterType);
registerMethod("Game", "startRaid", LuaScriptInterface::luaGameStartRaid);
registerMethod("Game", "getClientVersion", LuaScriptInterface::luaGameGetClientVersion);
registerMethod("Game", "reload", LuaScriptInterface::luaGameReload);
registerMethod("Game", "getAccountStorageValue", LuaScriptInterface::luaGameGetAccountStorageValue);
registerMethod("Game", "setAccountStorageValue", LuaScriptInterface::luaGameSetAccountStorageValue);
registerMethod("Game", "saveAccountStorageValues", LuaScriptInterface::luaGameSaveAccountStorageValues);
// Variant
registerClass("Variant", "", LuaScriptInterface::luaVariantCreate);
registerMethod("Variant", "getNumber", LuaScriptInterface::luaVariantGetNumber);
registerMethod("Variant", "getString", LuaScriptInterface::luaVariantGetString);
registerMethod("Variant", "getPosition", LuaScriptInterface::luaVariantGetPosition);
// Position
registerClass("Position", "", LuaScriptInterface::luaPositionCreate);
registerMetaMethod("Position", "__add", LuaScriptInterface::luaPositionAdd);
registerMetaMethod("Position", "__sub", LuaScriptInterface::luaPositionSub);
registerMetaMethod("Position", "__eq", LuaScriptInterface::luaPositionCompare);
registerMethod("Position", "getDistance", LuaScriptInterface::luaPositionGetDistance);
registerMethod("Position", "isSightClear", LuaScriptInterface::luaPositionIsSightClear);
registerMethod("Position", "sendMagicEffect", LuaScriptInterface::luaPositionSendMagicEffect);
registerMethod("Position", "sendDistanceEffect", LuaScriptInterface::luaPositionSendDistanceEffect);
// Tile
registerClass("Tile", "", LuaScriptInterface::luaTileCreate);
registerMetaMethod("Tile", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Tile", "remove", LuaScriptInterface::luaTileRemove);
registerMethod("Tile", "getPosition", LuaScriptInterface::luaTileGetPosition);
registerMethod("Tile", "getGround", LuaScriptInterface::luaTileGetGround);
registerMethod("Tile", "getThing", LuaScriptInterface::luaTileGetThing);
registerMethod("Tile", "getThingCount", LuaScriptInterface::luaTileGetThingCount);
registerMethod("Tile", "getTopVisibleThing", LuaScriptInterface::luaTileGetTopVisibleThing);
registerMethod("Tile", "getTopTopItem", LuaScriptInterface::luaTileGetTopTopItem);
registerMethod("Tile", "getTopDownItem", LuaScriptInterface::luaTileGetTopDownItem);
registerMethod("Tile", "getFieldItem", LuaScriptInterface::luaTileGetFieldItem);
registerMethod("Tile", "getItemById", LuaScriptInterface::luaTileGetItemById);
registerMethod("Tile", "getItemByType", LuaScriptInterface::luaTileGetItemByType);
registerMethod("Tile", "getItemByTopOrder", LuaScriptInterface::luaTileGetItemByTopOrder);
registerMethod("Tile", "getItemCountById", LuaScriptInterface::luaTileGetItemCountById);
registerMethod("Tile", "getBottomCreature", LuaScriptInterface::luaTileGetBottomCreature);
registerMethod("Tile", "getTopCreature", LuaScriptInterface::luaTileGetTopCreature);
registerMethod("Tile", "getBottomVisibleCreature", LuaScriptInterface::luaTileGetBottomVisibleCreature);
registerMethod("Tile", "getTopVisibleCreature", LuaScriptInterface::luaTileGetTopVisibleCreature);
registerMethod("Tile", "getItems", LuaScriptInterface::luaTileGetItems);
registerMethod("Tile", "getItemCount", LuaScriptInterface::luaTileGetItemCount);
registerMethod("Tile", "getDownItemCount", LuaScriptInterface::luaTileGetDownItemCount);
registerMethod("Tile", "getTopItemCount", LuaScriptInterface::luaTileGetTopItemCount);
registerMethod("Tile", "getCreatures", LuaScriptInterface::luaTileGetCreatures);
registerMethod("Tile", "getCreatureCount", LuaScriptInterface::luaTileGetCreatureCount);
registerMethod("Tile", "getThingIndex", LuaScriptInterface::luaTileGetThingIndex);
registerMethod("Tile", "hasProperty", LuaScriptInterface::luaTileHasProperty);
registerMethod("Tile", "hasFlag", LuaScriptInterface::luaTileHasFlag);
registerMethod("Tile", "queryAdd", LuaScriptInterface::luaTileQueryAdd);
registerMethod("Tile", "addItem", LuaScriptInterface::luaTileAddItem);
registerMethod("Tile", "addItemEx", LuaScriptInterface::luaTileAddItemEx);
registerMethod("Tile", "getHouse", LuaScriptInterface::luaTileGetHouse);
// NetworkMessage
registerClass("NetworkMessage", "", LuaScriptInterface::luaNetworkMessageCreate);
registerMetaMethod("NetworkMessage", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("NetworkMessage", "__gc", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "delete", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "getByte", LuaScriptInterface::luaNetworkMessageGetByte);
registerMethod("NetworkMessage", "getU16", LuaScriptInterface::luaNetworkMessageGetU16);
registerMethod("NetworkMessage", "getU32", LuaScriptInterface::luaNetworkMessageGetU32);
registerMethod("NetworkMessage", "getU64", LuaScriptInterface::luaNetworkMessageGetU64);
registerMethod("NetworkMessage", "getString", LuaScriptInterface::luaNetworkMessageGetString);
registerMethod("NetworkMessage", "getPosition", LuaScriptInterface::luaNetworkMessageGetPosition);
registerMethod("NetworkMessage", "addByte", LuaScriptInterface::luaNetworkMessageAddByte);
registerMethod("NetworkMessage", "addU16", LuaScriptInterface::luaNetworkMessageAddU16);
registerMethod("NetworkMessage", "addU32", LuaScriptInterface::luaNetworkMessageAddU32);
registerMethod("NetworkMessage", "addU64", LuaScriptInterface::luaNetworkMessageAddU64);
registerMethod("NetworkMessage", "addString", LuaScriptInterface::luaNetworkMessageAddString);
registerMethod("NetworkMessage", "addPosition", LuaScriptInterface::luaNetworkMessageAddPosition);
registerMethod("NetworkMessage", "addDouble", LuaScriptInterface::luaNetworkMessageAddDouble);
registerMethod("NetworkMessage", "addItem", LuaScriptInterface::luaNetworkMessageAddItem);
registerMethod("NetworkMessage", "addItemId", LuaScriptInterface::luaNetworkMessageAddItemId);
registerMethod("NetworkMessage", "reset", LuaScriptInterface::luaNetworkMessageReset);
registerMethod("NetworkMessage", "seek", LuaScriptInterface::luaNetworkMessageSeek);
registerMethod("NetworkMessage", "tell", LuaScriptInterface::luaNetworkMessageTell);
registerMethod("NetworkMessage", "len", LuaScriptInterface::luaNetworkMessageLength);
registerMethod("NetworkMessage", "skipBytes", LuaScriptInterface::luaNetworkMessageSkipBytes);
registerMethod("NetworkMessage", "sendToPlayer", LuaScriptInterface::luaNetworkMessageSendToPlayer);
// ModalWindow
registerClass("ModalWindow", "", LuaScriptInterface::luaModalWindowCreate);
registerMetaMethod("ModalWindow", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("ModalWindow", "__gc", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "delete", LuaScriptInterface::luaModalWindowDelete);
registerMethod("ModalWindow", "getId", LuaScriptInterface::luaModalWindowGetId);
registerMethod("ModalWindow", "getTitle", LuaScriptInterface::luaModalWindowGetTitle);
registerMethod("ModalWindow", "getMessage", LuaScriptInterface::luaModalWindowGetMessage);
registerMethod("ModalWindow", "setTitle", LuaScriptInterface::luaModalWindowSetTitle);
registerMethod("ModalWindow", "setMessage", LuaScriptInterface::luaModalWindowSetMessage);
registerMethod("ModalWindow", "getButtonCount", LuaScriptInterface::luaModalWindowGetButtonCount);
registerMethod("ModalWindow", "getChoiceCount", LuaScriptInterface::luaModalWindowGetChoiceCount);
registerMethod("ModalWindow", "addButton", LuaScriptInterface::luaModalWindowAddButton);
registerMethod("ModalWindow", "addChoice", LuaScriptInterface::luaModalWindowAddChoice);
registerMethod("ModalWindow", "getDefaultEnterButton", LuaScriptInterface::luaModalWindowGetDefaultEnterButton);
registerMethod("ModalWindow", "setDefaultEnterButton", LuaScriptInterface::luaModalWindowSetDefaultEnterButton);
registerMethod("ModalWindow", "getDefaultEscapeButton", LuaScriptInterface::luaModalWindowGetDefaultEscapeButton);
registerMethod("ModalWindow", "setDefaultEscapeButton", LuaScriptInterface::luaModalWindowSetDefaultEscapeButton);
registerMethod("ModalWindow", "hasPriority", LuaScriptInterface::luaModalWindowHasPriority);
registerMethod("ModalWindow", "setPriority", LuaScriptInterface::luaModalWindowSetPriority);
registerMethod("ModalWindow", "sendToPlayer", LuaScriptInterface::luaModalWindowSendToPlayer);
// Item
registerClass("Item", "", LuaScriptInterface::luaItemCreate);
registerMetaMethod("Item", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Item", "isItem", LuaScriptInterface::luaItemIsItem);
registerMethod("Item", "getParent", LuaScriptInterface::luaItemGetParent);
registerMethod("Item", "getTopParent", LuaScriptInterface::luaItemGetTopParent);
registerMethod("Item", "getId", LuaScriptInterface::luaItemGetId);
registerMethod("Item", "clone", LuaScriptInterface::luaItemClone);
registerMethod("Item", "split", LuaScriptInterface::luaItemSplit);
registerMethod("Item", "remove", LuaScriptInterface::luaItemRemove);
registerMethod("Item", "getUniqueId", LuaScriptInterface::luaItemGetUniqueId);
registerMethod("Item", "getActionId", LuaScriptInterface::luaItemGetActionId);
registerMethod("Item", "setActionId", LuaScriptInterface::luaItemSetActionId);
registerMethod("Item", "getCount", LuaScriptInterface::luaItemGetCount);
registerMethod("Item", "getCharges", LuaScriptInterface::luaItemGetCharges);
registerMethod("Item", "getFluidType", LuaScriptInterface::luaItemGetFluidType);
registerMethod("Item", "getWeight", LuaScriptInterface::luaItemGetWeight);
registerMethod("Item", "getSubType", LuaScriptInterface::luaItemGetSubType);
registerMethod("Item", "getName", LuaScriptInterface::luaItemGetName);
registerMethod("Item", "getPluralName", LuaScriptInterface::luaItemGetPluralName);
registerMethod("Item", "getArticle", LuaScriptInterface::luaItemGetArticle);
registerMethod("Item", "getPosition", LuaScriptInterface::luaItemGetPosition);
registerMethod("Item", "getTile", LuaScriptInterface::luaItemGetTile);
registerMethod("Item", "hasAttribute", LuaScriptInterface::luaItemHasAttribute);
registerMethod("Item", "getAttribute", LuaScriptInterface::luaItemGetAttribute);
registerMethod("Item", "setAttribute", LuaScriptInterface::luaItemSetAttribute);
registerMethod("Item", "removeAttribute", LuaScriptInterface::luaItemRemoveAttribute);
registerMethod("Item", "getCustomAttribute", LuaScriptInterface::luaItemGetCustomAttribute);
registerMethod("Item", "setCustomAttribute", LuaScriptInterface::luaItemSetCustomAttribute);
registerMethod("Item", "removeCustomAttribute", LuaScriptInterface::luaItemRemoveCustomAttribute);
registerMethod("Item", "moveTo", LuaScriptInterface::luaItemMoveTo);
registerMethod("Item", "transform", LuaScriptInterface::luaItemTransform);
registerMethod("Item", "decay", LuaScriptInterface::luaItemDecay);
registerMethod("Item", "getDescription", LuaScriptInterface::luaItemGetDescription);
registerMethod("Item", "getSpecialDescription", LuaScriptInterface::luaItemGetSpecialDescription);
registerMethod("Item", "hasProperty", LuaScriptInterface::luaItemHasProperty);
registerMethod("Item", "isLoadedFromMap", LuaScriptInterface::luaItemIsLoadedFromMap);
registerMethod("Item", "setStoreItem", LuaScriptInterface::luaItemSetStoreItem);
registerMethod("Item", "isStoreItem", LuaScriptInterface::luaItemIsStoreItem);
// Container
registerClass("Container", "Item", LuaScriptInterface::luaContainerCreate);
registerMetaMethod("Container", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Container", "getSize", LuaScriptInterface::luaContainerGetSize);
registerMethod("Container", "getCapacity", LuaScriptInterface::luaContainerGetCapacity);
registerMethod("Container", "getEmptySlots", LuaScriptInterface::luaContainerGetEmptySlots);
registerMethod("Container", "getContentDescription", LuaScriptInterface::luaContainerGetContentDescription);
registerMethod("Container", "getItems", LuaScriptInterface::luaContainerGetItems);
registerMethod("Container", "getItemHoldingCount", LuaScriptInterface::luaContainerGetItemHoldingCount);
registerMethod("Container", "getItemCountById", LuaScriptInterface::luaContainerGetItemCountById);
registerMethod("Container", "getItem", LuaScriptInterface::luaContainerGetItem);
registerMethod("Container", "hasItem", LuaScriptInterface::luaContainerHasItem);
registerMethod("Container", "addItem", LuaScriptInterface::luaContainerAddItem);
registerMethod("Container", "addItemEx", LuaScriptInterface::luaContainerAddItemEx);
registerMethod("Container", "getCorpseOwner", LuaScriptInterface::luaContainerGetCorpseOwner);
// Teleport
registerClass("Teleport", "Item", LuaScriptInterface::luaTeleportCreate);
registerMetaMethod("Teleport", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Teleport", "getDestination", LuaScriptInterface::luaTeleportGetDestination);
registerMethod("Teleport", "setDestination", LuaScriptInterface::luaTeleportSetDestination);
// Creature
registerClass("Creature", "", LuaScriptInterface::luaCreatureCreate);
registerMetaMethod("Creature", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Creature", "getEvents", LuaScriptInterface::luaCreatureGetEvents);
registerMethod("Creature", "registerEvent", LuaScriptInterface::luaCreatureRegisterEvent);
registerMethod("Creature", "unregisterEvent", LuaScriptInterface::luaCreatureUnregisterEvent);
registerMethod("Creature", "isRemoved", LuaScriptInterface::luaCreatureIsRemoved);
registerMethod("Creature", "isCreature", LuaScriptInterface::luaCreatureIsCreature);
registerMethod("Creature", "isInGhostMode", LuaScriptInterface::luaCreatureIsInGhostMode);
registerMethod("Creature", "isHealthHidden", LuaScriptInterface::luaCreatureIsHealthHidden);
registerMethod("Creature", "isMovementBlocked", LuaScriptInterface::luaCreatureIsMovementBlocked);
registerMethod("Creature", "isImmune", LuaScriptInterface::luaCreatureIsImmune);
registerMethod("Creature", "canSee", LuaScriptInterface::luaCreatureCanSee);
registerMethod("Creature", "canSeeCreature", LuaScriptInterface::luaCreatureCanSeeCreature);
registerMethod("Creature", "canSeeGhostMode", LuaScriptInterface::luaCreatureCanSeeGhostMode);
registerMethod("Creature", "canSeeInvisibility", LuaScriptInterface::luaCreatureCanSeeInvisibility);
registerMethod("Creature", "getParent", LuaScriptInterface::luaCreatureGetParent);
registerMethod("Creature", "getId", LuaScriptInterface::luaCreatureGetId);
registerMethod("Creature", "getName", LuaScriptInterface::luaCreatureGetName);
registerMethod("Creature", "getTarget", LuaScriptInterface::luaCreatureGetTarget);
registerMethod("Creature", "setTarget", LuaScriptInterface::luaCreatureSetTarget);
registerMethod("Creature", "getFollowCreature", LuaScriptInterface::luaCreatureGetFollowCreature);
registerMethod("Creature", "setFollowCreature", LuaScriptInterface::luaCreatureSetFollowCreature);
registerMethod("Creature", "getMaster", LuaScriptInterface::luaCreatureGetMaster);
registerMethod("Creature", "setMaster", LuaScriptInterface::luaCreatureSetMaster);
registerMethod("Creature", "getLight", LuaScriptInterface::luaCreatureGetLight);
registerMethod("Creature", "setLight", LuaScriptInterface::luaCreatureSetLight);
registerMethod("Creature", "getSpeed", LuaScriptInterface::luaCreatureGetSpeed);
registerMethod("Creature", "getBaseSpeed", LuaScriptInterface::luaCreatureGetBaseSpeed);
registerMethod("Creature", "changeSpeed", LuaScriptInterface::luaCreatureChangeSpeed);
registerMethod("Creature", "setDropLoot", LuaScriptInterface::luaCreatureSetDropLoot);
registerMethod("Creature", "setSkillLoss", LuaScriptInterface::luaCreatureSetSkillLoss);
registerMethod("Creature", "getPosition", LuaScriptInterface::luaCreatureGetPosition);
registerMethod("Creature", "getTile", LuaScriptInterface::luaCreatureGetTile);
registerMethod("Creature", "getDirection", LuaScriptInterface::luaCreatureGetDirection);
registerMethod("Creature", "setDirection", LuaScriptInterface::luaCreatureSetDirection);
registerMethod("Creature", "getHealth", LuaScriptInterface::luaCreatureGetHealth);
registerMethod("Creature", "setHealth", LuaScriptInterface::luaCreatureSetHealth);
registerMethod("Creature", "addHealth", LuaScriptInterface::luaCreatureAddHealth);
registerMethod("Creature", "getMaxHealth", LuaScriptInterface::luaCreatureGetMaxHealth);
registerMethod("Creature", "setMaxHealth", LuaScriptInterface::luaCreatureSetMaxHealth);
registerMethod("Creature", "setHiddenHealth", LuaScriptInterface::luaCreatureSetHiddenHealth);
registerMethod("Creature", "setMovementBlocked", LuaScriptInterface::luaCreatureSetMovementBlocked);
registerMethod("Creature", "getSkull", LuaScriptInterface::luaCreatureGetSkull);
registerMethod("Creature", "setSkull", LuaScriptInterface::luaCreatureSetSkull);
registerMethod("Creature", "getOutfit", LuaScriptInterface::luaCreatureGetOutfit);
registerMethod("Creature", "setOutfit", LuaScriptInterface::luaCreatureSetOutfit);
registerMethod("Creature", "getCondition", LuaScriptInterface::luaCreatureGetCondition);
registerMethod("Creature", "addCondition", LuaScriptInterface::luaCreatureAddCondition);
registerMethod("Creature", "removeCondition", LuaScriptInterface::luaCreatureRemoveCondition);
registerMethod("Creature", "hasCondition", LuaScriptInterface::luaCreatureHasCondition);
registerMethod("Creature", "remove", LuaScriptInterface::luaCreatureRemove);
registerMethod("Creature", "teleportTo", LuaScriptInterface::luaCreatureTeleportTo);
registerMethod("Creature", "say", LuaScriptInterface::luaCreatureSay);
registerMethod("Creature", "getDamageMap", LuaScriptInterface::luaCreatureGetDamageMap);
registerMethod("Creature", "getSummons", LuaScriptInterface::luaCreatureGetSummons);
registerMethod("Creature", "getDescription", LuaScriptInterface::luaCreatureGetDescription);
registerMethod("Creature", "getPathTo", LuaScriptInterface::luaCreatureGetPathTo);
registerMethod("Creature", "move", LuaScriptInterface::luaCreatureMove);
registerMethod("Creature", "getZone", LuaScriptInterface::luaCreatureGetZone);
// Player
registerClass("Player", "Creature", LuaScriptInterface::luaPlayerCreate);
registerMetaMethod("Player", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Player", "isPlayer", LuaScriptInterface::luaPlayerIsPlayer);
registerMethod("Player", "getGuid", LuaScriptInterface::luaPlayerGetGuid);
registerMethod("Player", "getIp", LuaScriptInterface::luaPlayerGetIp);
registerMethod("Player", "getAccountId", LuaScriptInterface::luaPlayerGetAccountId);
registerMethod("Player", "getLastLoginSaved", LuaScriptInterface::luaPlayerGetLastLoginSaved);
registerMethod("Player", "getLastLogout", LuaScriptInterface::luaPlayerGetLastLogout);
registerMethod("Player", "getAccountType", LuaScriptInterface::luaPlayerGetAccountType);
registerMethod("Player", "setAccountType", LuaScriptInterface::luaPlayerSetAccountType);
registerMethod("Player", "getCapacity", LuaScriptInterface::luaPlayerGetCapacity);
registerMethod("Player", "setCapacity", LuaScriptInterface::luaPlayerSetCapacity);
registerMethod("Player", "getFreeCapacity", LuaScriptInterface::luaPlayerGetFreeCapacity);
registerMethod("Player", "getDepotChest", LuaScriptInterface::luaPlayerGetDepotChest);
registerMethod("Player", "getInbox", LuaScriptInterface::luaPlayerGetInbox);
registerMethod("Player", "getSkullTime", LuaScriptInterface::luaPlayerGetSkullTime);
registerMethod("Player", "setSkullTime", LuaScriptInterface::luaPlayerSetSkullTime);
registerMethod("Player", "getDeathPenalty", LuaScriptInterface::luaPlayerGetDeathPenalty);
registerMethod("Player", "getExperience", LuaScriptInterface::luaPlayerGetExperience);
registerMethod("Player", "addExperience", LuaScriptInterface::luaPlayerAddExperience);
registerMethod("Player", "removeExperience", LuaScriptInterface::luaPlayerRemoveExperience);
registerMethod("Player", "getLevel", LuaScriptInterface::luaPlayerGetLevel);
registerMethod("Player", "getMagicLevel", LuaScriptInterface::luaPlayerGetMagicLevel);
registerMethod("Player", "getBaseMagicLevel", LuaScriptInterface::luaPlayerGetBaseMagicLevel);
registerMethod("Player", "getMana", LuaScriptInterface::luaPlayerGetMana);
registerMethod("Player", "addMana", LuaScriptInterface::luaPlayerAddMana);
registerMethod("Player", "getMaxMana", LuaScriptInterface::luaPlayerGetMaxMana);
registerMethod("Player", "setMaxMana", LuaScriptInterface::luaPlayerSetMaxMana);
registerMethod("Player", "getManaSpent", LuaScriptInterface::luaPlayerGetManaSpent);
registerMethod("Player", "addManaSpent", LuaScriptInterface::luaPlayerAddManaSpent);
registerMethod("Player", "removeManaSpent", LuaScriptInterface::luaPlayerRemoveManaSpent);
registerMethod("Player", "getBaseMaxHealth", LuaScriptInterface::luaPlayerGetBaseMaxHealth);
registerMethod("Player", "getBaseMaxMana", LuaScriptInterface::luaPlayerGetBaseMaxMana);
registerMethod("Player", "getSkillLevel", LuaScriptInterface::luaPlayerGetSkillLevel);
registerMethod("Player", "getEffectiveSkillLevel", LuaScriptInterface::luaPlayerGetEffectiveSkillLevel);
registerMethod("Player", "getSkillPercent", LuaScriptInterface::luaPlayerGetSkillPercent);
registerMethod("Player", "getSkillTries", LuaScriptInterface::luaPlayerGetSkillTries);
registerMethod("Player", "addSkillTries", LuaScriptInterface::luaPlayerAddSkillTries);
registerMethod("Player", "removeSkillTries", LuaScriptInterface::luaPlayerRemoveSkillTries);
registerMethod("Player", "getSpecialSkill", LuaScriptInterface::luaPlayerGetSpecialSkill);
registerMethod("Player", "addSpecialSkill", LuaScriptInterface::luaPlayerAddSpecialSkill);
registerMethod("Player", "addOfflineTrainingTime", LuaScriptInterface::luaPlayerAddOfflineTrainingTime);
registerMethod("Player", "getOfflineTrainingTime", LuaScriptInterface::luaPlayerGetOfflineTrainingTime);
registerMethod("Player", "removeOfflineTrainingTime", LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime);
registerMethod("Player", "addOfflineTrainingTries", LuaScriptInterface::luaPlayerAddOfflineTrainingTries);
registerMethod("Player", "getOfflineTrainingSkill", LuaScriptInterface::luaPlayerGetOfflineTrainingSkill);
registerMethod("Player", "setOfflineTrainingSkill", LuaScriptInterface::luaPlayerSetOfflineTrainingSkill);
registerMethod("Player", "getItemCount", LuaScriptInterface::luaPlayerGetItemCount);
registerMethod("Player", "getItemById", LuaScriptInterface::luaPlayerGetItemById);
registerMethod("Player", "getVocation", LuaScriptInterface::luaPlayerGetVocation);
registerMethod("Player", "setVocation", LuaScriptInterface::luaPlayerSetVocation);
registerMethod("Player", "getSex", LuaScriptInterface::luaPlayerGetSex);
registerMethod("Player", "setSex", LuaScriptInterface::luaPlayerSetSex);
registerMethod("Player", "getTown", LuaScriptInterface::luaPlayerGetTown);
registerMethod("Player", "setTown", LuaScriptInterface::luaPlayerSetTown);
registerMethod("Player", "getGuild", LuaScriptInterface::luaPlayerGetGuild);
registerMethod("Player", "setGuild", LuaScriptInterface::luaPlayerSetGuild);
registerMethod("Player", "getGuildLevel", LuaScriptInterface::luaPlayerGetGuildLevel);
registerMethod("Player", "setGuildLevel", LuaScriptInterface::luaPlayerSetGuildLevel);
registerMethod("Player", "getGuildNick", LuaScriptInterface::luaPlayerGetGuildNick);
registerMethod("Player", "setGuildNick", LuaScriptInterface::luaPlayerSetGuildNick);
registerMethod("Player", "getGroup", LuaScriptInterface::luaPlayerGetGroup);
registerMethod("Player", "setGroup", LuaScriptInterface::luaPlayerSetGroup);
registerMethod("Player", "getStamina", LuaScriptInterface::luaPlayerGetStamina);
registerMethod("Player", "setStamina", LuaScriptInterface::luaPlayerSetStamina);
registerMethod("Player", "getSoul", LuaScriptInterface::luaPlayerGetSoul);
registerMethod("Player", "addSoul", LuaScriptInterface::luaPlayerAddSoul);
registerMethod("Player", "getMaxSoul", LuaScriptInterface::luaPlayerGetMaxSoul);
registerMethod("Player", "getBankBalance", LuaScriptInterface::luaPlayerGetBankBalance);
registerMethod("Player", "setBankBalance", LuaScriptInterface::luaPlayerSetBankBalance);
registerMethod("Player", "getStorageValue", LuaScriptInterface::luaPlayerGetStorageValue);
registerMethod("Player", "setStorageValue", LuaScriptInterface::luaPlayerSetStorageValue);
registerMethod("Player", "addItem", LuaScriptInterface::luaPlayerAddItem);
registerMethod("Player", "addItemEx", LuaScriptInterface::luaPlayerAddItemEx);
registerMethod("Player", "removeItem", LuaScriptInterface::luaPlayerRemoveItem);
registerMethod("Player", "getMoney", LuaScriptInterface::luaPlayerGetMoney);
registerMethod("Player", "addMoney", LuaScriptInterface::luaPlayerAddMoney);
registerMethod("Player", "removeMoney", LuaScriptInterface::luaPlayerRemoveMoney);
registerMethod("Player", "showTextDialog", LuaScriptInterface::luaPlayerShowTextDialog);
registerMethod("Player", "sendTextMessage", LuaScriptInterface::luaPlayerSendTextMessage);
registerMethod("Player", "sendChannelMessage", LuaScriptInterface::luaPlayerSendChannelMessage);
registerMethod("Player", "sendPrivateMessage", LuaScriptInterface::luaPlayerSendPrivateMessage);
registerMethod("Player", "channelSay", LuaScriptInterface::luaPlayerChannelSay);
registerMethod("Player", "openChannel", LuaScriptInterface::luaPlayerOpenChannel);
registerMethod("Player", "getSlotItem", LuaScriptInterface::luaPlayerGetSlotItem);
registerMethod("Player", "getParty", LuaScriptInterface::luaPlayerGetParty);
registerMethod("Player", "addOutfit", LuaScriptInterface::luaPlayerAddOutfit);
registerMethod("Player", "addOutfitAddon", LuaScriptInterface::luaPlayerAddOutfitAddon);
registerMethod("Player", "removeOutfit", LuaScriptInterface::luaPlayerRemoveOutfit);
registerMethod("Player", "removeOutfitAddon", LuaScriptInterface::luaPlayerRemoveOutfitAddon);
registerMethod("Player", "hasOutfit", LuaScriptInterface::luaPlayerHasOutfit);
registerMethod("Player", "canWearOutfit", LuaScriptInterface::luaPlayerCanWearOutfit);
registerMethod("Player", "sendOutfitWindow", LuaScriptInterface::luaPlayerSendOutfitWindow);
registerMethod("Player", "addMount", LuaScriptInterface::luaPlayerAddMount);
registerMethod("Player", "removeMount", LuaScriptInterface::luaPlayerRemoveMount);
registerMethod("Player", "hasMount", LuaScriptInterface::luaPlayerHasMount);
registerMethod("Player", "getPremiumEndsAt", LuaScriptInterface::luaPlayerGetPremiumEndsAt);
registerMethod("Player", "setPremiumEndsAt", LuaScriptInterface::luaPlayerSetPremiumEndsAt);
registerMethod("Player", "hasBlessing", LuaScriptInterface::luaPlayerHasBlessing);
registerMethod("Player", "addBlessing", LuaScriptInterface::luaPlayerAddBlessing);
registerMethod("Player", "removeBlessing", LuaScriptInterface::luaPlayerRemoveBlessing);
registerMethod("Player", "canLearnSpell", LuaScriptInterface::luaPlayerCanLearnSpell);
registerMethod("Player", "learnSpell", LuaScriptInterface::luaPlayerLearnSpell);
registerMethod("Player", "forgetSpell", LuaScriptInterface::luaPlayerForgetSpell);
registerMethod("Player", "hasLearnedSpell", LuaScriptInterface::luaPlayerHasLearnedSpell);
registerMethod("Player", "sendTutorial", LuaScriptInterface::luaPlayerSendTutorial);
registerMethod("Player", "addMapMark", LuaScriptInterface::luaPlayerAddMapMark);
registerMethod("Player", "save", LuaScriptInterface::luaPlayerSave);
registerMethod("Player", "popupFYI", LuaScriptInterface::luaPlayerPopupFYI);
registerMethod("Player", "isPzLocked", LuaScriptInterface::luaPlayerIsPzLocked);
registerMethod("Player", "getClient", LuaScriptInterface::luaPlayerGetClient);
registerMethod("Player", "getHouse", LuaScriptInterface::luaPlayerGetHouse);
registerMethod("Player", "sendHouseWindow", LuaScriptInterface::luaPlayerSendHouseWindow);
registerMethod("Player", "setEditHouse", LuaScriptInterface::luaPlayerSetEditHouse);
registerMethod("Player", "setGhostMode", LuaScriptInterface::luaPlayerSetGhostMode);
registerMethod("Player", "getContainerId", LuaScriptInterface::luaPlayerGetContainerId);
registerMethod("Player", "getContainerById", LuaScriptInterface::luaPlayerGetContainerById);
registerMethod("Player", "getContainerIndex", LuaScriptInterface::luaPlayerGetContainerIndex);
registerMethod("Player", "getInstantSpells", LuaScriptInterface::luaPlayerGetInstantSpells);
registerMethod("Player", "canCast", LuaScriptInterface::luaPlayerCanCast);
registerMethod("Player", "hasChaseMode", LuaScriptInterface::luaPlayerHasChaseMode);
registerMethod("Player", "hasSecureMode", LuaScriptInterface::luaPlayerHasSecureMode);
registerMethod("Player", "getFightMode", LuaScriptInterface::luaPlayerGetFightMode);
registerMethod("Player", "getStoreInbox", LuaScriptInterface::luaPlayerGetStoreInbox);
// Monster
registerClass("Monster", "Creature", LuaScriptInterface::luaMonsterCreate);
registerMetaMethod("Monster", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Monster", "isMonster", LuaScriptInterface::luaMonsterIsMonster);
registerMethod("Monster", "getType", LuaScriptInterface::luaMonsterGetType);
registerMethod("Monster", "rename", LuaScriptInterface::luaMonsterRename);
registerMethod("Monster", "getSpawnPosition", LuaScriptInterface::luaMonsterGetSpawnPosition);
registerMethod("Monster", "isInSpawnRange", LuaScriptInterface::luaMonsterIsInSpawnRange);
registerMethod("Monster", "isIdle", LuaScriptInterface::luaMonsterIsIdle);
registerMethod("Monster", "setIdle", LuaScriptInterface::luaMonsterSetIdle);
registerMethod("Monster", "isTarget", LuaScriptInterface::luaMonsterIsTarget);
registerMethod("Monster", "isOpponent", LuaScriptInterface::luaMonsterIsOpponent);
registerMethod("Monster", "isFriend", LuaScriptInterface::luaMonsterIsFriend);
registerMethod("Monster", "addFriend", LuaScriptInterface::luaMonsterAddFriend);
registerMethod("Monster", "removeFriend", LuaScriptInterface::luaMonsterRemoveFriend);
registerMethod("Monster", "getFriendList", LuaScriptInterface::luaMonsterGetFriendList);
registerMethod("Monster", "getFriendCount", LuaScriptInterface::luaMonsterGetFriendCount);
registerMethod("Monster", "addTarget", LuaScriptInterface::luaMonsterAddTarget);
registerMethod("Monster", "removeTarget", LuaScriptInterface::luaMonsterRemoveTarget);
registerMethod("Monster", "getTargetList", LuaScriptInterface::luaMonsterGetTargetList);
registerMethod("Monster", "getTargetCount", LuaScriptInterface::luaMonsterGetTargetCount);
registerMethod("Monster", "selectTarget", LuaScriptInterface::luaMonsterSelectTarget);
registerMethod("Monster", "searchTarget", LuaScriptInterface::luaMonsterSearchTarget);
registerMethod("Monster", "isWalkingToSpawn", LuaScriptInterface::luaMonsterIsWalkingToSpawn);
registerMethod("Monster", "walkToSpawn", LuaScriptInterface::luaMonsterWalkToSpawn);
// Npc
registerClass("Npc", "Creature", LuaScriptInterface::luaNpcCreate);
registerMetaMethod("Npc", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Npc", "isNpc", LuaScriptInterface::luaNpcIsNpc);
registerMethod("Npc", "setMasterPos", LuaScriptInterface::luaNpcSetMasterPos);
registerMethod("Npc", "getSpeechBubble", LuaScriptInterface::luaNpcGetSpeechBubble);
registerMethod("Npc", "setSpeechBubble", LuaScriptInterface::luaNpcSetSpeechBubble);
// Guild
registerClass("Guild", "", LuaScriptInterface::luaGuildCreate);
registerMetaMethod("Guild", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Guild", "getId", LuaScriptInterface::luaGuildGetId);
registerMethod("Guild", "getName", LuaScriptInterface::luaGuildGetName);
registerMethod("Guild", "getMembersOnline", LuaScriptInterface::luaGuildGetMembersOnline);
registerMethod("Guild", "addRank", LuaScriptInterface::luaGuildAddRank);
registerMethod("Guild", "getRankById", LuaScriptInterface::luaGuildGetRankById);
registerMethod("Guild", "getRankByLevel", LuaScriptInterface::luaGuildGetRankByLevel);
registerMethod("Guild", "getMotd", LuaScriptInterface::luaGuildGetMotd);
registerMethod("Guild", "setMotd", LuaScriptInterface::luaGuildSetMotd);
// Group
registerClass("Group", "", LuaScriptInterface::luaGroupCreate);
registerMetaMethod("Group", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Group", "getId", LuaScriptInterface::luaGroupGetId);
registerMethod("Group", "getName", LuaScriptInterface::luaGroupGetName);
registerMethod("Group", "getFlags", LuaScriptInterface::luaGroupGetFlags);
registerMethod("Group", "getAccess", LuaScriptInterface::luaGroupGetAccess);
registerMethod("Group", "getMaxDepotItems", LuaScriptInterface::luaGroupGetMaxDepotItems);
registerMethod("Group", "getMaxVipEntries", LuaScriptInterface::luaGroupGetMaxVipEntries);
registerMethod("Group", "hasFlag", LuaScriptInterface::luaGroupHasFlag);
// Vocation
registerClass("Vocation", "", LuaScriptInterface::luaVocationCreate);
registerMetaMethod("Vocation", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Vocation", "getId", LuaScriptInterface::luaVocationGetId);
registerMethod("Vocation", "getClientId", LuaScriptInterface::luaVocationGetClientId);
registerMethod("Vocation", "getName", LuaScriptInterface::luaVocationGetName);
registerMethod("Vocation", "getDescription", LuaScriptInterface::luaVocationGetDescription);
registerMethod("Vocation", "getRequiredSkillTries", LuaScriptInterface::luaVocationGetRequiredSkillTries);
registerMethod("Vocation", "getRequiredManaSpent", LuaScriptInterface::luaVocationGetRequiredManaSpent);
registerMethod("Vocation", "getCapacityGain", LuaScriptInterface::luaVocationGetCapacityGain);
registerMethod("Vocation", "getHealthGain", LuaScriptInterface::luaVocationGetHealthGain);
registerMethod("Vocation", "getHealthGainTicks", LuaScriptInterface::luaVocationGetHealthGainTicks);
registerMethod("Vocation", "getHealthGainAmount", LuaScriptInterface::luaVocationGetHealthGainAmount);
registerMethod("Vocation", "getManaGain", LuaScriptInterface::luaVocationGetManaGain);
registerMethod("Vocation", "getManaGainTicks", LuaScriptInterface::luaVocationGetManaGainTicks);
registerMethod("Vocation", "getManaGainAmount", LuaScriptInterface::luaVocationGetManaGainAmount);
registerMethod("Vocation", "getMaxSoul", LuaScriptInterface::luaVocationGetMaxSoul);
registerMethod("Vocation", "getSoulGainTicks", LuaScriptInterface::luaVocationGetSoulGainTicks);
registerMethod("Vocation", "getAttackSpeed", LuaScriptInterface::luaVocationGetAttackSpeed);
registerMethod("Vocation", "getBaseSpeed", LuaScriptInterface::luaVocationGetBaseSpeed);
registerMethod("Vocation", "getDemotion", LuaScriptInterface::luaVocationGetDemotion);
registerMethod("Vocation", "getPromotion", LuaScriptInterface::luaVocationGetPromotion);
registerMethod("Vocation", "allowsPvp", LuaScriptInterface::luaVocationAllowsPvp);
// Town
registerClass("Town", "", LuaScriptInterface::luaTownCreate);
registerMetaMethod("Town", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Town", "getId", LuaScriptInterface::luaTownGetId);
registerMethod("Town", "getName", LuaScriptInterface::luaTownGetName);
registerMethod("Town", "getTemplePosition", LuaScriptInterface::luaTownGetTemplePosition);
// House
registerClass("House", "", LuaScriptInterface::luaHouseCreate);
registerMetaMethod("House", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("House", "getId", LuaScriptInterface::luaHouseGetId);
registerMethod("House", "getName", LuaScriptInterface::luaHouseGetName);
registerMethod("House", "getTown", LuaScriptInterface::luaHouseGetTown);
registerMethod("House", "getExitPosition", LuaScriptInterface::luaHouseGetExitPosition);
registerMethod("House", "getRent", LuaScriptInterface::luaHouseGetRent);
registerMethod("House", "getOwnerGuid", LuaScriptInterface::luaHouseGetOwnerGuid);
registerMethod("House", "setOwnerGuid", LuaScriptInterface::luaHouseSetOwnerGuid);
registerMethod("House", "startTrade", LuaScriptInterface::luaHouseStartTrade);
registerMethod("House", "getBeds", LuaScriptInterface::luaHouseGetBeds);
registerMethod("House", "getBedCount", LuaScriptInterface::luaHouseGetBedCount);
registerMethod("House", "getDoors", LuaScriptInterface::luaHouseGetDoors);
registerMethod("House", "getDoorCount", LuaScriptInterface::luaHouseGetDoorCount);
registerMethod("House", "getDoorIdByPosition", LuaScriptInterface::luaHouseGetDoorIdByPosition);
registerMethod("House", "getTiles", LuaScriptInterface::luaHouseGetTiles);
registerMethod("House", "getItems", LuaScriptInterface::luaHouseGetItems);
registerMethod("House", "getTileCount", LuaScriptInterface::luaHouseGetTileCount);
registerMethod("House", "canEditAccessList", LuaScriptInterface::luaHouseCanEditAccessList);
registerMethod("House", "getAccessList", LuaScriptInterface::luaHouseGetAccessList);
registerMethod("House", "setAccessList", LuaScriptInterface::luaHouseSetAccessList);
registerMethod("House", "kickPlayer", LuaScriptInterface::luaHouseKickPlayer);
registerMethod("House", "save", LuaScriptInterface::luaHouseSave);
// ItemType
registerClass("ItemType", "", LuaScriptInterface::luaItemTypeCreate);
registerMetaMethod("ItemType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("ItemType", "isCorpse", LuaScriptInterface::luaItemTypeIsCorpse);
registerMethod("ItemType", "isDoor", LuaScriptInterface::luaItemTypeIsDoor);
registerMethod("ItemType", "isContainer", LuaScriptInterface::luaItemTypeIsContainer);
registerMethod("ItemType", "isFluidContainer", LuaScriptInterface::luaItemTypeIsFluidContainer);
registerMethod("ItemType", "isMovable", LuaScriptInterface::luaItemTypeIsMovable);
registerMethod("ItemType", "isRune", LuaScriptInterface::luaItemTypeIsRune);
registerMethod("ItemType", "isStackable", LuaScriptInterface::luaItemTypeIsStackable);
registerMethod("ItemType", "isReadable", LuaScriptInterface::luaItemTypeIsReadable);
registerMethod("ItemType", "isWritable", LuaScriptInterface::luaItemTypeIsWritable);
registerMethod("ItemType", "isBlocking", LuaScriptInterface::luaItemTypeIsBlocking);
registerMethod("ItemType", "isGroundTile", LuaScriptInterface::luaItemTypeIsGroundTile);
registerMethod("ItemType", "isMagicField", LuaScriptInterface::luaItemTypeIsMagicField);
registerMethod("ItemType", "isUseable", LuaScriptInterface::luaItemTypeIsUseable);
registerMethod("ItemType", "isPickupable", LuaScriptInterface::luaItemTypeIsPickupable);
registerMethod("ItemType", "getType", LuaScriptInterface::luaItemTypeGetType);
registerMethod("ItemType", "getGroup", LuaScriptInterface::luaItemTypeGetGroup);
registerMethod("ItemType", "getId", LuaScriptInterface::luaItemTypeGetId);
registerMethod("ItemType", "getClientId", LuaScriptInterface::luaItemTypeGetClientId);
registerMethod("ItemType", "getName", LuaScriptInterface::luaItemTypeGetName);
registerMethod("ItemType", "getPluralName", LuaScriptInterface::luaItemTypeGetPluralName);
registerMethod("ItemType", "getArticle", LuaScriptInterface::luaItemTypeGetArticle);
registerMethod("ItemType", "getDescription", LuaScriptInterface::luaItemTypeGetDescription);
registerMethod("ItemType", "getSlotPosition", LuaScriptInterface::luaItemTypeGetSlotPosition);
registerMethod("ItemType", "getCharges", LuaScriptInterface::luaItemTypeGetCharges);
registerMethod("ItemType", "getFluidSource", LuaScriptInterface::luaItemTypeGetFluidSource);
registerMethod("ItemType", "getCapacity", LuaScriptInterface::luaItemTypeGetCapacity);
registerMethod("ItemType", "getWeight", LuaScriptInterface::luaItemTypeGetWeight);
registerMethod("ItemType", "getHitChance", LuaScriptInterface::luaItemTypeGetHitChance);
registerMethod("ItemType", "getShootRange", LuaScriptInterface::luaItemTypeGetShootRange);
registerMethod("ItemType", "getAttack", LuaScriptInterface::luaItemTypeGetAttack);
registerMethod("ItemType", "getAttackSpeed", LuaScriptInterface::luaItemTypeGetAttackSpeed);
registerMethod("ItemType", "getDefense", LuaScriptInterface::luaItemTypeGetDefense);
registerMethod("ItemType", "getExtraDefense", LuaScriptInterface::luaItemTypeGetExtraDefense);
registerMethod("ItemType", "getArmor", LuaScriptInterface::luaItemTypeGetArmor);
registerMethod("ItemType", "getWeaponType", LuaScriptInterface::luaItemTypeGetWeaponType);
registerMethod("ItemType", "getElementType", LuaScriptInterface::luaItemTypeGetElementType);
registerMethod("ItemType", "getElementDamage", LuaScriptInterface::luaItemTypeGetElementDamage);
registerMethod("ItemType", "getTransformEquipId", LuaScriptInterface::luaItemTypeGetTransformEquipId);
registerMethod("ItemType", "getTransformDeEquipId", LuaScriptInterface::luaItemTypeGetTransformDeEquipId);
registerMethod("ItemType", "getDestroyId", LuaScriptInterface::luaItemTypeGetDestroyId);
registerMethod("ItemType", "getDecayId", LuaScriptInterface::luaItemTypeGetDecayId);
registerMethod("ItemType", "getRequiredLevel", LuaScriptInterface::luaItemTypeGetRequiredLevel);
registerMethod("ItemType", "getAmmoType", LuaScriptInterface::luaItemTypeGetAmmoType);
registerMethod("ItemType", "getCorpseType", LuaScriptInterface::luaItemTypeGetCorpseType);
registerMethod("ItemType", "getAbilities", LuaScriptInterface::luaItemTypeGetAbilities);
registerMethod("ItemType", "hasShowAttributes", LuaScriptInterface::luaItemTypeHasShowAttributes);
registerMethod("ItemType", "hasShowCount", LuaScriptInterface::luaItemTypeHasShowCount);
registerMethod("ItemType", "hasShowCharges", LuaScriptInterface::luaItemTypeHasShowCharges);
registerMethod("ItemType", "hasShowDuration", LuaScriptInterface::luaItemTypeHasShowDuration);
registerMethod("ItemType", "hasAllowDistRead", LuaScriptInterface::luaItemTypeHasAllowDistRead);
registerMethod("ItemType", "getWieldInfo", LuaScriptInterface::luaItemTypeGetWieldInfo);
registerMethod("ItemType", "getDuration", LuaScriptInterface::luaItemTypeGetDuration);
registerMethod("ItemType", "getLevelDoor", LuaScriptInterface::luaItemTypeGetLevelDoor);
registerMethod("ItemType", "getRuneSpellName", LuaScriptInterface::luaItemTypeGetRuneSpellName);
registerMethod("ItemType", "getVocationString", LuaScriptInterface::luaItemTypeGetVocationString);
registerMethod("ItemType", "getMinReqLevel", LuaScriptInterface::luaItemTypeGetMinReqLevel);
registerMethod("ItemType", "getMinReqMagicLevel", LuaScriptInterface::luaItemTypeGetMinReqMagicLevel);
registerMethod("ItemType", "hasSubType", LuaScriptInterface::luaItemTypeHasSubType);
registerMethod("ItemType", "isStoreItem", LuaScriptInterface::luaItemTypeIsStoreItem);
// Combat
registerClass("Combat", "", LuaScriptInterface::luaCombatCreate);
registerMetaMethod("Combat", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("Combat", "__gc", LuaScriptInterface::luaCombatDelete);
registerMethod("Combat", "delete", LuaScriptInterface::luaCombatDelete);
registerMethod("Combat", "setParameter", LuaScriptInterface::luaCombatSetParameter);
registerMethod("Combat", "getParameter", LuaScriptInterface::luaCombatGetParameter);
registerMethod("Combat", "setFormula", LuaScriptInterface::luaCombatSetFormula);
registerMethod("Combat", "setArea", LuaScriptInterface::luaCombatSetArea);
registerMethod("Combat", "addCondition", LuaScriptInterface::luaCombatAddCondition);
registerMethod("Combat", "clearConditions", LuaScriptInterface::luaCombatClearConditions);
registerMethod("Combat", "setCallback", LuaScriptInterface::luaCombatSetCallback);
registerMethod("Combat", "setOrigin", LuaScriptInterface::luaCombatSetOrigin);
registerMethod("Combat", "execute", LuaScriptInterface::luaCombatExecute);
// Condition
registerClass("Condition", "", LuaScriptInterface::luaConditionCreate);
registerMetaMethod("Condition", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("Condition", "__gc", LuaScriptInterface::luaConditionDelete);
registerMethod("Condition", "getId", LuaScriptInterface::luaConditionGetId);
registerMethod("Condition", "getSubId", LuaScriptInterface::luaConditionGetSubId);
registerMethod("Condition", "getType", LuaScriptInterface::luaConditionGetType);
registerMethod("Condition", "getIcons", LuaScriptInterface::luaConditionGetIcons);
registerMethod("Condition", "getEndTime", LuaScriptInterface::luaConditionGetEndTime);
registerMethod("Condition", "clone", LuaScriptInterface::luaConditionClone);
registerMethod("Condition", "getTicks", LuaScriptInterface::luaConditionGetTicks);
registerMethod("Condition", "setTicks", LuaScriptInterface::luaConditionSetTicks);
registerMethod("Condition", "setParameter", LuaScriptInterface::luaConditionSetParameter);
registerMethod("Condition", "getParameter", LuaScriptInterface::luaConditionGetParameter);
registerMethod("Condition", "setFormula", LuaScriptInterface::luaConditionSetFormula);
registerMethod("Condition", "setOutfit", LuaScriptInterface::luaConditionSetOutfit);
registerMethod("Condition", "addDamage", LuaScriptInterface::luaConditionAddDamage);
// Outfit
registerClass("Outfit", "", LuaScriptInterface::luaOutfitCreate);
registerMetaMethod("Outfit", "__eq", LuaScriptInterface::luaOutfitCompare);
// MonsterType
registerClass("MonsterType", "", LuaScriptInterface::luaMonsterTypeCreate);
registerMetaMethod("MonsterType", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("MonsterType", "isAttackable", LuaScriptInterface::luaMonsterTypeIsAttackable);
registerMethod("MonsterType", "isChallengeable", LuaScriptInterface::luaMonsterTypeIsChallengeable);
registerMethod("MonsterType", "isConvinceable", LuaScriptInterface::luaMonsterTypeIsConvinceable);
registerMethod("MonsterType", "isSummonable", LuaScriptInterface::luaMonsterTypeIsSummonable);
registerMethod("MonsterType", "isIgnoringSpawnBlock", LuaScriptInterface::luaMonsterTypeIsIgnoringSpawnBlock);
registerMethod("MonsterType", "isIllusionable", LuaScriptInterface::luaMonsterTypeIsIllusionable);
registerMethod("MonsterType", "isHostile", LuaScriptInterface::luaMonsterTypeIsHostile);
registerMethod("MonsterType", "isPushable", LuaScriptInterface::luaMonsterTypeIsPushable);
registerMethod("MonsterType", "isHealthHidden", LuaScriptInterface::luaMonsterTypeIsHealthHidden);
registerMethod("MonsterType", "isBoss", LuaScriptInterface::luaMonsterTypeIsBoss);
registerMethod("MonsterType", "canPushItems", LuaScriptInterface::luaMonsterTypeCanPushItems);
registerMethod("MonsterType", "canPushCreatures", LuaScriptInterface::luaMonsterTypeCanPushCreatures);
registerMethod("MonsterType", "canWalkOnEnergy", LuaScriptInterface::luaMonsterTypeCanWalkOnEnergy);
registerMethod("MonsterType", "canWalkOnFire", LuaScriptInterface::luaMonsterTypeCanWalkOnFire);
registerMethod("MonsterType", "canWalkOnPoison", LuaScriptInterface::luaMonsterTypeCanWalkOnPoison);
registerMethod("MonsterType", "name", LuaScriptInterface::luaMonsterTypeName);
registerMethod("MonsterType", "nameDescription", LuaScriptInterface::luaMonsterTypeNameDescription);
registerMethod("MonsterType", "health", LuaScriptInterface::luaMonsterTypeHealth);
registerMethod("MonsterType", "maxHealth", LuaScriptInterface::luaMonsterTypeMaxHealth);
registerMethod("MonsterType", "runHealth", LuaScriptInterface::luaMonsterTypeRunHealth);
registerMethod("MonsterType", "experience", LuaScriptInterface::luaMonsterTypeExperience);
registerMethod("MonsterType", "skull", LuaScriptInterface::luaMonsterTypeSkull);
registerMethod("MonsterType", "combatImmunities", LuaScriptInterface::luaMonsterTypeCombatImmunities);
registerMethod("MonsterType", "conditionImmunities", LuaScriptInterface::luaMonsterTypeConditionImmunities);
registerMethod("MonsterType", "getAttackList", LuaScriptInterface::luaMonsterTypeGetAttackList);
registerMethod("MonsterType", "addAttack", LuaScriptInterface::luaMonsterTypeAddAttack);
registerMethod("MonsterType", "getDefenseList", LuaScriptInterface::luaMonsterTypeGetDefenseList);
registerMethod("MonsterType", "addDefense", LuaScriptInterface::luaMonsterTypeAddDefense);
registerMethod("MonsterType", "getElementList", LuaScriptInterface::luaMonsterTypeGetElementList);
registerMethod("MonsterType", "addElement", LuaScriptInterface::luaMonsterTypeAddElement);
registerMethod("MonsterType", "getVoices", LuaScriptInterface::luaMonsterTypeGetVoices);
registerMethod("MonsterType", "addVoice", LuaScriptInterface::luaMonsterTypeAddVoice);
registerMethod("MonsterType", "getLoot", LuaScriptInterface::luaMonsterTypeGetLoot);
registerMethod("MonsterType", "addLoot", LuaScriptInterface::luaMonsterTypeAddLoot);
registerMethod("MonsterType", "getCreatureEvents", LuaScriptInterface::luaMonsterTypeGetCreatureEvents);
registerMethod("MonsterType", "registerEvent", LuaScriptInterface::luaMonsterTypeRegisterEvent);
registerMethod("MonsterType", "eventType", LuaScriptInterface::luaMonsterTypeEventType);
registerMethod("MonsterType", "onThink", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onAppear", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onDisappear", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onMove", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "onSay", LuaScriptInterface::luaMonsterTypeEventOnCallback);
registerMethod("MonsterType", "getSummonList", LuaScriptInterface::luaMonsterTypeGetSummonList);
registerMethod("MonsterType", "addSummon", LuaScriptInterface::luaMonsterTypeAddSummon);
registerMethod("MonsterType", "maxSummons", LuaScriptInterface::luaMonsterTypeMaxSummons);
registerMethod("MonsterType", "armor", LuaScriptInterface::luaMonsterTypeArmor);
registerMethod("MonsterType", "defense", LuaScriptInterface::luaMonsterTypeDefense);
registerMethod("MonsterType", "outfit", LuaScriptInterface::luaMonsterTypeOutfit);
registerMethod("MonsterType", "race", LuaScriptInterface::luaMonsterTypeRace);
registerMethod("MonsterType", "corpseId", LuaScriptInterface::luaMonsterTypeCorpseId);
registerMethod("MonsterType", "manaCost", LuaScriptInterface::luaMonsterTypeManaCost);
registerMethod("MonsterType", "baseSpeed", LuaScriptInterface::luaMonsterTypeBaseSpeed);
registerMethod("MonsterType", "light", LuaScriptInterface::luaMonsterTypeLight);
registerMethod("MonsterType", "staticAttackChance", LuaScriptInterface::luaMonsterTypeStaticAttackChance);
registerMethod("MonsterType", "targetDistance", LuaScriptInterface::luaMonsterTypeTargetDistance);
registerMethod("MonsterType", "yellChance", LuaScriptInterface::luaMonsterTypeYellChance);
registerMethod("MonsterType", "yellSpeedTicks", LuaScriptInterface::luaMonsterTypeYellSpeedTicks);
registerMethod("MonsterType", "changeTargetChance", LuaScriptInterface::luaMonsterTypeChangeTargetChance);
registerMethod("MonsterType", "changeTargetSpeed", LuaScriptInterface::luaMonsterTypeChangeTargetSpeed);
// Loot
registerClass("Loot", "", LuaScriptInterface::luaCreateLoot);
registerMetaMethod("Loot", "__gc", LuaScriptInterface::luaDeleteLoot);
registerMethod("Loot", "delete", LuaScriptInterface::luaDeleteLoot);
registerMethod("Loot", "setId", LuaScriptInterface::luaLootSetId);
registerMethod("Loot", "setMaxCount", LuaScriptInterface::luaLootSetMaxCount);
registerMethod("Loot", "setSubType", LuaScriptInterface::luaLootSetSubType);
registerMethod("Loot", "setChance", LuaScriptInterface::luaLootSetChance);
registerMethod("Loot", "setActionId", LuaScriptInterface::luaLootSetActionId);
registerMethod("Loot", "setDescription", LuaScriptInterface::luaLootSetDescription);
registerMethod("Loot", "addChildLoot", LuaScriptInterface::luaLootAddChildLoot);
// MonsterSpell
registerClass("MonsterSpell", "", LuaScriptInterface::luaCreateMonsterSpell);
registerMetaMethod("MonsterSpell", "__gc", LuaScriptInterface::luaDeleteMonsterSpell);
registerMethod("MonsterSpell", "delete", LuaScriptInterface::luaDeleteMonsterSpell);
registerMethod("MonsterSpell", "setType", LuaScriptInterface::luaMonsterSpellSetType);
registerMethod("MonsterSpell", "setScriptName", LuaScriptInterface::luaMonsterSpellSetScriptName);
registerMethod("MonsterSpell", "setChance", LuaScriptInterface::luaMonsterSpellSetChance);
registerMethod("MonsterSpell", "setInterval", LuaScriptInterface::luaMonsterSpellSetInterval);
registerMethod("MonsterSpell", "setRange", LuaScriptInterface::luaMonsterSpellSetRange);
registerMethod("MonsterSpell", "setCombatValue", LuaScriptInterface::luaMonsterSpellSetCombatValue);
registerMethod("MonsterSpell", "setCombatType", LuaScriptInterface::luaMonsterSpellSetCombatType);
registerMethod("MonsterSpell", "setAttackValue", LuaScriptInterface::luaMonsterSpellSetAttackValue);
registerMethod("MonsterSpell", "setNeedTarget", LuaScriptInterface::luaMonsterSpellSetNeedTarget);
registerMethod("MonsterSpell", "setNeedDirection", LuaScriptInterface::luaMonsterSpellSetNeedDirection);
registerMethod("MonsterSpell", "setCombatLength", LuaScriptInterface::luaMonsterSpellSetCombatLength);
registerMethod("MonsterSpell", "setCombatSpread", LuaScriptInterface::luaMonsterSpellSetCombatSpread);
registerMethod("MonsterSpell", "setCombatRadius", LuaScriptInterface::luaMonsterSpellSetCombatRadius);
registerMethod("MonsterSpell", "setCombatRing", LuaScriptInterface::luaMonsterSpellSetCombatRing);
registerMethod("MonsterSpell", "setConditionType", LuaScriptInterface::luaMonsterSpellSetConditionType);
registerMethod("MonsterSpell", "setConditionDamage", LuaScriptInterface::luaMonsterSpellSetConditionDamage);
registerMethod("MonsterSpell", "setConditionSpeedChange", LuaScriptInterface::luaMonsterSpellSetConditionSpeedChange);
registerMethod("MonsterSpell", "setConditionDuration", LuaScriptInterface::luaMonsterSpellSetConditionDuration);
registerMethod("MonsterSpell", "setConditionDrunkenness", LuaScriptInterface::luaMonsterSpellSetConditionDrunkenness);
registerMethod("MonsterSpell", "setConditionTickInterval", LuaScriptInterface::luaMonsterSpellSetConditionTickInterval);
registerMethod("MonsterSpell", "setCombatShootEffect", LuaScriptInterface::luaMonsterSpellSetCombatShootEffect);
registerMethod("MonsterSpell", "setCombatEffect", LuaScriptInterface::luaMonsterSpellSetCombatEffect);
// Party
registerClass("Party", "", LuaScriptInterface::luaPartyCreate);
registerMetaMethod("Party", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Party", "disband", LuaScriptInterface::luaPartyDisband);
registerMethod("Party", "getLeader", LuaScriptInterface::luaPartyGetLeader);
registerMethod("Party", "setLeader", LuaScriptInterface::luaPartySetLeader);
registerMethod("Party", "getMembers", LuaScriptInterface::luaPartyGetMembers);
registerMethod("Party", "getMemberCount", LuaScriptInterface::luaPartyGetMemberCount);
registerMethod("Party", "getInvitees", LuaScriptInterface::luaPartyGetInvitees);
registerMethod("Party", "getInviteeCount", LuaScriptInterface::luaPartyGetInviteeCount);
registerMethod("Party", "addInvite", LuaScriptInterface::luaPartyAddInvite);
registerMethod("Party", "removeInvite", LuaScriptInterface::luaPartyRemoveInvite);
registerMethod("Party", "addMember", LuaScriptInterface::luaPartyAddMember);
registerMethod("Party", "removeMember", LuaScriptInterface::luaPartyRemoveMember);
registerMethod("Party", "isSharedExperienceActive", LuaScriptInterface::luaPartyIsSharedExperienceActive);
registerMethod("Party", "isSharedExperienceEnabled", LuaScriptInterface::luaPartyIsSharedExperienceEnabled);
registerMethod("Party", "shareExperience", LuaScriptInterface::luaPartyShareExperience);
registerMethod("Party", "setSharedExperience", LuaScriptInterface::luaPartySetSharedExperience);
// Spells
registerClass("Spell", "", LuaScriptInterface::luaSpellCreate);
registerMetaMethod("Spell", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMethod("Spell", "onCastSpell", LuaScriptInterface::luaSpellOnCastSpell);
registerMethod("Spell", "register", LuaScriptInterface::luaSpellRegister);
registerMethod("Spell", "name", LuaScriptInterface::luaSpellName);
registerMethod("Spell", "id", LuaScriptInterface::luaSpellId);
registerMethod("Spell", "group", LuaScriptInterface::luaSpellGroup);
registerMethod("Spell", "cooldown", LuaScriptInterface::luaSpellCooldown);
registerMethod("Spell", "groupCooldown", LuaScriptInterface::luaSpellGroupCooldown);
registerMethod("Spell", "level", LuaScriptInterface::luaSpellLevel);
registerMethod("Spell", "magicLevel", LuaScriptInterface::luaSpellMagicLevel);
registerMethod("Spell", "mana", LuaScriptInterface::luaSpellMana);
registerMethod("Spell", "manaPercent", LuaScriptInterface::luaSpellManaPercent);
registerMethod("Spell", "soul", LuaScriptInterface::luaSpellSoul);
registerMethod("Spell", "range", LuaScriptInterface::luaSpellRange);
registerMethod("Spell", "isPremium", LuaScriptInterface::luaSpellPremium);
registerMethod("Spell", "isEnabled", LuaScriptInterface::luaSpellEnabled);
registerMethod("Spell", "needTarget", LuaScriptInterface::luaSpellNeedTarget);
registerMethod("Spell", "needWeapon", LuaScriptInterface::luaSpellNeedWeapon);
registerMethod("Spell", "needLearn", LuaScriptInterface::luaSpellNeedLearn);
registerMethod("Spell", "isSelfTarget", LuaScriptInterface::luaSpellSelfTarget);
registerMethod("Spell", "isBlocking", LuaScriptInterface::luaSpellBlocking);
registerMethod("Spell", "isAggressive", LuaScriptInterface::luaSpellAggressive);
registerMethod("Spell", "isPzLock", LuaScriptInterface::luaSpellPzLock);
registerMethod("Spell", "vocation", LuaScriptInterface::luaSpellVocation);
// only for InstantSpell
registerMethod("Spell", "words", LuaScriptInterface::luaSpellWords);
registerMethod("Spell", "needDirection", LuaScriptInterface::luaSpellNeedDirection);
registerMethod("Spell", "hasParams", LuaScriptInterface::luaSpellHasParams);
registerMethod("Spell", "hasPlayerNameParam", LuaScriptInterface::luaSpellHasPlayerNameParam);
registerMethod("Spell", "needCasterTargetOrDirection", LuaScriptInterface::luaSpellNeedCasterTargetOrDirection);
registerMethod("Spell", "isBlockingWalls", LuaScriptInterface::luaSpellIsBlockingWalls);
// only for RuneSpell
registerMethod("Spell", "runeLevel", LuaScriptInterface::luaSpellRuneLevel);
registerMethod("Spell", "runeMagicLevel", LuaScriptInterface::luaSpellRuneMagicLevel);
registerMethod("Spell", "runeId", LuaScriptInterface::luaSpellRuneId);
registerMethod("Spell", "charges", LuaScriptInterface::luaSpellCharges);
registerMethod("Spell", "allowFarUse", LuaScriptInterface::luaSpellAllowFarUse);
registerMethod("Spell", "blockWalls", LuaScriptInterface::luaSpellBlockWalls);
registerMethod("Spell", "checkFloor", LuaScriptInterface::luaSpellCheckFloor);
// Action
registerClass("Action", "", LuaScriptInterface::luaCreateAction);
registerMethod("Action", "onUse", LuaScriptInterface::luaActionOnUse);
registerMethod("Action", "register", LuaScriptInterface::luaActionRegister);
registerMethod("Action", "id", LuaScriptInterface::luaActionItemId);
registerMethod("Action", "aid", LuaScriptInterface::luaActionActionId);
registerMethod("Action", "uid", LuaScriptInterface::luaActionUniqueId);
registerMethod("Action", "allowFarUse", LuaScriptInterface::luaActionAllowFarUse);
registerMethod("Action", "blockWalls", LuaScriptInterface::luaActionBlockWalls);
registerMethod("Action", "checkFloor", LuaScriptInterface::luaActionCheckFloor);
// TalkAction
registerClass("TalkAction", "", LuaScriptInterface::luaCreateTalkaction);
registerMethod("TalkAction", "onSay", LuaScriptInterface::luaTalkactionOnSay);
registerMethod("TalkAction", "register", LuaScriptInterface::luaTalkactionRegister);
registerMethod("TalkAction", "separator", LuaScriptInterface::luaTalkactionSeparator);
registerMethod("TalkAction", "access", LuaScriptInterface::luaTalkactionAccess);
registerMethod("TalkAction", "accountType", LuaScriptInterface::luaTalkactionAccountType);
// CreatureEvent
registerClass("CreatureEvent", "", LuaScriptInterface::luaCreateCreatureEvent);
registerMethod("CreatureEvent", "type", LuaScriptInterface::luaCreatureEventType);
registerMethod("CreatureEvent", "register", LuaScriptInterface::luaCreatureEventRegister);
registerMethod("CreatureEvent", "onLogin", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onLogout", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onThink", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onPrepareDeath", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onDeath", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onKill", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onAdvance", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onModalWindow", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onTextEdit", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onHealthChange", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onManaChange", LuaScriptInterface::luaCreatureEventOnCallback);
registerMethod("CreatureEvent", "onExtendedOpcode", LuaScriptInterface::luaCreatureEventOnCallback);
// MoveEvent
registerClass("MoveEvent", "", LuaScriptInterface::luaCreateMoveEvent);
registerMethod("MoveEvent", "type", LuaScriptInterface::luaMoveEventType);
registerMethod("MoveEvent", "register", LuaScriptInterface::luaMoveEventRegister);
registerMethod("MoveEvent", "level", LuaScriptInterface::luaMoveEventLevel);
registerMethod("MoveEvent", "magicLevel", LuaScriptInterface::luaMoveEventMagLevel);
registerMethod("MoveEvent", "slot", LuaScriptInterface::luaMoveEventSlot);
registerMethod("MoveEvent", "id", LuaScriptInterface::luaMoveEventItemId);
registerMethod("MoveEvent", "aid", LuaScriptInterface::luaMoveEventActionId);
registerMethod("MoveEvent", "uid", LuaScriptInterface::luaMoveEventUniqueId);
registerMethod("MoveEvent", "position", LuaScriptInterface::luaMoveEventPosition);
registerMethod("MoveEvent", "premium", LuaScriptInterface::luaMoveEventPremium);
registerMethod("MoveEvent", "vocation", LuaScriptInterface::luaMoveEventVocation);
registerMethod("MoveEvent", "tileItem", LuaScriptInterface::luaMoveEventTileItem);
registerMethod("MoveEvent", "onEquip", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onDeEquip", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onStepIn", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onStepOut", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onAddItem", LuaScriptInterface::luaMoveEventOnCallback);
registerMethod("MoveEvent", "onRemoveItem", LuaScriptInterface::luaMoveEventOnCallback);
// GlobalEvent
registerClass("GlobalEvent", "", LuaScriptInterface::luaCreateGlobalEvent);
registerMethod("GlobalEvent", "type", LuaScriptInterface::luaGlobalEventType);
registerMethod("GlobalEvent", "register", LuaScriptInterface::luaGlobalEventRegister);
registerMethod("GlobalEvent", "time", LuaScriptInterface::luaGlobalEventTime);
registerMethod("GlobalEvent", "interval", LuaScriptInterface::luaGlobalEventInterval);
registerMethod("GlobalEvent", "onThink", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onTime", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onStartup", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onShutdown", LuaScriptInterface::luaGlobalEventOnCallback);
registerMethod("GlobalEvent", "onRecord", LuaScriptInterface::luaGlobalEventOnCallback);
// Weapon
registerClass("Weapon", "", LuaScriptInterface::luaCreateWeapon);
registerMethod("Weapon", "action", LuaScriptInterface::luaWeaponAction);
registerMethod("Weapon", "register", LuaScriptInterface::luaWeaponRegister);
registerMethod("Weapon", "id", LuaScriptInterface::luaWeaponId);
registerMethod("Weapon", "level", LuaScriptInterface::luaWeaponLevel);
registerMethod("Weapon", "magicLevel", LuaScriptInterface::luaWeaponMagicLevel);
registerMethod("Weapon", "mana", LuaScriptInterface::luaWeaponMana);
registerMethod("Weapon", "manaPercent", LuaScriptInterface::luaWeaponManaPercent);
registerMethod("Weapon", "health", LuaScriptInterface::luaWeaponHealth);
registerMethod("Weapon", "healthPercent", LuaScriptInterface::luaWeaponHealthPercent);
registerMethod("Weapon", "soul", LuaScriptInterface::luaWeaponSoul);
registerMethod("Weapon", "breakChance", LuaScriptInterface::luaWeaponBreakChance);
registerMethod("Weapon", "premium", LuaScriptInterface::luaWeaponPremium);
registerMethod("Weapon", "wieldUnproperly", LuaScriptInterface::luaWeaponUnproperly);
registerMethod("Weapon", "vocation", LuaScriptInterface::luaWeaponVocation);
registerMethod("Weapon", "onUseWeapon", LuaScriptInterface::luaWeaponOnUseWeapon);
registerMethod("Weapon", "element", LuaScriptInterface::luaWeaponElement);
registerMethod("Weapon", "attack", LuaScriptInterface::luaWeaponAttack);
registerMethod("Weapon", "defense", LuaScriptInterface::luaWeaponDefense);
registerMethod("Weapon", "range", LuaScriptInterface::luaWeaponRange);
registerMethod("Weapon", "charges", LuaScriptInterface::luaWeaponCharges);
registerMethod("Weapon", "duration", LuaScriptInterface::luaWeaponDuration);
registerMethod("Weapon", "decayTo", LuaScriptInterface::luaWeaponDecayTo);
registerMethod("Weapon", "transformEquipTo", LuaScriptInterface::luaWeaponTransformEquipTo);
registerMethod("Weapon", "transformDeEquipTo", LuaScriptInterface::luaWeaponTransformDeEquipTo);
registerMethod("Weapon", "slotType", LuaScriptInterface::luaWeaponSlotType);
registerMethod("Weapon", "hitChance", LuaScriptInterface::luaWeaponHitChance);
registerMethod("Weapon", "extraElement", LuaScriptInterface::luaWeaponExtraElement);
// exclusively for distance weapons
registerMethod("Weapon", "ammoType", LuaScriptInterface::luaWeaponAmmoType);
registerMethod("Weapon", "maxHitChance", LuaScriptInterface::luaWeaponMaxHitChance);
// exclusively for wands
registerMethod("Weapon", "damage", LuaScriptInterface::luaWeaponWandDamage);
// exclusively for wands & distance weapons
registerMethod("Weapon", "shootType", LuaScriptInterface::luaWeaponShootType);
}
#undef registerEnum
#undef registerEnumIn
void LuaScriptInterface::registerClass(const std::string& className, const std::string& baseClass, lua_CFunction newFunction/* = nullptr*/)
{
// className = {}
lua_newtable(luaState);
lua_pushvalue(luaState, -1);
lua_setglobal(luaState, className.c_str());
int methods = lua_gettop(luaState);
// methodsTable = {}
lua_newtable(luaState);
int methodsTable = lua_gettop(luaState);
if (newFunction) {
// className.__call = newFunction
lua_pushcfunction(luaState, newFunction);
lua_setfield(luaState, methodsTable, "__call");
}
uint32_t parents = 0;
if (!baseClass.empty()) {
lua_getglobal(luaState, baseClass.c_str());
lua_rawgeti(luaState, -1, 'p');
parents = getNumber<uint32_t>(luaState, -1) + 1;
lua_pop(luaState, 1);
lua_setfield(luaState, methodsTable, "__index");
}
// setmetatable(className, methodsTable)
lua_setmetatable(luaState, methods);
// className.metatable = {}
luaL_newmetatable(luaState, className.c_str());
int metatable = lua_gettop(luaState);
// className.metatable.__metatable = className
lua_pushvalue(luaState, methods);
lua_setfield(luaState, metatable, "__metatable");
// className.metatable.__index = className
lua_pushvalue(luaState, methods);
lua_setfield(luaState, metatable, "__index");
// className.metatable['h'] = hash
lua_pushnumber(luaState, std::hash<std::string>()(className));
lua_rawseti(luaState, metatable, 'h');
// className.metatable['p'] = parents
lua_pushnumber(luaState, parents);
lua_rawseti(luaState, metatable, 'p');
// className.metatable['t'] = type
if (className == "Item") {
lua_pushnumber(luaState, LuaData_Item);
} else if (className == "Container") {
lua_pushnumber(luaState, LuaData_Container);
} else if (className == "Teleport") {
lua_pushnumber(luaState, LuaData_Teleport);
} else if (className == "Player") {
lua_pushnumber(luaState, LuaData_Player);
} else if (className == "Monster") {
lua_pushnumber(luaState, LuaData_Monster);
} else if (className == "Npc") {
lua_pushnumber(luaState, LuaData_Npc);
} else if (className == "Tile") {
lua_pushnumber(luaState, LuaData_Tile);
} else {
lua_pushnumber(luaState, LuaData_Unknown);
}
lua_rawseti(luaState, metatable, 't');
// pop className, className.metatable
lua_pop(luaState, 2);
}
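// Illustrative sketch (comment only, not compiled): registerClass("Player", "Creature", f)
// roughly gives the Lua side:
//   Player = setmetatable({}, { __call = f, __index = Creature })
// plus a named metatable whose raw slots 'h' (class-name hash), 'p' (inheritance depth)
// and 't' (LuaDataType tag) back isType() and userdata dispatch.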
void LuaScriptInterface::registerTable(const std::string& tableName)
{
// _G[tableName] = {}
lua_newtable(luaState);
lua_setglobal(luaState, tableName.c_str());
}
void LuaScriptInterface::registerMethod(const std::string& globalName, const std::string& methodName, lua_CFunction func)
{
// globalName.methodName = func
lua_getglobal(luaState, globalName.c_str());
lua_pushcfunction(luaState, func);
lua_setfield(luaState, -2, methodName.c_str());
// pop globalName
lua_pop(luaState, 1);
}
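// e.g. registerMethod("Party", "addMember", ...) is the Lua equivalent of
//   Party.addMember = func
// so scripts can call party:addMember(player) through the userdata metatable's __index.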
void LuaScriptInterface::registerMetaMethod(const std::string& className, const std::string& methodName, lua_CFunction func)
{
// className.metatable.methodName = func
luaL_getmetatable(luaState, className.c_str());
lua_pushcfunction(luaState, func);
lua_setfield(luaState, -2, methodName.c_str());
// pop className.metatable
lua_pop(luaState, 1);
}
void LuaScriptInterface::registerGlobalMethod(const std::string& functionName, lua_CFunction func)
{
// _G[functionName] = func
lua_pushcfunction(luaState, func);
lua_setglobal(luaState, functionName.c_str());
}
void LuaScriptInterface::registerVariable(const std::string& tableName, const std::string& name, lua_Number value)
{
// tableName.name = value
lua_getglobal(luaState, tableName.c_str());
setField(luaState, name.c_str(), value);
// pop tableName
lua_pop(luaState, 1);
}
void LuaScriptInterface::registerGlobalVariable(const std::string& name, lua_Number value)
{
// _G[name] = value
lua_pushnumber(luaState, value);
lua_setglobal(luaState, name.c_str());
}
void LuaScriptInterface::registerGlobalBoolean(const std::string& name, bool value)
{
// _G[name] = value
pushBoolean(luaState, value);
lua_setglobal(luaState, name.c_str());
}
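// e.g. registerGlobalVariable("MY_CONSTANT", 1) (name hypothetical) behaves like the
// Lua assignment MY_CONSTANT = 1; registerGlobalBoolean does the same for booleans.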
int LuaScriptInterface::luaDoPlayerAddItem(lua_State* L)
{
//doPlayerAddItem(cid, itemid, <optional: default: 1> count/subtype, <optional: default: 1> canDropOnMap)
//doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype)
Player* player = getPlayer(L, 1);
if (!player) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t itemId = getNumber<uint16_t>(L, 2);
int32_t count = getNumber<int32_t>(L, 3, 1);
bool canDropOnMap = getBoolean(L, 4, true);
uint16_t subType = getNumber<uint16_t>(L, 5, 1);
const ItemType& it = Item::items[itemId];
int32_t itemCount;
auto parameters = lua_gettop(L);
if (parameters > 4) {
//subType is explicitly supplied, so count is the item count
itemCount = std::max<int32_t>(1, count);
} else if (it.hasSubType()) {
if (it.stackable) {
itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
} else {
itemCount = 1;
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
while (itemCount > 0) {
uint16_t stackCount = subType;
if (it.stackable && stackCount > 100) {
stackCount = 100;
}
Item* newItem = Item::CreateItem(itemId, stackCount);
if (!newItem) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (it.stackable) {
subType -= stackCount;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, newItem, canDropOnMap);
if (ret != RETURNVALUE_NOERROR) {
delete newItem;
pushBoolean(L, false);
return 1;
}
if (--itemCount == 0) {
if (newItem->getParent()) {
uint32_t uid = getScriptEnv()->addThing(newItem);
lua_pushnumber(L, uid);
return 1;
} else {
//stackable item stacked with existing object, newItem will be released
pushBoolean(L, false);
return 1;
}
}
}
pushBoolean(L, false);
return 1;
}
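// Illustrative Lua usage (item id hypothetical; see the signature comments above):
//   local uid = doPlayerAddItem(cid, 2160, 250)
// With a stackable item this creates ceil(250 / 100) stacks and returns the uid of the
// last item placed, or false on failure.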
int LuaScriptInterface::luaDebugPrint(lua_State* L)
{
//debugPrint(text)
reportErrorFunc(L, getString(L, -1));
return 0;
}
int LuaScriptInterface::luaGetWorldTime(lua_State* L)
{
//getWorldTime()
int16_t time = g_game.getWorldTime();
lua_pushnumber(L, time);
return 1;
}
int LuaScriptInterface::luaGetWorldLight(lua_State* L)
{
//getWorldLight()
LightInfo lightInfo = g_game.getWorldLightInfo();
lua_pushnumber(L, lightInfo.level);
lua_pushnumber(L, lightInfo.color);
return 2;
}
int LuaScriptInterface::luaSetWorldLight(lua_State* L)
{
//setWorldLight(level, color)
if (g_config.getBoolean(ConfigManager::DEFAULT_WORLD_LIGHT)) {
pushBoolean(L, false);
return 1;
}
LightInfo lightInfo;
lightInfo.level = getNumber<uint8_t>(L, 1);
lightInfo.color = getNumber<uint8_t>(L, 2);
g_game.setWorldLightInfo(lightInfo);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGetWorldUpTime(lua_State* L)
{
//getWorldUpTime()
uint64_t uptime = (OTSYS_TIME() - ProtocolStatus::start) / 1000;
lua_pushnumber(L, uptime);
return 1;
}
int LuaScriptInterface::luaGetSubTypeName(lua_State* L)
{
// getSubTypeName(subType)
int32_t subType = getNumber<int32_t>(L, 1);
if (subType > 0) {
pushString(L, Item::items[subType].name);
} else {
lua_pushnil(L);
}
return 1;
}
bool LuaScriptInterface::getArea(lua_State* L, std::vector<uint32_t>& vec, uint32_t& rows)
{
lua_pushnil(L);
for (rows = 0; lua_next(L, -2) != 0; ++rows) {
if (!isTable(L, -1)) {
return false;
}
lua_pushnil(L);
while (lua_next(L, -2) != 0) {
if (!isNumber(L, -1)) {
return false;
}
vec.push_back(getNumber<uint32_t>(L, -1));
lua_pop(L, 1);
}
lua_pop(L, 1);
}
lua_pop(L, 1);
return (rows != 0);
}
int LuaScriptInterface::luaCreateCombatArea(lua_State* L)
{
//createCombatArea( {area}, <optional> {extArea} )
ScriptEnvironment* env = getScriptEnv();
if (env->getScriptId() != EVENT_ID_LOADING) {
reportErrorFunc(L, "This function can only be used while loading the script.");
pushBoolean(L, false);
return 1;
}
uint32_t areaId = g_luaEnvironment.createAreaObject(env->getScriptInterface());
AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
int parameters = lua_gettop(L);
if (parameters >= 2) {
uint32_t rowsExtArea;
std::vector<uint32_t> vecExtArea;
if (!isTable(L, 2) || !getArea(L, vecExtArea, rowsExtArea)) {
reportErrorFunc(L, "Invalid extended area table.");
pushBoolean(L, false);
return 1;
}
area->setupExtArea(vecExtArea, rowsExtArea);
}
uint32_t rowsArea = 0;
std::vector<uint32_t> vecArea;
if (!isTable(L, 1) || !getArea(L, vecArea, rowsArea)) {
reportErrorFunc(L, "Invalid area table.");
pushBoolean(L, false);
return 1;
}
area->setupArea(vecArea, rowsArea);
lua_pushnumber(L, areaId);
return 1;
}
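// Sketch of the expected area table (usable at loading time only; 1 marks an affected
// cell and, by the usual TFS convention, 2 or 3 marks the origin):
//   local areaId = createCombatArea({
//     {0, 1, 0},
//     {1, 3, 1},
//     {0, 1, 0},
//   })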
int LuaScriptInterface::luaDoAreaCombat(lua_State* L)
{
//doAreaCombat(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint32_t areaId = getNumber<uint32_t>(L, 4);
const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
if (area || areaId == 0) {
CombatType_t combatType = getNumber<CombatType_t>(L, 2);
CombatParams params;
params.combatType = combatType;
params.impactEffect = getNumber<uint8_t>(L, 7);
params.blockedByArmor = getBoolean(L, 9, false);
params.blockedByShield = getBoolean(L, 10, false);
params.ignoreResistances = getBoolean(L, 11, false);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 8, ORIGIN_SPELL);
damage.primary.type = combatType;
damage.primary.value = normal_random(getNumber<int32_t>(L, 6), getNumber<int32_t>(L, 5));
Combat::doAreaCombat(creature, getPosition(L, 3), area, damage, params);
pushBoolean(L, true);
} else {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
pushBoolean(L, false);
}
return 1;
}
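// Hedged Lua sketch matching the signature above (enum names as registered by TFS):
//   doAreaCombat(0, COMBAT_FIREDAMAGE, pos, areaId, -100, -50, CONST_ME_FIREAREA)
// Note that min/max are read in (L, 6), (L, 5) order; normal_random is expected to
// order its bounds internally, so the damage roll is unaffected.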
int LuaScriptInterface::luaDoTargetCombat(lua_State* L)
{
//doTargetCombat(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL[, blockArmor = false[, blockShield = false[, ignoreResistances = false]]]])
Creature* creature = getCreature(L, 1);
if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
CombatType_t combatType = getNumber<CombatType_t>(L, 3);
CombatParams params;
params.combatType = combatType;
params.impactEffect = getNumber<uint8_t>(L, 6);
params.blockedByArmor = getBoolean(L, 8, false);
params.blockedByShield = getBoolean(L, 9, false);
params.ignoreResistances = getBoolean(L, 10, false);
CombatDamage damage;
damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
damage.primary.type = combatType;
damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));
Combat::doTargetCombat(creature, target, damage, params);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaDoChallengeCreature(lua_State* L)
{
//doChallengeCreature(cid, target[, force = false])
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Creature* target = getCreature(L, 2);
if (!target) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
target->challengeCreature(creature, getBoolean(L, 3, false));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaIsValidUID(lua_State* L)
{
//isValidUID(uid)
pushBoolean(L, getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1)) != nullptr);
return 1;
}
int LuaScriptInterface::luaIsDepot(lua_State* L)
{
//isDepot(uid)
Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, -1));
pushBoolean(L, container && container->getDepotLocker());
return 1;
}
int LuaScriptInterface::luaIsMoveable(lua_State* L)
{
//isMoveable(uid)
//isMovable(uid)
Thing* thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1));
pushBoolean(L, thing && thing->isPushable());
return 1;
}
int LuaScriptInterface::luaDoAddContainerItem(lua_State* L)
{
//doAddContainerItem(uid, itemid, <optional> count/subtype)
uint32_t uid = getNumber<uint32_t>(L, 1);
ScriptEnvironment* env = getScriptEnv();
Container* container = env->getContainerByUID(uid);
if (!container) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
uint16_t itemId = getNumber<uint16_t>(L, 2);
const ItemType& it = Item::items[itemId];
int32_t itemCount = 1;
int32_t subType = 1;
uint32_t count = getNumber<uint32_t>(L, 3, 1);
if (it.hasSubType()) {
if (it.stackable) {
itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
while (itemCount > 0) {
int32_t stackCount = std::min<int32_t>(100, subType);
Item* newItem = Item::CreateItem(itemId, stackCount);
if (!newItem) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (it.stackable) {
subType -= stackCount;
}
ReturnValue ret = g_game.internalAddItem(container, newItem);
if (ret != RETURNVALUE_NOERROR) {
delete newItem;
pushBoolean(L, false);
return 1;
}
if (--itemCount == 0) {
if (newItem->getParent()) {
lua_pushnumber(L, env->addThing(newItem));
} else {
//stackable item stacked with existing object, newItem will be released
pushBoolean(L, false);
}
return 1;
}
}
pushBoolean(L, false);
return 1;
}
int LuaScriptInterface::luaGetDepotId(lua_State* L)
{
//getDepotId(uid)
uint32_t uid = getNumber<uint32_t>(L, -1);
Container* container = getScriptEnv()->getContainerByUID(uid);
if (!container) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
DepotLocker* depotLocker = container->getDepotLocker();
if (!depotLocker) {
reportErrorFunc(L, "Depot not found");
pushBoolean(L, false);
return 1;
}
lua_pushnumber(L, depotLocker->getDepotId());
return 1;
}
int LuaScriptInterface::luaAddEvent(lua_State* L)
{
//addEvent(callback, delay, ...)
int parameters = lua_gettop(L);
if (parameters < 2) {
reportErrorFunc(L, fmt::format("Not enough parameters: {:d}.", parameters));
pushBoolean(L, false);
return 1;
}
if (!isFunction(L, 1)) {
reportErrorFunc(L, "callback parameter should be a function.");
pushBoolean(L, false);
return 1;
}
if (!isNumber(L, 2)) {
reportErrorFunc(L, "delay parameter should be a number.");
pushBoolean(L, false);
return 1;
}
if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS) || g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
std::vector<std::pair<int32_t, LuaDataType>> indexes;
for (int i = 3; i <= parameters; ++i) {
if (lua_getmetatable(L, i) == 0) {
continue;
}
lua_rawgeti(L, -1, 't');
LuaDataType type = getNumber<LuaDataType>(L, -1);
if (type != LuaData_Unknown && type != LuaData_Tile) {
indexes.push_back({i, type});
}
lua_pop(L, 2);
}
if (!indexes.empty()) {
if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS)) {
bool plural = indexes.size() > 1;
std::string warningString = "Argument";
if (plural) {
warningString += 's';
}
for (const auto& entry : indexes) {
if (entry == indexes.front()) {
warningString += ' ';
} else if (entry == indexes.back()) {
warningString += " and ";
} else {
warningString += ", ";
}
warningString += '#';
warningString += std::to_string(entry.first);
}
if (plural) {
warningString += " are unsafe";
} else {
warningString += " is unsafe";
}
reportErrorFunc(L, warningString);
}
if (g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
for (const auto& entry : indexes) {
switch (entry.second) {
case LuaData_Item:
case LuaData_Container:
case LuaData_Teleport: {
lua_getglobal(L, "Item");
lua_getfield(L, -1, "getUniqueId");
break;
}
case LuaData_Player:
case LuaData_Monster:
case LuaData_Npc: {
lua_getglobal(L, "Creature");
lua_getfield(L, -1, "getId");
break;
}
default:
break;
}
lua_replace(L, -2);
lua_pushvalue(L, entry.first);
lua_call(L, 1, 1);
lua_replace(L, entry.first);
}
}
}
}
LuaTimerEventDesc eventDesc;
eventDesc.parameters.reserve(parameters - 2); // safe to use -2 since we guaranteed that there are at least two parameters
for (int i = 0; i < parameters - 2; ++i) {
eventDesc.parameters.push_back(luaL_ref(L, LUA_REGISTRYINDEX));
}
uint32_t delay = std::max<uint32_t>(100, getNumber<uint32_t>(L, 2));
lua_pop(L, 1);
eventDesc.function = luaL_ref(L, LUA_REGISTRYINDEX);
eventDesc.scriptId = getScriptEnv()->getScriptId();
auto& lastTimerEventId = g_luaEnvironment.lastEventTimerId;
eventDesc.eventId = g_scheduler.addEvent(createSchedulerTask(
delay, std::bind(&LuaEnvironment::executeTimerEvent, &g_luaEnvironment, lastTimerEventId)
));
g_luaEnvironment.timerEvents.emplace(lastTimerEventId, std::move(eventDesc));
lua_pushnumber(L, lastTimerEventId++);
return 1;
}
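// Minimal Lua sketch:
//   local eventId = addEvent(function(text) print(text) end, 1000, "tick")
// The delay is clamped to at least 100ms, and the extra arguments are kept as registry
// references until the event fires.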
int LuaScriptInterface::luaStopEvent(lua_State* L)
{
//stopEvent(eventid)
uint32_t eventId = getNumber<uint32_t>(L, 1);
auto& timerEvents = g_luaEnvironment.timerEvents;
auto it = timerEvents.find(eventId);
if (it == timerEvents.end()) {
pushBoolean(L, false);
return 1;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
g_scheduler.stopEvent(timerEventDesc.eventId);
luaL_unref(L, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(L, LUA_REGISTRYINDEX, parameter);
}
pushBoolean(L, true);
return 1;
}
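// Counterpart sketch: stopEvent(eventId) cancels a pending addEvent timer and releases
// its registry references.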
int LuaScriptInterface::luaSaveServer(lua_State* L)
{
g_game.saveGameState();
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCleanMap(lua_State* L)
{
lua_pushnumber(L, g_game.map.clean());
return 1;
}
int LuaScriptInterface::luaIsInWar(lua_State* L)
{
//isInWar(cid, target)
Player* player = getPlayer(L, 1);
if (!player) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Player* targetPlayer = getPlayer(L, 2);
if (!targetPlayer) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, player->isInWar(targetPlayer));
return 1;
}
int LuaScriptInterface::luaGetWaypointPositionByName(lua_State* L)
{
//getWaypointPositionByName(name)
auto& waypoints = g_game.map.waypoints;
auto it = waypoints.find(getString(L, -1));
if (it != waypoints.end()) {
pushPosition(L, it->second);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaSendChannelMessage(lua_State* L)
{
//sendChannelMessage(channelId, type, message)
uint32_t channelId = getNumber<uint32_t>(L, 1);
ChatChannel* channel = g_chat->getChannelById(channelId);
if (!channel) {
pushBoolean(L, false);
return 1;
}
SpeakClasses type = getNumber<SpeakClasses>(L, 2);
std::string message = getString(L, 3);
channel->sendToAll(message, type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaSendGuildChannelMessage(lua_State* L)
{
//sendGuildChannelMessage(guildId, type, message)
uint32_t guildId = getNumber<uint32_t>(L, 1);
ChatChannel* channel = g_chat->getGuildChannelById(guildId);
if (!channel) {
pushBoolean(L, false);
return 1;
}
SpeakClasses type = getNumber<SpeakClasses>(L, 2);
std::string message = getString(L, 3);
channel->sendToAll(message, type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaIsScriptsInterface(lua_State* L)
{
//isScriptsInterface()
if (getScriptEnv()->getScriptInterface() == &g_scripts->getScriptInterface()) {
pushBoolean(L, true);
} else {
reportErrorFunc(L, "EventCallback: can only be called inside (data/scripts/)");
pushBoolean(L, false);
}
return 1;
}
std::string LuaScriptInterface::escapeString(const std::string& string)
{
std::string s = string;
replaceString(s, "\\", "\\\\");
replaceString(s, "\"", "\\\"");
replaceString(s, "'", "\\'");
replaceString(s, "[[", "\\[[");
return s;
}
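// e.g. escapeString([[he said "hi"]]) yields: he said \"hi\"
// (backslashes, quotes and "[[" are escaped so the result is safe to embed in Lua source).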
#ifndef LUAJIT_VERSION
const luaL_Reg LuaScriptInterface::luaBitReg[] = {
//{"tobit", LuaScriptInterface::luaBitToBit},
{"bnot", LuaScriptInterface::luaBitNot},
{"band", LuaScriptInterface::luaBitAnd},
{"bor", LuaScriptInterface::luaBitOr},
{"bxor", LuaScriptInterface::luaBitXor},
{"lshift", LuaScriptInterface::luaBitLeftShift},
{"rshift", LuaScriptInterface::luaBitRightShift},
//{"arshift", LuaScriptInterface::luaBitArithmeticalRightShift},
//{"rol", LuaScriptInterface::luaBitRotateLeft},
//{"ror", LuaScriptInterface::luaBitRotateRight},
//{"bswap", LuaScriptInterface::luaBitSwapEndian},
//{"tohex", LuaScriptInterface::luaBitToHex},
{nullptr, nullptr}
};
int LuaScriptInterface::luaBitNot(lua_State* L)
{
lua_pushnumber(L, ~getNumber<uint32_t>(L, -1));
return 1;
}
#define MULTIOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
int n = lua_gettop(L); \
uint32_t w = getNumber<uint32_t>(L, -1); \
for (int i = 1; i < n; ++i) \
w op getNumber<uint32_t>(L, i); \
lua_pushnumber(L, w); \
return 1; \
}
MULTIOP(And, &= )
MULTIOP(Or, |= )
MULTIOP(Xor, ^= )
#define SHIFTOP(name, op) \
int LuaScriptInterface::luaBit##name(lua_State* L) \
{ \
uint32_t n1 = getNumber<uint32_t>(L, 1), n2 = getNumber<uint32_t>(L, 2); \
lua_pushnumber(L, (n1 op n2)); \
return 1; \
}
SHIFTOP(LeftShift, << )
SHIFTOP(RightShift, >> )
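// e.g. with these fallbacks registered under the usual "bit" table,
// bit.band(0xF0, 0x3C) == 0x30 and bit.lshift(1, 4) == 16, mirroring the subset of
// LuaJIT's bit library exposed here.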
#endif
const luaL_Reg LuaScriptInterface::luaConfigManagerTable[] = {
{"getString", LuaScriptInterface::luaConfigManagerGetString},
{"getNumber", LuaScriptInterface::luaConfigManagerGetNumber},
{"getBoolean", LuaScriptInterface::luaConfigManagerGetBoolean},
{nullptr, nullptr}
};
int LuaScriptInterface::luaConfigManagerGetString(lua_State* L)
{
pushString(L, g_config.getString(getNumber<ConfigManager::string_config_t>(L, -1)));
return 1;
}
int LuaScriptInterface::luaConfigManagerGetNumber(lua_State* L)
{
lua_pushnumber(L, g_config.getNumber(getNumber<ConfigManager::integer_config_t>(L, -1)));
return 1;
}
int LuaScriptInterface::luaConfigManagerGetBoolean(lua_State* L)
{
pushBoolean(L, g_config.getBoolean(getNumber<ConfigManager::boolean_config_t>(L, -1)));
return 1;
}
const luaL_Reg LuaScriptInterface::luaDatabaseTable[] = {
{"query", LuaScriptInterface::luaDatabaseExecute},
{"asyncQuery", LuaScriptInterface::luaDatabaseAsyncExecute},
{"storeQuery", LuaScriptInterface::luaDatabaseStoreQuery},
{"asyncStoreQuery", LuaScriptInterface::luaDatabaseAsyncStoreQuery},
{"escapeString", LuaScriptInterface::luaDatabaseEscapeString},
{"escapeBlob", LuaScriptInterface::luaDatabaseEscapeBlob},
{"lastInsertId", LuaScriptInterface::luaDatabaseLastInsertId},
{"tableExists", LuaScriptInterface::luaDatabaseTableExists},
{nullptr, nullptr}
};
int LuaScriptInterface::luaDatabaseExecute(lua_State* L)
{
pushBoolean(L, Database::getInstance().executeQuery(getString(L, -1)));
return 1;
}
int LuaScriptInterface::luaDatabaseAsyncExecute(lua_State* L)
{
std::function<void(DBResult_ptr, bool)> callback;
if (lua_gettop(L) > 1) {
int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
auto scriptId = getScriptEnv()->getScriptId();
callback = [ref, scriptId](DBResult_ptr, bool success) {
lua_State* luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return;
}
if (!LuaScriptInterface::reserveScriptEnv()) {
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
return;
}
lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
pushBoolean(luaState, success);
auto env = getScriptEnv();
env->setScriptId(scriptId, &g_luaEnvironment);
g_luaEnvironment.callFunction(1);
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
};
}
g_databaseTasks.addTask(getString(L, -1), callback);
return 0;
}
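// Hedged Lua usage, assuming the table is exposed as "db" as in stock TFS (query text
// hypothetical):
//   db.asyncQuery("DELETE FROM `tablename` WHERE ...", function(success)
//     print(success)
//   end)
// The callback is optional; when present it receives a boolean success flag.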
int LuaScriptInterface::luaDatabaseStoreQuery(lua_State* L)
{
if (DBResult_ptr res = Database::getInstance().storeQuery(getString(L, -1))) {
lua_pushnumber(L, ScriptEnvironment::addResult(res));
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaDatabaseAsyncStoreQuery(lua_State* L)
{
std::function<void(DBResult_ptr, bool)> callback;
if (lua_gettop(L) > 1) {
int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
auto scriptId = getScriptEnv()->getScriptId();
callback = [ref, scriptId](DBResult_ptr result, bool) {
lua_State* luaState = g_luaEnvironment.getLuaState();
if (!luaState) {
return;
}
if (!LuaScriptInterface::reserveScriptEnv()) {
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
return;
}
lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
if (result) {
lua_pushnumber(luaState, ScriptEnvironment::addResult(result));
} else {
pushBoolean(luaState, false);
}
auto env = getScriptEnv();
env->setScriptId(scriptId, &g_luaEnvironment);
g_luaEnvironment.callFunction(1);
luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
};
}
g_databaseTasks.addTask(getString(L, -1), callback, true);
return 0;
}
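// Same shape for stored queries: the callback receives a result id, or false when the
// query produced no result set:
//   db.asyncStoreQuery("SELECT ...", function(resultId)
//     if resultId then result.free(resultId) end
//   end)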
int LuaScriptInterface::luaDatabaseEscapeString(lua_State* L)
{
pushString(L, Database::getInstance().escapeString(getString(L, -1)));
return 1;
}
int LuaScriptInterface::luaDatabaseEscapeBlob(lua_State* L)
{
uint32_t length = getNumber<uint32_t>(L, 2);
pushString(L, Database::getInstance().escapeBlob(getString(L, 1).c_str(), length));
return 1;
}
int LuaScriptInterface::luaDatabaseLastInsertId(lua_State* L)
{
lua_pushnumber(L, Database::getInstance().getLastInsertId());
return 1;
}
int LuaScriptInterface::luaDatabaseTableExists(lua_State* L)
{
pushBoolean(L, DatabaseManager::tableExists(getString(L, -1)));
return 1;
}
const luaL_Reg LuaScriptInterface::luaResultTable[] = {
{"getNumber", LuaScriptInterface::luaResultGetNumber},
{"getString", LuaScriptInterface::luaResultGetString},
{"getStream", LuaScriptInterface::luaResultGetStream},
{"next", LuaScriptInterface::luaResultNext},
{"free", LuaScriptInterface::luaResultFree},
{nullptr, nullptr}
};
int LuaScriptInterface::luaResultGetNumber(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
const std::string& s = getString(L, 2);
lua_pushnumber(L, res->getNumber<int64_t>(s));
return 1;
}
int LuaScriptInterface::luaResultGetString(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
const std::string& s = getString(L, 2);
pushString(L, res->getString(s));
return 1;
}
int LuaScriptInterface::luaResultGetStream(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
if (!res) {
pushBoolean(L, false);
return 1;
}
unsigned long length;
const char* stream = res->getStream(getString(L, 2), length);
lua_pushlstring(L, stream, length);
lua_pushnumber(L, length);
return 2;
}
int LuaScriptInterface::luaResultNext(lua_State* L)
{
DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, -1));
if (!res) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, res->next());
return 1;
}
int LuaScriptInterface::luaResultFree(lua_State* L)
{
pushBoolean(L, ScriptEnvironment::removeResult(getNumber<uint32_t>(L, -1)));
return 1;
}
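// Typical Lua pattern for the result table above (schema hypothetical):
//   local resultId = db.storeQuery("SELECT `name` FROM `players`")
//   if resultId then
//     repeat
//       print(result.getString(resultId, "name"))
//     until not result.next(resultId)
//     result.free(resultId)
//   end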
// Userdata
int LuaScriptInterface::luaUserdataCompare(lua_State* L)
{
// userdataA == userdataB
pushBoolean(L, getUserdata<void>(L, 1) == getUserdata<void>(L, 2));
return 1;
}
// _G
int LuaScriptInterface::luaIsType(lua_State* L)
{
// isType(derived, base)
lua_getmetatable(L, -2);
lua_getmetatable(L, -2);
lua_rawgeti(L, -2, 'p');
uint_fast8_t parentsB = getNumber<uint_fast8_t>(L, 1);
lua_rawgeti(L, -3, 'h');
size_t hashB = getNumber<size_t>(L, 1);
lua_rawgeti(L, -3, 'p');
uint_fast8_t parentsA = getNumber<uint_fast8_t>(L, 1);
for (uint_fast8_t i = parentsA; i < parentsB; ++i) {
lua_getfield(L, -3, "__index");
lua_replace(L, -4);
}
lua_rawgeti(L, -4, 'h');
size_t hashA = getNumber<size_t>(L, 1);
pushBoolean(L, hashA == hashB);
return 1;
}
int LuaScriptInterface::luaRawGetMetatable(lua_State* L)
{
// rawgetmetatable(metatableName)
luaL_getmetatable(L, getString(L, 1).c_str());
return 1;
}
// os
int LuaScriptInterface::luaSystemTime(lua_State* L)
{
// os.mtime()
lua_pushnumber(L, OTSYS_TIME());
return 1;
}
// table
int LuaScriptInterface::luaTableCreate(lua_State* L)
{
// table.create(arrayLength, keyLength)
lua_createtable(L, getNumber<int32_t>(L, 1), getNumber<int32_t>(L, 2));
return 1;
}
int LuaScriptInterface::luaTablePack(lua_State* L)
{
// table.pack(...)
int i;
int n = lua_gettop(L); /* number of elements to pack */
lua_createtable(L, n, 1); /* create result table */
lua_insert(L, 1); /* put it at index 1 */
for (i = n; i >= 1; i--) /* assign elements */
lua_rawseti(L, 1, i);
if (luaL_callmeta(L, -1, "__index") != 0) {
lua_replace(L, -2);
}
lua_pushinteger(L, n);
lua_setfield(L, 1, "n"); /* t.n = number of elements */
return 1; /* return table */
}
// Game
int LuaScriptInterface::luaGameGetSpectators(lua_State* L)
{
// Game.getSpectators(position[, multifloor = false[, onlyPlayer = false[, minRangeX = 0[, maxRangeX = 0[, minRangeY = 0[, maxRangeY = 0]]]]]])
const Position& position = getPosition(L, 1);
bool multifloor = getBoolean(L, 2, false);
bool onlyPlayers = getBoolean(L, 3, false);
int32_t minRangeX = getNumber<int32_t>(L, 4, 0);
int32_t maxRangeX = getNumber<int32_t>(L, 5, 0);
int32_t minRangeY = getNumber<int32_t>(L, 6, 0);
int32_t maxRangeY = getNumber<int32_t>(L, 7, 0);
SpectatorVec spectators;
g_game.map.getSpectators(spectators, position, multifloor, onlyPlayers, minRangeX, maxRangeX, minRangeY, maxRangeY);
lua_createtable(L, spectators.size(), 0);
int index = 0;
for (Creature* creature : spectators) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetPlayers(lua_State* L)
{
// Game.getPlayers()
lua_createtable(L, g_game.getPlayersOnline(), 0);
int index = 0;
for (const auto& playerEntry : g_game.getPlayers()) {
pushUserdata<Player>(L, playerEntry.second);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameLoadMap(lua_State* L)
{
// Game.loadMap(path)
const std::string& path = getString(L, 1);
g_dispatcher.addTask(createTask([path]() {
try {
g_game.loadMap(path);
} catch (const std::exception& e) {
// FIXME: Should only catch some exceptions
std::cout << "[Error - LuaScriptInterface::luaGameLoadMap] Failed to load map: "
<< e.what() << std::endl;
}
}));
return 0;
}
int LuaScriptInterface::luaGameGetExperienceStage(lua_State* L)
{
// Game.getExperienceStage(level)
uint32_t level = getNumber<uint32_t>(L, 1);
lua_pushnumber(L, g_config.getExperienceStage(level));
return 1;
}
int LuaScriptInterface::luaGameGetExperienceForLevel(lua_State* L)
{
// Game.getExperienceForLevel(level)
const uint32_t level = getNumber<uint32_t>(L, 1);
if (level == 0) {
lua_pushnumber(L, 0);
} else {
lua_pushnumber(L, Player::getExpForLevel(level));
}
return 1;
}
int LuaScriptInterface::luaGameGetMonsterCount(lua_State* L)
{
// Game.getMonsterCount()
lua_pushnumber(L, g_game.getMonstersOnline());
return 1;
}
int LuaScriptInterface::luaGameGetPlayerCount(lua_State* L)
{
// Game.getPlayerCount()
lua_pushnumber(L, g_game.getPlayersOnline());
return 1;
}
int LuaScriptInterface::luaGameGetNpcCount(lua_State* L)
{
// Game.getNpcCount()
lua_pushnumber(L, g_game.getNpcsOnline());
return 1;
}
int LuaScriptInterface::luaGameGetMonsterTypes(lua_State* L)
{
// Game.getMonsterTypes()
auto& type = g_monsters.monsters;
lua_createtable(L, type.size(), 0);
for (auto& mType : type) {
pushUserdata<MonsterType>(L, &mType.second);
setMetatable(L, -1, "MonsterType");
lua_setfield(L, -2, mType.first.c_str());
}
return 1;
}
int LuaScriptInterface::luaGameGetTowns(lua_State* L)
{
// Game.getTowns()
const auto& towns = g_game.map.towns.getTowns();
lua_createtable(L, towns.size(), 0);
int index = 0;
for (auto townEntry : towns) {
pushUserdata<Town>(L, townEntry.second);
setMetatable(L, -1, "Town");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetHouses(lua_State* L)
{
// Game.getHouses()
const auto& houses = g_game.map.houses.getHouses();
lua_createtable(L, houses.size(), 0);
int index = 0;
for (auto houseEntry : houses) {
pushUserdata<House>(L, houseEntry.second);
setMetatable(L, -1, "House");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGameGetGameState(lua_State* L)
{
// Game.getGameState()
lua_pushnumber(L, g_game.getGameState());
return 1;
}
int LuaScriptInterface::luaGameSetGameState(lua_State* L)
{
// Game.setGameState(state)
GameState_t state = getNumber<GameState_t>(L, 1);
g_game.setGameState(state);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameGetWorldType(lua_State* L)
{
// Game.getWorldType()
lua_pushnumber(L, g_game.getWorldType());
return 1;
}
int LuaScriptInterface::luaGameSetWorldType(lua_State* L)
{
// Game.setWorldType(type)
WorldType_t type = getNumber<WorldType_t>(L, 1);
g_game.setWorldType(type);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameGetReturnMessage(lua_State* L)
{
// Game.getReturnMessage(value)
ReturnValue value = getNumber<ReturnValue>(L, 1);
pushString(L, getReturnMessage(value));
return 1;
}
int LuaScriptInterface::luaGameGetItemAttributeByName(lua_State* L)
{
// Game.getItemAttributeByName(name)
lua_pushnumber(L, stringToItemAttribute(getString(L, 1)));
return 1;
}
int LuaScriptInterface::luaGameCreateItem(lua_State* L)
{
// Game.createItem(itemId[, count[, position]])
uint16_t count = getNumber<uint16_t>(L, 2, 1);
uint16_t id;
if (isNumber(L, 1)) {
id = getNumber<uint16_t>(L, 1);
} else {
id = Item::items.getItemIdByName(getString(L, 1));
if (id == 0) {
lua_pushnil(L);
return 1;
}
}
const ItemType& it = Item::items[id];
if (it.stackable) {
count = std::min<uint16_t>(count, 100);
}
Item* item = Item::CreateItem(id, count);
if (!item) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) >= 3) {
const Position& position = getPosition(L, 3);
Tile* tile = g_game.map.getTile(position);
if (!tile) {
delete item;
lua_pushnil(L);
return 1;
}
g_game.internalAddItem(tile, item, INDEX_WHEREEVER, FLAG_NOLIMIT);
} else {
getScriptEnv()->addTempItem(item);
item->setParent(VirtualCylinder::virtualCylinder);
}
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
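// Hedged Lua sketch (item id hypothetical):
//   local item = Game.createItem(2400, 1, Position(100, 100, 7))
// Without a position the item is parented to the virtual cylinder as a temporary item,
// so the script is expected to move it somewhere itself.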
int LuaScriptInterface::luaGameCreateContainer(lua_State* L)
{
// Game.createContainer(itemId, size[, position])
uint16_t size = getNumber<uint16_t>(L, 2);
uint16_t id;
if (isNumber(L, 1)) {
id = getNumber<uint16_t>(L, 1);
} else {
id = Item::items.getItemIdByName(getString(L, 1));
if (id == 0) {
lua_pushnil(L);
return 1;
}
}
Container* container = Item::CreateItemAsContainer(id, size);
if (!container) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) >= 3) {
const Position& position = getPosition(L, 3);
Tile* tile = g_game.map.getTile(position);
if (!tile) {
delete container;
lua_pushnil(L);
return 1;
}
g_game.internalAddItem(tile, container, INDEX_WHEREEVER, FLAG_NOLIMIT);
} else {
getScriptEnv()->addTempItem(container);
container->setParent(VirtualCylinder::virtualCylinder);
}
pushUserdata<Container>(L, container);
setMetatable(L, -1, "Container");
return 1;
}
int LuaScriptInterface::luaGameCreateMonster(lua_State* L)
{
// Game.createMonster(monsterName, position[, extended = false[, force = false]])
Monster* monster = Monster::createMonster(getString(L, 1));
if (!monster) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
bool extended = getBoolean(L, 3, false);
bool force = getBoolean(L, 4, false);
if (g_events->eventMonsterOnSpawn(monster, position, false, true) || force) {
if (g_game.placeCreature(monster, position, extended, force)) {
pushUserdata<Monster>(L, monster);
setMetatable(L, -1, "Monster");
} else {
delete monster;
lua_pushnil(L);
}
} else {
delete monster;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGameCreateNpc(lua_State* L)
{
// Game.createNpc(npcName, position[, extended = false[, force = false]])
Npc* npc = Npc::createNpc(getString(L, 1));
if (!npc) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
bool extended = getBoolean(L, 3, false);
bool force = getBoolean(L, 4, false);
if (g_game.placeCreature(npc, position, extended, force)) {
pushUserdata<Npc>(L, npc);
setMetatable(L, -1, "Npc");
} else {
delete npc;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGameCreateTile(lua_State* L)
{
// Game.createTile(x, y, z[, isDynamic = false])
// Game.createTile(position[, isDynamic = false])
Position position;
bool isDynamic;
if (isTable(L, 1)) {
position = getPosition(L, 1);
isDynamic = getBoolean(L, 2, false);
} else {
position.x = getNumber<uint16_t>(L, 1);
position.y = getNumber<uint16_t>(L, 2);
position.z = getNumber<uint16_t>(L, 3);
isDynamic = getBoolean(L, 4, false);
}
Tile* tile = g_game.map.getTile(position);
if (!tile) {
if (isDynamic) {
tile = new DynamicTile(position.x, position.y, position.z);
} else {
tile = new StaticTile(position.x, position.y, position.z);
}
g_game.map.setTile(position, tile);
}
pushUserdata(L, tile);
setMetatable(L, -1, "Tile");
return 1;
}
int LuaScriptInterface::luaGameCreateMonsterType(lua_State* L)
{
// Game.createMonsterType(name)
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "MonsterTypes can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
const std::string& name = getString(L, 1);
if (name.length() == 0) {
lua_pushnil(L);
return 1;
}
MonsterType* monsterType = g_monsters.getMonsterType(name, false);
if (!monsterType) {
monsterType = &g_monsters.monsters[asLowerCaseString(name)];
monsterType->name = name;
monsterType->nameDescription = "a " + name;
} else {
monsterType->info.lootItems.clear();
monsterType->info.attackSpells.clear();
monsterType->info.defenseSpells.clear();
monsterType->info.scripts.clear();
monsterType->info.thinkEvent = -1;
monsterType->info.creatureAppearEvent = -1;
monsterType->info.creatureDisappearEvent = -1;
monsterType->info.creatureMoveEvent = -1;
monsterType->info.creatureSayEvent = -1;
}
pushUserdata<MonsterType>(L, monsterType);
setMetatable(L, -1, "MonsterType");
return 1;
}
int LuaScriptInterface::luaGameStartRaid(lua_State* L)
{
// Game.startRaid(raidName)
const std::string& raidName = getString(L, 1);
Raid* raid = g_game.raids.getRaidByName(raidName);
if (!raid || !raid->isLoaded()) {
lua_pushnumber(L, RETURNVALUE_NOSUCHRAIDEXISTS);
return 1;
}
if (g_game.raids.getRunning()) {
lua_pushnumber(L, RETURNVALUE_ANOTHERRAIDISALREADYEXECUTING);
return 1;
}
g_game.raids.setRunning(raid);
raid->startRaid();
lua_pushnumber(L, RETURNVALUE_NOERROR);
return 1;
}
int LuaScriptInterface::luaGameGetClientVersion(lua_State* L)
{
// Game.getClientVersion()
lua_createtable(L, 0, 3);
setField(L, "min", CLIENT_VERSION_MIN);
setField(L, "max", CLIENT_VERSION_MAX);
setField(L, "string", CLIENT_VERSION_STR);
return 1;
}
int LuaScriptInterface::luaGameReload(lua_State* L)
{
// Game.reload(reloadType)
ReloadTypes_t reloadType = getNumber<ReloadTypes_t>(L, 1);
if (reloadType == RELOAD_TYPE_GLOBAL) {
pushBoolean(L, g_luaEnvironment.loadFile("data/global.lua") == 0);
pushBoolean(L, g_scripts->loadScripts("scripts/lib", true, true));
} else {
pushBoolean(L, g_game.reload(reloadType));
}
lua_gc(g_luaEnvironment.getLuaState(), LUA_GCCOLLECT, 0);
return 1;
}
int LuaScriptInterface::luaGameGetAccountStorageValue(lua_State* L)
{
// Game.getAccountStorageValue(accountId, key)
uint32_t accountId = getNumber<uint32_t>(L, 1);
uint32_t key = getNumber<uint32_t>(L, 2);
lua_pushnumber(L, g_game.getAccountStorageValue(accountId, key));
return 1;
}
int LuaScriptInterface::luaGameSetAccountStorageValue(lua_State* L)
{
// Game.setAccountStorageValue(accountId, key, value)
uint32_t accountId = getNumber<uint32_t>(L, 1);
uint32_t key = getNumber<uint32_t>(L, 2);
int32_t value = getNumber<int32_t>(L, 3);
g_game.setAccountStorageValue(accountId, key, value);
lua_pushboolean(L, true);
return 1;
}
int LuaScriptInterface::luaGameSaveAccountStorageValues(lua_State* L)
{
// Game.saveAccountStorageValues()
lua_pushboolean(L, g_game.saveAccountStorageValues());
return 1;
}
// Variant
int LuaScriptInterface::luaVariantCreate(lua_State* L)
{
// Variant(number or string or position or thing)
LuaVariant variant;
if (isUserdata(L, 2)) {
if (Thing* thing = getThing(L, 2)) {
variant.type = VARIANT_TARGETPOSITION;
variant.pos = thing->getPosition();
}
} else if (isTable(L, 2)) {
variant.type = VARIANT_POSITION;
variant.pos = getPosition(L, 2);
} else if (isNumber(L, 2)) {
variant.type = VARIANT_NUMBER;
variant.number = getNumber<uint32_t>(L, 2);
} else if (isString(L, 2)) {
variant.type = VARIANT_STRING;
variant.text = getString(L, 2);
}
pushVariant(L, variant);
return 1;
}
int LuaScriptInterface::luaVariantGetNumber(lua_State* L)
{
// Variant:getNumber()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_NUMBER) {
lua_pushnumber(L, variant.number);
} else {
lua_pushnumber(L, 0);
}
return 1;
}
int LuaScriptInterface::luaVariantGetString(lua_State* L)
{
// Variant:getString()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_STRING) {
pushString(L, variant.text);
} else {
pushString(L, std::string());
}
return 1;
}
int LuaScriptInterface::luaVariantGetPosition(lua_State* L)
{
// Variant:getPosition()
const LuaVariant& variant = getVariant(L, 1);
if (variant.type == VARIANT_POSITION || variant.type == VARIANT_TARGETPOSITION) {
pushPosition(L, variant.pos);
} else {
pushPosition(L, Position());
}
return 1;
}
// Position
int LuaScriptInterface::luaPositionCreate(lua_State* L)
{
// Position([x = 0[, y = 0[, z = 0[, stackpos = 0]]]])
// Position([position])
if (lua_gettop(L) <= 1) {
pushPosition(L, Position());
return 1;
}
int32_t stackpos;
if (isTable(L, 2)) {
const Position& position = getPosition(L, 2, stackpos);
pushPosition(L, position, stackpos);
} else {
uint16_t x = getNumber<uint16_t>(L, 2, 0);
uint16_t y = getNumber<uint16_t>(L, 3, 0);
uint8_t z = getNumber<uint8_t>(L, 4, 0);
stackpos = getNumber<int32_t>(L, 5, 0);
pushPosition(L, Position(x, y, z), stackpos);
}
return 1;
}
int LuaScriptInterface::luaPositionAdd(lua_State* L)
{
// positionValue = position + positionEx
int32_t stackpos;
const Position& position = getPosition(L, 1, stackpos);
Position positionEx;
if (stackpos == 0) {
positionEx = getPosition(L, 2, stackpos);
} else {
positionEx = getPosition(L, 2);
}
pushPosition(L, position + positionEx, stackpos);
return 1;
}
int LuaScriptInterface::luaPositionSub(lua_State* L)
{
// positionValue = position - positionEx
int32_t stackpos;
const Position& position = getPosition(L, 1, stackpos);
Position positionEx;
if (stackpos == 0) {
positionEx = getPosition(L, 2, stackpos);
} else {
positionEx = getPosition(L, 2);
}
pushPosition(L, position - positionEx, stackpos);
return 1;
}
int LuaScriptInterface::luaPositionCompare(lua_State* L)
{
// position == positionEx
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
pushBoolean(L, position == positionEx);
return 1;
}
int LuaScriptInterface::luaPositionGetDistance(lua_State* L)
{
// position:getDistance(positionEx)
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
lua_pushnumber(L, std::max<int32_t>(
std::max<int32_t>(
std::abs(Position::getDistanceX(position, positionEx)),
std::abs(Position::getDistanceY(position, positionEx))
),
std::abs(Position::getDistanceZ(position, positionEx))
));
return 1;
}
int LuaScriptInterface::luaPositionIsSightClear(lua_State* L)
{
// position:isSightClear(positionEx[, sameFloor = true])
bool sameFloor = getBoolean(L, 3, true);
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
pushBoolean(L, g_game.isSightClear(position, positionEx, sameFloor));
return 1;
}
int LuaScriptInterface::luaPositionSendMagicEffect(lua_State* L)
{
// position:sendMagicEffect(magicEffect[, player = nullptr])
SpectatorVec spectators;
if (lua_gettop(L) >= 3) {
Player* player = getPlayer(L, 3);
if (player) {
spectators.emplace_back(player);
}
}
MagicEffectClasses magicEffect = getNumber<MagicEffectClasses>(L, 2);
const Position& position = getPosition(L, 1);
if (!spectators.empty()) {
Game::addMagicEffect(spectators, position, magicEffect);
} else {
g_game.addMagicEffect(position, magicEffect);
}
pushBoolean(L, true);
return 1;
}
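// e.g. Position(100, 100, 7):sendMagicEffect(CONST_ME_TELEPORT) broadcasts to all
// spectators; passing a player as the second argument limits the effect to that player.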
int LuaScriptInterface::luaPositionSendDistanceEffect(lua_State* L)
{
// position:sendDistanceEffect(positionEx, distanceEffect[, player = nullptr])
SpectatorVec spectators;
if (lua_gettop(L) >= 4) {
Player* player = getPlayer(L, 4);
if (player) {
spectators.emplace_back(player);
}
}
ShootType_t distanceEffect = getNumber<ShootType_t>(L, 3);
const Position& positionEx = getPosition(L, 2);
const Position& position = getPosition(L, 1);
if (!spectators.empty()) {
Game::addDistanceEffect(spectators, position, positionEx, distanceEffect);
} else {
g_game.addDistanceEffect(position, positionEx, distanceEffect);
}
pushBoolean(L, true);
return 1;
}
// Tile
int LuaScriptInterface::luaTileCreate(lua_State* L)
{
// Tile(x, y, z)
// Tile(position)
Tile* tile;
if (isTable(L, 2)) {
tile = g_game.map.getTile(getPosition(L, 2));
} else {
uint8_t z = getNumber<uint8_t>(L, 4);
uint16_t y = getNumber<uint16_t>(L, 3);
uint16_t x = getNumber<uint16_t>(L, 2);
tile = g_game.map.getTile(x, y, z);
}
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileRemove(lua_State* L)
{
// tile:remove()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
g_game.map.removeTile(tile->getPosition());
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaTileGetPosition(lua_State* L)
{
// tile:getPosition()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
pushPosition(L, tile->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetGround(lua_State* L)
{
// tile:getGround()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile && tile->getGround()) {
pushUserdata<Item>(L, tile->getGround());
setItemMetatable(L, -1, tile->getGround());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetThing(lua_State* L)
{
// tile:getThing(index)
int32_t index = getNumber<int32_t>(L, 2);
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = tile->getThing(index);
if (!thing) {
lua_pushnil(L);
return 1;
}
if (Creature* creature = thing->getCreature()) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else if (Item* item = thing->getItem()) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetThingCount(lua_State* L)
{
// tile:getThingCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
lua_pushnumber(L, tile->getThingCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleThing(lua_State* L)
{
// tile:getTopVisibleThing(creature)
Creature* creature = getCreature(L, 2);
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = tile->getTopVisibleThing(creature);
if (!thing) {
lua_pushnil(L);
return 1;
}
if (Creature* visibleCreature = thing->getCreature()) {
pushUserdata<Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else if (Item* visibleItem = thing->getItem()) {
pushUserdata<Item>(L, visibleItem);
setItemMetatable(L, -1, visibleItem);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopTopItem(lua_State* L)
{
// tile:getTopTopItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getTopTopItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopDownItem(lua_State* L)
{
// tile:getTopDownItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getTopDownItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetFieldItem(lua_State* L)
{
// tile:getFieldItem()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item = tile->getFieldItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemById(lua_State* L)
{
// tile:getItemById(itemId[, subType = -1])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
Item* item = g_game.findItemOfType(tile, itemId, false, subType);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemByType(lua_State* L)
{
// tile:getItemByType(itemType)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
bool found;
ItemTypes_t itemType = getNumber<ItemTypes_t>(L, 2);
switch (itemType) {
case ITEM_TYPE_TELEPORT:
found = tile->hasFlag(TILESTATE_TELEPORT);
break;
case ITEM_TYPE_MAGICFIELD:
found = tile->hasFlag(TILESTATE_MAGICFIELD);
break;
case ITEM_TYPE_MAILBOX:
found = tile->hasFlag(TILESTATE_MAILBOX);
break;
case ITEM_TYPE_TRASHHOLDER:
found = tile->hasFlag(TILESTATE_TRASHHOLDER);
break;
case ITEM_TYPE_BED:
found = tile->hasFlag(TILESTATE_BED);
break;
case ITEM_TYPE_DEPOT:
found = tile->hasFlag(TILESTATE_DEPOT);
break;
default:
found = true;
break;
}
if (!found) {
lua_pushnil(L);
return 1;
}
if (Item* item = tile->getGround()) {
const ItemType& it = Item::items[item->getID()];
if (it.type == itemType) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
}
if (const TileItemVector* items = tile->getItemList()) {
for (Item* item : *items) {
const ItemType& it = Item::items[item->getID()];
if (it.type == itemType) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
}
}
lua_pushnil(L);
return 1;
}
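// The switch above is a fast rejection path: item types that map onto a tile
// state flag (teleport, magic field, mailbox, trash holder, bed, depot) can be
// ruled out without touching the item list; any other type falls through with
// found = true and is resolved by the full ground/item scan.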
int LuaScriptInterface::luaTileGetItemByTopOrder(lua_State* L)
{
// tile:getItemByTopOrder(topOrder)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
int32_t topOrder = getNumber<int32_t>(L, 2);
Item* item = tile->getItemByTopOrder(topOrder);
if (!item) {
lua_pushnil(L);
return 1;
}
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
return 1;
}
int LuaScriptInterface::luaTileGetItemCountById(lua_State* L)
{
// tile:getItemCountById(itemId[, subType = -1])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
lua_pushnumber(L, tile->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaTileGetBottomCreature(lua_State* L)
{
// tile:getBottomCreature()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
const Creature* creature = tile->getBottomCreature();
if (!creature) {
lua_pushnil(L);
return 1;
}
pushUserdata<const Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
return 1;
}
int LuaScriptInterface::luaTileGetTopCreature(lua_State* L)
{
// tile:getTopCreature()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = tile->getTopCreature();
if (!creature) {
lua_pushnil(L);
return 1;
}
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
return 1;
}
int LuaScriptInterface::luaTileGetBottomVisibleCreature(lua_State* L)
{
// tile:getBottomVisibleCreature(creature)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Creature* visibleCreature = tile->getBottomVisibleCreature(creature);
if (visibleCreature) {
pushUserdata<const Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopVisibleCreature(lua_State* L)
{
// tile:getTopVisibleCreature(creature)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* visibleCreature = tile->getTopVisibleCreature(creature);
if (visibleCreature) {
pushUserdata<Creature>(L, visibleCreature);
setCreatureMetatable(L, -1, visibleCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetItems(lua_State* L)
{
// tile:getItems()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
TileItemVector* itemVector = tile->getItemList();
if (!itemVector) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, itemVector->size(), 0);
int index = 0;
for (Item* item : *itemVector) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaTileGetItemCount(lua_State* L)
{
// tile:getItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getItemCount());
return 1;
}
int LuaScriptInterface::luaTileGetDownItemCount(lua_State* L)
{
// tile:getDownItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
lua_pushnumber(L, tile->getDownItemCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileGetTopItemCount(lua_State* L)
{
// tile:getTopItemCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getTopItemCount());
return 1;
}
int LuaScriptInterface::luaTileGetCreatures(lua_State* L)
{
// tile:getCreatures()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
CreatureVector* creatureVector = tile->getCreatures();
if (!creatureVector) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creatureVector->size(), 0);
int index = 0;
for (Creature* creature : *creatureVector) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaTileGetCreatureCount(lua_State* L)
{
// tile:getCreatureCount()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, tile->getCreatureCount());
return 1;
}
int LuaScriptInterface::luaTileHasProperty(lua_State* L)
{
// tile:hasProperty(property[, item])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Item* item;
if (lua_gettop(L) >= 3) {
item = getUserdata<Item>(L, 3);
} else {
item = nullptr;
}
ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
if (item) {
pushBoolean(L, tile->hasProperty(item, property));
} else {
pushBoolean(L, tile->hasProperty(property));
}
return 1;
}
int LuaScriptInterface::luaTileGetThingIndex(lua_State* L)
{
// tile:getThingIndex(thing)
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = getThing(L, 2);
if (thing) {
lua_pushnumber(L, tile->getThingIndex(thing));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileHasFlag(lua_State* L)
{
// tile:hasFlag(flag)
Tile* tile = getUserdata<Tile>(L, 1);
if (tile) {
tileflags_t flag = getNumber<tileflags_t>(L, 2);
pushBoolean(L, tile->hasFlag(flag));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileQueryAdd(lua_State* L)
{
// tile:queryAdd(thing[, flags])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
Thing* thing = getThing(L, 2);
if (thing) {
uint32_t flags = getNumber<uint32_t>(L, 3, 0);
lua_pushnumber(L, tile->queryAdd(0, *thing, 1, flags));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTileAddItem(lua_State* L)
{
// tile:addItem(itemId[, count/subType = 1[, flags = 0]])
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t subType = getNumber<uint32_t>(L, 3, 1);
Item* item = Item::CreateItem(itemId, std::min<uint32_t>(subType, 100));
if (!item) {
lua_pushnil(L);
return 1;
}
uint32_t flags = getNumber<uint32_t>(L, 4, 0);
ReturnValue ret = g_game.internalAddItem(tile, item, INDEX_WHEREEVER, flags);
if (ret == RETURNVALUE_NOERROR) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
delete item;
lua_pushnil(L);
}
return 1;
}
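// internalAddItem takes ownership on success; on failure the freshly created
// item is deleted here so scripts never leak a parentless item.
// Usage sketch (Lua side; the item id 2160 and action id are illustrative):
//   local coins = tile:addItem(2160, 10)
//   if coins then coins:setActionId(1000) end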
int LuaScriptInterface::luaTileAddItemEx(lua_State* L)
{
// tile:addItemEx(item[, flags = 0])
Item* item = getUserdata<Item>(L, 2);
if (!item) {
lua_pushnil(L);
return 1;
}
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc(L, "Item already has a parent");
lua_pushnil(L);
return 1;
}
uint32_t flags = getNumber<uint32_t>(L, 3, 0);
ReturnValue ret = g_game.internalAddItem(tile, item, INDEX_WHEREEVER, flags);
if (ret == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, ret);
return 1;
}
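// addItemEx() only accepts an item that is still parented to the virtual
// cylinder (e.g. a fresh item:clone() that has not been placed anywhere yet)
// and, unlike addItem(), it returns the ReturnValue code rather than the item.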
int LuaScriptInterface::luaTileGetHouse(lua_State* L)
{
// tile:getHouse()
Tile* tile = getUserdata<Tile>(L, 1);
if (!tile) {
lua_pushnil(L);
return 1;
}
if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tile)) {
pushUserdata<House>(L, houseTile->getHouse());
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
// NetworkMessage
int LuaScriptInterface::luaNetworkMessageCreate(lua_State* L)
{
// NetworkMessage()
pushUserdata<NetworkMessage>(L, new NetworkMessage);
setMetatable(L, -1, "NetworkMessage");
return 1;
}
int LuaScriptInterface::luaNetworkMessageDelete(lua_State* L)
{
NetworkMessage** messagePtr = getRawUserdata<NetworkMessage>(L, 1);
if (messagePtr && *messagePtr) {
delete *messagePtr;
*messagePtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaNetworkMessageGetByte(lua_State* L)
{
// networkMessage:getByte()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->getByte());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU16(lua_State* L)
{
// networkMessage:getU16()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint16_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU32(lua_State* L)
{
// networkMessage:getU32()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint32_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetU64(lua_State* L)
{
// networkMessage:getU64()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->get<uint64_t>());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetString(lua_State* L)
{
// networkMessage:getString()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
pushString(L, message->getString());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageGetPosition(lua_State* L)
{
// networkMessage:getPosition()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
pushPosition(L, message->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddByte(lua_State* L)
{
// networkMessage:addByte(number)
uint8_t number = getNumber<uint8_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addByte(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU16(lua_State* L)
{
// networkMessage:addU16(number)
uint16_t number = getNumber<uint16_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint16_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU32(lua_State* L)
{
// networkMessage:addU32(number)
uint32_t number = getNumber<uint32_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint32_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddU64(lua_State* L)
{
// networkMessage:addU64(number)
uint64_t number = getNumber<uint64_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->add<uint64_t>(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddString(lua_State* L)
{
// networkMessage:addString(string)
const std::string& string = getString(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addString(string);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddPosition(lua_State* L)
{
// networkMessage:addPosition(position)
const Position& position = getPosition(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addPosition(position);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddDouble(lua_State* L)
{
// networkMessage:addDouble(number)
double number = getNumber<double>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addDouble(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItem(lua_State* L)
{
// networkMessage:addItem(item)
Item* item = getUserdata<Item>(L, 2);
if (!item) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
lua_pushnil(L);
return 1;
}
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->addItem(item);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageAddItemId(lua_State* L)
{
// networkMessage:addItemId(itemId)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (!message) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
message->addItemId(itemId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaNetworkMessageReset(lua_State* L)
{
// networkMessage:reset()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->reset();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSeek(lua_State* L)
{
// networkMessage:seek(position)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message && isNumber(L, 2)) {
pushBoolean(L, message->setBufferPosition(getNumber<uint16_t>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageTell(lua_State* L)
{
// networkMessage:tell()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->getBufferPosition() - message->INITIAL_BUFFER_POSITION);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageLength(lua_State* L)
{
// networkMessage:len()
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
lua_pushnumber(L, message->getLength());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSkipBytes(lua_State* L)
{
// networkMessage:skipBytes(number)
int16_t number = getNumber<int16_t>(L, 2);
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (message) {
message->skipBytes(number);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNetworkMessageSendToPlayer(lua_State* L)
{
// networkMessage:sendToPlayer(player)
NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
if (!message) {
lua_pushnil(L);
return 1;
}
Player* player = getPlayer(L, 2);
if (player) {
player->sendNetworkMessage(*message);
pushBoolean(L, true);
} else {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
lua_pushnil(L);
}
return 1;
}
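// Usage sketch (Lua side; the opcode 0x32 is purely hypothetical):
//   local msg = NetworkMessage()
//   msg:addByte(0x32)              -- header byte expected by a client mod
//   msg:addString("hello")
//   msg:sendToPlayer(player)
//   msg:delete()                   -- frees the buffer once it has been sent
// Reads mirror writes -- getByte/getU16/getU32/getU64/getString consume the
// buffer in order, and tell() reports the offset relative to the initial
// buffer position.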
// ModalWindow
int LuaScriptInterface::luaModalWindowCreate(lua_State* L)
{
// ModalWindow(id, title, message)
const std::string& message = getString(L, 4);
const std::string& title = getString(L, 3);
uint32_t id = getNumber<uint32_t>(L, 2);
pushUserdata<ModalWindow>(L, new ModalWindow(id, title, message));
setMetatable(L, -1, "ModalWindow");
return 1;
}
int LuaScriptInterface::luaModalWindowDelete(lua_State* L)
{
ModalWindow** windowPtr = getRawUserdata<ModalWindow>(L, 1);
if (windowPtr && *windowPtr) {
delete *windowPtr;
*windowPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaModalWindowGetId(lua_State* L)
{
// modalWindow:getId()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetTitle(lua_State* L)
{
// modalWindow:getTitle()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushString(L, window->title);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetMessage(lua_State* L)
{
// modalWindow:getMessage()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushString(L, window->message);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetTitle(lua_State* L)
{
// modalWindow:setTitle(text)
const std::string& text = getString(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->title = text;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetMessage(lua_State* L)
{
// modalWindow:setMessage(text)
const std::string& text = getString(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->message = text;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetButtonCount(lua_State* L)
{
// modalWindow:getButtonCount()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->buttons.size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetChoiceCount(lua_State* L)
{
// modalWindow:getChoiceCount()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->choices.size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowAddButton(lua_State* L)
{
// modalWindow:addButton(id, text)
const std::string& text = getString(L, 3);
uint8_t id = getNumber<uint8_t>(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->buttons.emplace_back(text, id);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowAddChoice(lua_State* L)
{
// modalWindow:addChoice(id, text)
const std::string& text = getString(L, 3);
uint8_t id = getNumber<uint8_t>(L, 2);
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->choices.emplace_back(text, id);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEnterButton(lua_State* L)
{
// modalWindow:getDefaultEnterButton()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->defaultEnterButton);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEnterButton(lua_State* L)
{
// modalWindow:setDefaultEnterButton(buttonId)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->defaultEnterButton = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowGetDefaultEscapeButton(lua_State* L)
{
// modalWindow:getDefaultEscapeButton()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
lua_pushnumber(L, window->defaultEscapeButton);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetDefaultEscapeButton(lua_State* L)
{
// modalWindow:setDefaultEscapeButton(buttonId)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->defaultEscapeButton = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowHasPriority(lua_State* L)
{
// modalWindow:hasPriority()
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
pushBoolean(L, window->priority);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSetPriority(lua_State* L)
{
// modalWindow:setPriority(priority)
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
window->priority = getBoolean(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaModalWindowSendToPlayer(lua_State* L)
{
// modalWindow:sendToPlayer(player)
Player* player = getPlayer(L, 2);
if (!player) {
lua_pushnil(L);
return 1;
}
ModalWindow* window = getUserdata<ModalWindow>(L, 1);
if (window) {
if (!player->hasModalWindowOpen(window->id)) {
player->sendModalWindow(*window);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
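// Usage sketch (Lua side; ids and texts are illustrative):
//   local window = ModalWindow(1, "Quest", "Accept the mission?")
//   window:addButton(1, "Yes")
//   window:addButton(2, "No")
//   window:setDefaultEnterButton(1)
//   window:setDefaultEscapeButton(2)
//   window:sendToPlayer(player)
// sendToPlayer() skips players who already have a window with the same id
// open, yet still reports true, so scripts cannot rely on it for delivery.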
// Item
int LuaScriptInterface::luaItemCreate(lua_State* L)
{
// Item(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(id);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemIsItem(lua_State* L)
{
// item:isItem()
pushBoolean(L, getUserdata<const Item>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaItemGetParent(lua_State* L)
{
// item:getParent()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Cylinder* parent = item->getParent();
if (!parent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, parent);
return 1;
}
int LuaScriptInterface::luaItemGetTopParent(lua_State* L)
{
// item:getTopParent()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Cylinder* topParent = item->getTopParent();
if (!topParent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, topParent);
return 1;
}
int LuaScriptInterface::luaItemGetId(lua_State* L)
{
// item:getId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemClone(lua_State* L)
{
// item:clone()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Item* clone = item->clone();
if (!clone) {
lua_pushnil(L);
return 1;
}
getScriptEnv()->addTempItem(clone);
clone->setParent(VirtualCylinder::virtualCylinder);
pushUserdata<Item>(L, clone);
setItemMetatable(L, -1, clone);
return 1;
}
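// clone() never places the copy in the world: it is parented to the virtual
// cylinder and registered as a temp item, so it is cleaned up automatically
// unless a later tile/container addItemEx() or moveTo() gives it a real parent.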
int LuaScriptInterface::luaItemSplit(lua_State* L)
{
// item:split([count = 1])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item* item = *itemPtr;
if (!item || !item->isStackable()) {
lua_pushnil(L);
return 1;
}
uint16_t count = std::min<uint16_t>(getNumber<uint16_t>(L, 2, 1), item->getItemCount());
uint16_t diff = item->getItemCount() - count;
Item* splitItem = item->clone();
if (!splitItem) {
lua_pushnil(L);
return 1;
}
splitItem->setItemCount(count);
ScriptEnvironment* env = getScriptEnv();
uint32_t uid = env->addThing(item);
Item* newItem = g_game.transformItem(item, item->getID(), diff);
if (item->isRemoved()) {
env->removeItemByUID(uid);
}
if (newItem && newItem != item) {
env->insertItem(uid, newItem);
}
*itemPtr = newItem;
splitItem->setParent(VirtualCylinder::virtualCylinder);
env->addTempItem(splitItem);
pushUserdata<Item>(L, splitItem);
setItemMetatable(L, -1, splitItem);
return 1;
}
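// split() shrinks the original stack through transformItem(), which may swap
// the underlying Item*; the addThing()/insertItem() pair keeps the script
// environment's uid map pointing at the survivor and *itemPtr is rewritten so
// the original userdata stays valid. The returned userdata is the split-off
// part, parented to the virtual cylinder and ready for moveTo()/addItemEx().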
int LuaScriptInterface::luaItemRemove(lua_State* L)
{
// item:remove([count = -1])
Item* item = getUserdata<Item>(L, 1);
if (item) {
int32_t count = getNumber<int32_t>(L, 2, -1);
pushBoolean(L, g_game.internalRemoveItem(item, count) == RETURNVALUE_NOERROR);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetUniqueId(lua_State* L)
{
// item:getUniqueId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
uint32_t uniqueId = item->getUniqueId();
if (uniqueId == 0) {
uniqueId = getScriptEnv()->addThing(item);
}
lua_pushnumber(L, uniqueId);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetActionId(lua_State* L)
{
// item:getActionId()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getActionId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetActionId(lua_State* L)
{
// item:setActionId(actionId)
uint16_t actionId = getNumber<uint16_t>(L, 2);
Item* item = getUserdata<Item>(L, 1);
if (item) {
item->setActionId(actionId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetCount(lua_State* L)
{
// item:getCount()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getItemCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetCharges(lua_State* L)
{
// item:getCharges()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getCharges());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetFluidType(lua_State* L)
{
// item:getFluidType()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getFluidType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetWeight(lua_State* L)
{
// item:getWeight()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getWeight());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetSubType(lua_State* L)
{
// item:getSubType()
Item* item = getUserdata<Item>(L, 1);
if (item) {
lua_pushnumber(L, item->getSubType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetName(lua_State* L)
{
// item:getName()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetPluralName(lua_State* L)
{
// item:getPluralName()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getPluralName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetArticle(lua_State* L)
{
// item:getArticle()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getArticle());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetPosition(lua_State* L)
{
// item:getPosition()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushPosition(L, item->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetTile(lua_State* L)
{
// item:getTile()
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
Tile* tile = item->getTile();
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemHasAttribute(lua_State* L)
{
// item:hasAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
pushBoolean(L, item->hasAttribute(attribute));
return 1;
}
int LuaScriptInterface::luaItemGetAttribute(lua_State* L)
{
// item:getAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
if (ItemAttributes::isIntAttrType(attribute)) {
lua_pushnumber(L, item->getIntAttr(attribute));
} else if (ItemAttributes::isStrAttrType(attribute)) {
pushString(L, item->getStrAttr(attribute));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetAttribute(lua_State* L)
{
// item:setAttribute(key, value)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
if (ItemAttributes::isIntAttrType(attribute)) {
if (attribute == ITEM_ATTRIBUTE_UNIQUEID) {
reportErrorFunc(L, "Attempt to set protected key \"uid\"");
pushBoolean(L, false);
return 1;
}
item->setIntAttr(attribute, getNumber<int32_t>(L, 3));
pushBoolean(L, true);
} else if (ItemAttributes::isStrAttrType(attribute)) {
item->setStrAttr(attribute, getString(L, 3));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemRemoveAttribute(lua_State* L)
{
// item:removeAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
itemAttrTypes attribute;
if (isNumber(L, 2)) {
attribute = getNumber<itemAttrTypes>(L, 2);
} else if (isString(L, 2)) {
attribute = stringToItemAttribute(getString(L, 2));
} else {
attribute = ITEM_ATTRIBUTE_NONE;
}
bool ret = attribute != ITEM_ATTRIBUTE_UNIQUEID;
if (ret) {
item->removeAttribute(attribute);
} else {
reportErrorFunc(L, "Attempt to erase protected key \"uid\"");
}
pushBoolean(L, ret);
return 1;
}
int LuaScriptInterface::luaItemGetCustomAttribute(lua_State* L)
{
// item:getCustomAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
const ItemAttributes::CustomAttribute* attr;
if (isNumber(L, 2)) {
attr = item->getCustomAttribute(getNumber<int64_t>(L, 2));
} else if (isString(L, 2)) {
attr = item->getCustomAttribute(getString(L, 2));
} else {
lua_pushnil(L);
return 1;
}
if (attr) {
attr->pushToLua(L);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetCustomAttribute(lua_State* L)
{
// item:setCustomAttribute(key, value)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
std::string key;
if (isNumber(L, 2)) {
key = std::to_string(getNumber<int64_t>(L, 2));
} else if (isString(L, 2)) {
key = getString(L, 2);
} else {
lua_pushnil(L);
return 1;
}
ItemAttributes::CustomAttribute val;
if (isNumber(L, 3)) {
double tmp = getNumber<double>(L, 3);
if (std::floor(tmp) < tmp) {
val.set<double>(tmp);
} else {
val.set<int64_t>(tmp);
}
} else if (isString(L, 3)) {
val.set<std::string>(getString(L, 3));
} else if (isBoolean(L, 3)) {
val.set<bool>(getBoolean(L, 3));
} else {
lua_pushnil(L);
return 1;
}
item->setCustomAttribute(key, val);
pushBoolean(L, true);
return 1;
}
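// Numeric values are stored as int64 when the Lua number has no fractional
// part and as double otherwise, so integer-valued custom attributes survive
// a getCustomAttribute() round trip exactly.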
int LuaScriptInterface::luaItemRemoveCustomAttribute(lua_State* L)
{
// item:removeCustomAttribute(key)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
if (isNumber(L, 2)) {
pushBoolean(L, item->removeCustomAttribute(getNumber<int64_t>(L, 2)));
} else if (isString(L, 2)) {
pushBoolean(L, item->removeCustomAttribute(getString(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemMoveTo(lua_State* L)
{
// item:moveTo(position or cylinder[, flags])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item* item = *itemPtr;
if (!item || item->isRemoved()) {
lua_pushnil(L);
return 1;
}
Cylinder* toCylinder;
if (isUserdata(L, 2)) {
const LuaDataType type = getUserdataType(L, 2);
switch (type) {
case LuaData_Container:
toCylinder = getUserdata<Container>(L, 2);
break;
case LuaData_Player:
toCylinder = getUserdata<Player>(L, 2);
break;
case LuaData_Tile:
toCylinder = getUserdata<Tile>(L, 2);
break;
default:
toCylinder = nullptr;
break;
}
} else {
toCylinder = g_game.map.getTile(getPosition(L, 2));
}
if (!toCylinder) {
lua_pushnil(L);
return 1;
}
if (item->getParent() == toCylinder) {
pushBoolean(L, true);
return 1;
}
uint32_t flags = getNumber<uint32_t>(L, 3, FLAG_NOLIMIT | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE | FLAG_IGNORENOTMOVEABLE);
if (item->getParent() == VirtualCylinder::virtualCylinder) {
pushBoolean(L, g_game.internalAddItem(toCylinder, item, INDEX_WHEREEVER, flags) == RETURNVALUE_NOERROR);
} else {
Item* moveItem = nullptr;
ReturnValue ret = g_game.internalMoveItem(item->getParent(), toCylinder, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, flags);
if (moveItem) {
*itemPtr = moveItem;
}
pushBoolean(L, ret == RETURNVALUE_NOERROR);
}
return 1;
}
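// Note the permissive defaults: unless a flag mask is supplied, moveTo()
// ignores capacity limits, blocking items/creatures and the non-moveable
// check. Usage sketch (Lua side; the position is illustrative):
//   item:moveTo(Position(100, 100, 7))   -- forced move with default flags
//   item:moveTo(player, 0)               -- honour the normal movement rules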
int LuaScriptInterface::luaItemTransform(lua_State* L)
{
// item:transform(itemId[, count/subType = -1])
Item** itemPtr = getRawUserdata<Item>(L, 1);
if (!itemPtr) {
lua_pushnil(L);
return 1;
}
Item*& item = *itemPtr;
if (!item) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
pushBoolean(L, true);
return 1;
}
const ItemType& it = Item::items[itemId];
if (it.stackable) {
subType = std::min<int32_t>(subType, 100);
}
ScriptEnvironment* env = getScriptEnv();
uint32_t uid = env->addThing(item);
Item* newItem = g_game.transformItem(item, itemId, subType);
if (item->isRemoved()) {
env->removeItemByUID(uid);
}
if (newItem && newItem != item) {
env->insertItem(uid, newItem);
}
item = newItem;
pushBoolean(L, true);
return 1;
}
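// transform() follows the same uid bookkeeping as split(): transformItem()
// may return a brand-new Item*, and the raw userdata pointer is rewritten so
// later calls on the same Lua value operate on the transformed item.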
int LuaScriptInterface::luaItemDecay(lua_State* L)
{
// item:decay([decayId])
Item* item = getUserdata<Item>(L, 1);
if (item) {
if (isNumber(L, 2)) {
item->setDecayTo(getNumber<int32_t>(L, 2));
}
g_game.startDecay(item);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetDescription(lua_State* L)
{
// item:getDescription(distance)
Item* item = getUserdata<Item>(L, 1);
if (item) {
int32_t distance = getNumber<int32_t>(L, 2);
pushString(L, item->getDescription(distance));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemGetSpecialDescription(lua_State* L)
{
// item:getSpecialDescription()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushString(L, item->getSpecialDescription());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemHasProperty(lua_State* L)
{
// item:hasProperty(property)
Item* item = getUserdata<Item>(L, 1);
if (item) {
ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
pushBoolean(L, item->hasProperty(property));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemIsLoadedFromMap(lua_State* L)
{
// item:isLoadedFromMap()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushBoolean(L, item->isLoadedFromMap());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemSetStoreItem(lua_State* L)
{
// item:setStoreItem(storeItem)
Item* item = getUserdata<Item>(L, 1);
if (!item) {
lua_pushnil(L);
return 1;
}
item->setStoreItem(getBoolean(L, 2, false));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaItemIsStoreItem(lua_State* L)
{
// item:isStoreItem()
Item* item = getUserdata<Item>(L, 1);
if (item) {
pushBoolean(L, item->isStoreItem());
} else {
lua_pushnil(L);
}
return 1;
}
// Container
int LuaScriptInterface::luaContainerCreate(lua_State* L)
{
// Container(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Container* container = getScriptEnv()->getContainerByUID(id);
if (container) {
pushUserdata(L, container);
setMetatable(L, -1, "Container");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetSize(lua_State* L)
{
// container:getSize()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetCapacity(lua_State* L)
{
// container:getCapacity()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->capacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetEmptySlots(lua_State* L)
{
// container:getEmptySlots([recursive = false])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint32_t slots = container->capacity() - container->size();
bool recursive = getBoolean(L, 2, false);
if (recursive) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if (Container* tmpContainer = (*it)->getContainer()) {
slots += tmpContainer->capacity() - tmpContainer->size();
}
}
}
lua_pushnumber(L, slots);
return 1;
}
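// With recursive = true the iterator visits every nested container and adds
// its own free capacity (capacity() - size()) to the running total.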
int LuaScriptInterface::luaContainerGetItemHoldingCount(lua_State* L)
{
// container:getItemHoldingCount()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->getItemHoldingCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetItem(lua_State* L)
{
// container:getItem(index)
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint32_t index = getNumber<uint32_t>(L, 2);
Item* item = container->getItemByIndex(index);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerHasItem(lua_State* L)
{
// container:hasItem(item)
Item* item = getUserdata<Item>(L, 2);
Container* container = getUserdata<Container>(L, 1);
if (container) {
pushBoolean(L, container->isHoldingItem(item));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerAddItem(lua_State* L)
{
// container:addItem(itemId[, count/subType = 1[, index = INDEX_WHEREEVER[, flags = 0]]])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t count = getNumber<uint32_t>(L, 3, 1);
const ItemType& it = Item::items[itemId];
if (it.stackable) {
count = std::min<uint16_t>(count, 100);
}
Item* item = Item::CreateItem(itemId, count);
if (!item) {
lua_pushnil(L);
return 1;
}
int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 5, 0);
ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
if (ret == RETURNVALUE_NOERROR) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
delete item;
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerAddItemEx(lua_State* L)
{
// container:addItemEx(item[, index = INDEX_WHEREEVER[, flags = 0]])
Item* item = getUserdata<Item>(L, 2);
if (!item) {
lua_pushnil(L);
return 1;
}
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc(L, "Item already has a parent");
lua_pushnil(L);
return 1;
}
int32_t index = getNumber<int32_t>(L, 3, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 4, 0);
ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
if (ret == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, ret);
return 1;
}
int LuaScriptInterface::luaContainerGetCorpseOwner(lua_State* L)
{
// container:getCorpseOwner()
Container* container = getUserdata<Container>(L, 1);
if (container) {
lua_pushnumber(L, container->getCorpseOwner());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetItemCountById(lua_State* L)
{
// container:getItemCountById(itemId[, subType = -1])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
lua_pushnumber(L, container->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaContainerGetContentDescription(lua_State* L)
{
// container:getContentDescription()
Container* container = getUserdata<Container>(L, 1);
if (container) {
pushString(L, container->getContentDescription());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaContainerGetItems(lua_State* L)
{
// container:getItems([recursive = false])
Container* container = getUserdata<Container>(L, 1);
if (!container) {
lua_pushnil(L);
return 1;
}
bool recursive = getBoolean(L, 2, false);
std::vector<Item*> items = container->getItems(recursive);
lua_createtable(L, items.size(), 0);
int index = 0;
for (Item* item : items) {
pushUserdata(L, item);
setItemMetatable(L, -1, item);
lua_rawseti(L, -2, ++index);
}
return 1;
}
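// Usage sketch (Lua side; walks a player's backpack including nested bags):
//   local backpack = player:getSlotItem(CONST_SLOT_BACKPACK)
//   if backpack then
//       for _, item in ipairs(backpack:getItems(true)) do
//           print(item:getName(), item:getCount())
//       end
//   end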
// Teleport
int LuaScriptInterface::luaTeleportCreate(lua_State* L)
{
// Teleport(uid)
uint32_t id = getNumber<uint32_t>(L, 2);
Item* item = getScriptEnv()->getItemByUID(id);
if (item && item->getTeleport()) {
pushUserdata(L, item);
setMetatable(L, -1, "Teleport");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTeleportGetDestination(lua_State* L)
{
// teleport:getDestination()
Teleport* teleport = getUserdata<Teleport>(L, 1);
if (teleport) {
pushPosition(L, teleport->getDestPos());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTeleportSetDestination(lua_State* L)
{
// teleport:setDestination(position)
Teleport* teleport = getUserdata<Teleport>(L, 1);
if (teleport) {
teleport->setDestPos(getPosition(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Creature
int LuaScriptInterface::luaCreatureCreate(lua_State* L)
{
// Creature(id or name or userdata)
Creature* creature;
if (isNumber(L, 2)) {
creature = g_game.getCreatureByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
creature = g_game.getCreatureByName(getString(L, 2));
} else if (isUserdata(L, 2)) {
LuaDataType type = getUserdataType(L, 2);
if (type != LuaData_Player && type != LuaData_Monster && type != LuaData_Npc) {
lua_pushnil(L);
return 1;
}
creature = getUserdata<Creature>(L, 2);
} else {
creature = nullptr;
}
if (creature) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetEvents(lua_State* L)
{
// creature:getEvents(type)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
CreatureEventType_t eventType = getNumber<CreatureEventType_t>(L, 2);
const auto& eventList = creature->getCreatureEvents(eventType);
lua_createtable(L, eventList.size(), 0);
int index = 0;
for (CreatureEvent* event : eventList) {
pushString(L, event->getName());
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaCreatureRegisterEvent(lua_State* L)
{
// creature:registerEvent(name)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
const std::string& name = getString(L, 2);
pushBoolean(L, creature->registerCreatureEvent(name));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureUnregisterEvent(lua_State* L)
{
// creature:unregisterEvent(name)
const std::string& name = getString(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->unregisterCreatureEvent(name));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsRemoved(lua_State* L)
{
// creature:isRemoved()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isRemoved());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsCreature(lua_State* L)
{
// creature:isCreature()
pushBoolean(L, getUserdata<const Creature>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaCreatureIsInGhostMode(lua_State* L)
{
// creature:isInGhostMode()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isInGhostMode());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsHealthHidden(lua_State* L)
{
// creature:isHealthHidden()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isHealthHidden());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureIsMovementBlocked(lua_State* L)
{
// creature:isMovementBlocked()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->isMovementBlocked());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSee(lua_State* L)
{
// creature:canSee(position)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Position& position = getPosition(L, 2);
pushBoolean(L, creature->canSee(position));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSeeCreature(lua_State* L)
{
// creature:canSeeCreature(creature)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Creature* otherCreature = getCreature(L, 2);
if (!otherCreature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, creature->canSeeCreature(otherCreature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSeeGhostMode(lua_State* L)
{
// creature:canSeeGhostMode(creature)
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
const Creature* otherCreature = getCreature(L, 2);
if (!otherCreature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, creature->canSeeGhostMode(otherCreature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureCanSeeInvisibility(lua_State* L)
{
// creature:canSeeInvisibility()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->canSeeInvisibility());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetParent(lua_State* L)
{
// creature:getParent()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Cylinder* parent = creature->getParent();
if (!parent) {
lua_pushnil(L);
return 1;
}
pushCylinder(L, parent);
return 1;
}
int LuaScriptInterface::luaCreatureGetId(lua_State* L)
{
// creature:getId()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetName(lua_State* L)
{
// creature:getName()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushString(L, creature->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetTarget(lua_State* L)
{
// creature:getTarget()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* target = creature->getAttackedCreature();
if (target) {
pushUserdata<Creature>(L, target);
setCreatureMetatable(L, -1, target);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetTarget(lua_State* L)
{
// creature:setTarget(target)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->setAttackedCreature(getCreature(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetFollowCreature(lua_State* L)
{
// creature:getFollowCreature()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* followCreature = creature->getFollowCreature();
if (followCreature) {
pushUserdata<Creature>(L, followCreature);
setCreatureMetatable(L, -1, followCreature);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetFollowCreature(lua_State* L)
{
// creature:setFollowCreature(followedCreature)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, creature->setFollowCreature(getCreature(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetMaster(lua_State* L)
{
// creature:getMaster()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Creature* master = creature->getMaster();
if (!master) {
lua_pushnil(L);
return 1;
}
pushUserdata<Creature>(L, master);
setCreatureMetatable(L, -1, master);
return 1;
}
int LuaScriptInterface::luaCreatureSetMaster(lua_State* L)
{
// creature:setMaster(master)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
pushBoolean(L, creature->setMaster(getCreature(L, 2)));
g_game.updateCreatureType(creature);
return 1;
}
int LuaScriptInterface::luaCreatureGetLight(lua_State* L)
{
// creature:getLight()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
LightInfo lightInfo = creature->getCreatureLight();
lua_pushnumber(L, lightInfo.level);
lua_pushnumber(L, lightInfo.color);
return 2;
}
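// getLight() is one of the few bindings returning two values:
//   local level, color = creature:getLight()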
int LuaScriptInterface::luaCreatureSetLight(lua_State* L)
{
// creature:setLight(color, level)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
LightInfo light;
light.color = getNumber<uint8_t>(L, 2);
light.level = getNumber<uint8_t>(L, 3);
creature->setCreatureLight(light);
g_game.changeLight(creature);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureGetSpeed(lua_State* L)
{
// creature:getSpeed()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetBaseSpeed(lua_State* L)
{
// creature:getBaseSpeed()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getBaseSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureChangeSpeed(lua_State* L)
{
// creature:changeSpeed(delta)
Creature* creature = getCreature(L, 1);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
int32_t delta = getNumber<int32_t>(L, 2);
g_game.changeSpeed(creature, delta);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureSetDropLoot(lua_State* L)
{
// creature:setDropLoot(doDrop)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setDropLoot(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetSkillLoss(lua_State* L)
{
// creature:setSkillLoss(skillLoss)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setSkillLoss(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetPosition(lua_State* L)
{
// creature:getPosition()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushPosition(L, creature->getPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetTile(lua_State* L)
{
// creature:getTile()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Tile* tile = creature->getTile();
if (tile) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDirection(lua_State* L)
{
// creature:getDirection()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getDirection());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetDirection(lua_State* L)
{
// creature:setDirection(direction)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushBoolean(L, g_game.internalCreatureTurn(creature, getNumber<Direction>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetHealth(lua_State* L)
{
// creature:getHealth()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getHealth());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetHealth(lua_State* L)
{
// creature:setHealth(health)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
creature->health = std::min<int32_t>(getNumber<uint32_t>(L, 2), creature->healthMax);
g_game.addCreatureHealth(creature);
Player* player = creature->getPlayer();
if (player) {
player->sendStats();
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureAddHealth(lua_State* L)
{
// creature:addHealth(healthChange)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
CombatDamage damage;
damage.primary.value = getNumber<int32_t>(L, 2);
if (damage.primary.value >= 0) {
damage.primary.type = COMBAT_HEALING;
} else {
damage.primary.type = COMBAT_UNDEFINEDDAMAGE;
}
pushBoolean(L, g_game.combatChangeHealth(nullptr, creature, damage));
return 1;
}
int LuaScriptInterface::luaCreatureGetMaxHealth(lua_State* L)
{
// creature:getMaxHealth()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getMaxHealth());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetMaxHealth(lua_State* L)
{
// creature:setMaxHealth(maxHealth)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
creature->healthMax = getNumber<uint32_t>(L, 2);
creature->health = std::min<int32_t>(creature->health, creature->healthMax);
g_game.addCreatureHealth(creature);
Player* player = creature->getPlayer();
if (player) {
player->sendStats();
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCreatureSetHiddenHealth(lua_State* L)
{
// creature:setHiddenHealth(hide)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setHiddenHealth(getBoolean(L, 2));
g_game.addCreatureHealth(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetMovementBlocked(lua_State* L)
{
// creature:setMovementBlocked(state)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setMovementBlocked(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetSkull(lua_State* L)
{
// creature:getSkull()
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getSkull());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetSkull(lua_State* L)
{
// creature:setSkull(skull)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->setSkull(getNumber<Skulls_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetOutfit(lua_State* L)
{
// creature:getOutfit()
const Creature* creature = getUserdata<const Creature>(L, 1);
if (creature) {
pushOutfit(L, creature->getCurrentOutfit());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureSetOutfit(lua_State* L)
{
// creature:setOutfit(outfit)
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
creature->defaultOutfit = getOutfit(L, 2);
g_game.internalCreatureChangeOutfit(creature, creature->defaultOutfit);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetCondition(lua_State* L)
{
// creature:getCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
uint32_t subId = getNumber<uint32_t>(L, 4, 0);
Condition* condition = creature->getCondition(conditionType, conditionId, subId);
if (condition) {
pushUserdata<Condition>(L, condition);
setWeakMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureAddCondition(lua_State* L)
{
// creature:addCondition(condition[, force = false])
Creature* creature = getUserdata<Creature>(L, 1);
Condition* condition = getUserdata<Condition>(L, 2);
if (creature && condition) {
bool force = getBoolean(L, 3, false);
pushBoolean(L, creature->addCondition(condition->clone(), force));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureRemoveCondition(lua_State* L)
{
// creature:removeCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0[, force = false]]])
// creature:removeCondition(condition[, force = false])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
Condition* condition = nullptr;
bool force = false;
if (isUserdata(L, 2)) {
condition = getUserdata<Condition>(L, 2);
force = getBoolean(L, 3, false);
} else {
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
uint32_t subId = getNumber<uint32_t>(L, 4, 0);
condition = creature->getCondition(conditionType, conditionId, subId);
force = getBoolean(L, 5, false);
}
if (condition) {
creature->removeCondition(condition, force);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureHasCondition(lua_State* L)
{
// creature:hasCondition(conditionType[, subId = 0])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
uint32_t subId = getNumber<uint32_t>(L, 3, 0);
pushBoolean(L, creature->hasCondition(conditionType, subId));
return 1;
}
int LuaScriptInterface::luaCreatureIsImmune(lua_State* L)
{
// creature:isImmune(condition or conditionType)
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
if (isNumber(L, 2)) {
pushBoolean(L, creature->isImmune(getNumber<ConditionType_t>(L, 2)));
} else if (Condition* condition = getUserdata<Condition>(L, 2)) {
pushBoolean(L, creature->isImmune(condition->getType()));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureRemove(lua_State* L)
{
// creature:remove()
Creature** creaturePtr = getRawUserdata<Creature>(L, 1);
if (!creaturePtr) {
lua_pushnil(L);
return 1;
}
Creature* creature = *creaturePtr;
if (!creature) {
lua_pushnil(L);
return 1;
}
Player* player = creature->getPlayer();
if (player) {
player->kickPlayer(true);
} else {
g_game.removeCreature(creature);
}
*creaturePtr = nullptr;
pushBoolean(L, true);
return 1;
}
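// Behavior note: for players, remove() kicks them through the regular logout
// path rather than deleting the creature outright; in both cases the userdata
// is invalidated (*creaturePtr = nullptr), so further calls on it yield nil.
// Lua sketch:
//   creature:remove() -- safe for both players and monsters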
int LuaScriptInterface::luaCreatureTeleportTo(lua_State* L)
{
// creature:teleportTo(position[, pushMovement = false])
bool pushMovement = getBoolean(L, 3, false);
const Position& position = getPosition(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Position oldPosition = creature->getPosition();
if (g_game.internalTeleport(creature, position, pushMovement) != RETURNVALUE_NOERROR) {
pushBoolean(L, false);
return 1;
}
if (pushMovement) {
if (oldPosition.x == position.x) {
if (oldPosition.y < position.y) {
g_game.internalCreatureTurn(creature, DIRECTION_SOUTH);
} else {
g_game.internalCreatureTurn(creature, DIRECTION_NORTH);
}
} else if (oldPosition.x > position.x) {
g_game.internalCreatureTurn(creature, DIRECTION_WEST);
} else if (oldPosition.x < position.x) {
g_game.internalCreatureTurn(creature, DIRECTION_EAST);
}
}
pushBoolean(L, true);
return 1;
}
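// Note for script authors: with pushMovement the creature is also turned to
// face its direction of travel after a successful teleport, mimicking a walk.
// Lua sketch (coordinates are illustrative):
//   if creature:teleportTo(Position(100, 200, 7), true) then
//       -- creature now faces the way it "moved"
//   end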
int LuaScriptInterface::luaCreatureSay(lua_State* L)
{
// creature:say(text[, type = TALKTYPE_MONSTER_SAY[, ghost = false[, target = nullptr[, position]]]])
int parameters = lua_gettop(L);
Position position;
if (parameters >= 6) {
position = getPosition(L, 6);
if (!position.x || !position.y) {
reportErrorFunc(L, "Invalid position specified.");
pushBoolean(L, false);
return 1;
}
}
Creature* target = nullptr;
if (parameters >= 5) {
target = getCreature(L, 5);
}
bool ghost = getBoolean(L, 4, false);
SpeakClasses type = getNumber<SpeakClasses>(L, 3, TALKTYPE_MONSTER_SAY);
const std::string& text = getString(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
SpectatorVec spectators;
if (target) {
spectators.emplace_back(target);
}
if (position.x != 0) {
pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators, &position));
} else {
pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators));
}
return 1;
}
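// Spectator note: passing a target limits the message to that single creature,
// and an explicit position makes the text appear there instead of at the
// speaker. Lua sketch (targetPlayer is an assumed, valid creature userdata):
//   creature:say("Psst, over here!", TALKTYPE_MONSTER_SAY, false, targetPlayer)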
int LuaScriptInterface::luaCreatureGetDamageMap(lua_State* L)
{
// creature:getDamageMap()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creature->damageMap.size(), 0);
for (const auto& damageEntry : creature->damageMap) {
lua_createtable(L, 0, 2);
setField(L, "total", damageEntry.second.total);
setField(L, "ticks", damageEntry.second.ticks);
lua_rawseti(L, -2, damageEntry.first);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetSummons(lua_State* L)
{
// creature:getSummons()
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, creature->getSummonCount(), 0);
int index = 0;
for (Creature* summon : creature->getSummons()) {
pushUserdata<Creature>(L, summon);
setCreatureMetatable(L, -1, summon);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetDescription(lua_State* L)
{
// creature:getDescription(distance)
int32_t distance = getNumber<int32_t>(L, 2);
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
pushString(L, creature->getDescription(distance));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureGetPathTo(lua_State* L)
{
// creature:getPathTo(pos[, minTargetDist = 0[, maxTargetDist = 1[, fullPathSearch = true[, clearSight = true[, maxSearchDist = 0]]]]])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
const Position& position = getPosition(L, 2);
FindPathParams fpp;
fpp.minTargetDist = getNumber<int32_t>(L, 3, 0);
fpp.maxTargetDist = getNumber<int32_t>(L, 4, 1);
fpp.fullPathSearch = getBoolean(L, 5, fpp.fullPathSearch);
fpp.clearSight = getBoolean(L, 6, fpp.clearSight);
fpp.maxSearchDist = getNumber<int32_t>(L, 7, fpp.maxSearchDist);
std::vector<Direction> dirList;
if (creature->getPathTo(position, dirList, fpp)) {
lua_newtable(L);
int index = 0;
for (auto it = dirList.rbegin(); it != dirList.rend(); ++it) {
lua_pushnumber(L, *it);
lua_rawseti(L, -2, ++index);
}
} else {
pushBoolean(L, false);
}
return 1;
}
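// Ordering note: the path search yields directions goal-to-start, hence the
// reverse iteration above; the returned table is therefore in walking order
// (first step first). Lua sketch (destination is illustrative; stepping
// through the table like this ignores walk delays):
//   local dirs = creature:getPathTo(Position(95, 117, 7))
//   if dirs then
//       for _, dir in ipairs(dirs) do
//           creature:move(dir)
//       end
//   end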
int LuaScriptInterface::luaCreatureMove(lua_State* L)
{
// creature:move(direction)
// creature:move(tile[, flags = 0])
Creature* creature = getUserdata<Creature>(L, 1);
if (!creature) {
lua_pushnil(L);
return 1;
}
if (isNumber(L, 2)) {
Direction direction = getNumber<Direction>(L, 2);
if (direction > DIRECTION_LAST) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, g_game.internalMoveCreature(creature, direction, FLAG_NOLIMIT));
} else {
Tile* tile = getUserdata<Tile>(L, 2);
if (!tile) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, g_game.internalMoveCreature(*creature, *tile, getNumber<uint32_t>(L, 3)));
}
return 1;
}
int LuaScriptInterface::luaCreatureGetZone(lua_State* L)
{
// creature:getZone()
Creature* creature = getUserdata<Creature>(L, 1);
if (creature) {
lua_pushnumber(L, creature->getZone());
} else {
lua_pushnil(L);
}
return 1;
}
// Player
int LuaScriptInterface::luaPlayerCreate(lua_State* L)
{
// Player(id or guid or name or userdata)
Player* player;
if (isNumber(L, 2)) {
uint32_t id = getNumber<uint32_t>(L, 2);
if (id >= 0x10000000 && id <= Player::playerAutoID) {
player = g_game.getPlayerByID(id);
} else {
player = g_game.getPlayerByGUID(id);
}
} else if (isString(L, 2)) {
ReturnValue ret = g_game.getPlayerByNameWildcard(getString(L, 2), player);
if (ret != RETURNVALUE_NOERROR) {
lua_pushnil(L);
lua_pushnumber(L, ret);
return 2;
}
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Player) {
lua_pushnil(L);
return 1;
}
player = getUserdata<Player>(L, 2);
} else {
player = nullptr;
}
if (player) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
} else {
lua_pushnil(L);
}
return 1;
}
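// Dispatch note: numeric arguments inside the online creature-id range
// (0x10000000 .. Player::playerAutoID) resolve by runtime id, anything else by
// database GUID; string lookups go through the wildcard resolver and return a
// ReturnValue as a second result on failure. Lua sketch (name is illustrative):
//   local player, ret = Player("Bob~")
//   if not player then
//       print(ret) -- e.g. RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE
//   end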
int LuaScriptInterface::luaPlayerIsPlayer(lua_State* L)
{
// player:isPlayer()
pushBoolean(L, getUserdata<const Player>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaPlayerGetGuid(lua_State* L)
{
// player:getGuid()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getGUID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetIp(lua_State* L)
{
// player:getIp()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getIP());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetAccountId(lua_State* L)
{
// player:getAccountId()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getAccount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLastLoginSaved(lua_State* L)
{
// player:getLastLoginSaved()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLastLoginSaved());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLastLogout(lua_State* L)
{
// player:getLastLogout()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLastLogout());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetAccountType(lua_State* L)
{
// player:getAccountType()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getAccountType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetAccountType(lua_State* L)
{
// player:setAccountType(accountType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->accountType = getNumber<AccountType_t>(L, 2);
IOLoginData::setAccountType(player->getAccount(), player->accountType);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetCapacity(lua_State* L)
{
// player:getCapacity()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getCapacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetCapacity(lua_State* L)
{
// player:setCapacity(capacity)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->capacity = getNumber<uint32_t>(L, 2);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetFreeCapacity(lua_State* L)
{
// player:getFreeCapacity()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getFreeCapacity());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetDepotChest(lua_State* L)
{
// player:getDepotChest(depotId[, autoCreate = false])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t depotId = getNumber<uint32_t>(L, 2);
bool autoCreate = getBoolean(L, 3, false);
DepotChest* depotChest = player->getDepotChest(depotId, autoCreate);
if (depotChest) {
player->setLastDepotId(depotId); // FIXME: workaround for #2251
pushUserdata<Item>(L, depotChest);
setItemMetatable(L, -1, depotChest);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetInbox(lua_State* L)
{
// player:getInbox()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Inbox* inbox = player->getInbox();
if (inbox) {
pushUserdata<Item>(L, inbox);
setItemMetatable(L, -1, inbox);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkullTime(lua_State* L)
{
// player:getSkullTime()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSkullTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetSkullTime(lua_State* L)
{
// player:setSkullTime(skullTime)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setSkullTicks(getNumber<int64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetDeathPenalty(lua_State* L)
{
// player:getDeathPenalty()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLostPercent() * 100);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetExperience(lua_State* L)
{
// player:getExperience()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getExperience());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddExperience(lua_State* L)
{
// player:addExperience(experience[, sendText = false])
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint64_t experience = getNumber<uint64_t>(L, 2);
bool sendText = getBoolean(L, 3, false);
player->addExperience(nullptr, experience, sendText);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveExperience(lua_State* L)
{
// player:removeExperience(experience[, sendText = false])
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint64_t experience = getNumber<uint64_t>(L, 2);
bool sendText = getBoolean(L, 3, false);
player->removeExperience(experience, sendText);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetLevel(lua_State* L)
{
// player:getLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMagicLevel(lua_State* L)
{
// player:getMagicLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMagicLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMagicLevel(lua_State* L)
{
// player:getBaseMagicLevel()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getBaseMagicLevel());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMana(lua_State* L)
{
// player:getMana()
const Player* player = getUserdata<const Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMana(lua_State* L)
{
// player:addMana(manaChange[, animationOnLoss = false])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int32_t manaChange = getNumber<int32_t>(L, 2);
bool animationOnLoss = getBoolean(L, 3, false);
if (!animationOnLoss && manaChange < 0) {
player->changeMana(manaChange);
} else {
CombatDamage damage;
damage.primary.value = manaChange;
damage.origin = ORIGIN_NONE;
g_game.combatChangeMana(nullptr, player, damage);
}
pushBoolean(L, true);
return 1;
}
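// Behavior note: a negative manaChange with animationOnLoss left false is
// applied silently via changeMana(); every other case is routed through the
// combat pipeline so clients get the usual feedback. Lua sketch:
//   player:addMana(-50)       -- silent drain
//   player:addMana(-50, true) -- drain with client-side animation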
int LuaScriptInterface::luaPlayerGetMaxMana(lua_State* L)
{
// player:getMaxMana()
const Player* player = getUserdata<const Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMaxMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetMaxMana(lua_State* L)
{
// player:setMaxMana(maxMana)
Player* player = getPlayer(L, 1);
if (player) {
player->manaMax = getNumber<int32_t>(L, 2);
player->mana = std::min<int32_t>(player->mana, player->manaMax);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetManaSpent(lua_State* L)
{
// player:getManaSpent()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSpentMana());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddManaSpent(lua_State* L)
{
// player:addManaSpent(amount)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->addManaSpent(getNumber<uint64_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveManaSpent(lua_State* L)
{
// player:removeManaSpent(amount[, notify = true])
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->removeManaSpent(getNumber<uint64_t>(L, 2), getBoolean(L, 3, true));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMaxHealth(lua_State* L)
{
// player:getBaseMaxHealth()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->healthMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBaseMaxMana(lua_State* L)
{
// player:getBaseMaxMana()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->manaMax);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillLevel(lua_State* L)
{
// player:getSkillLevel(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetEffectiveSkillLevel(lua_State* L)
{
// player:getEffectiveSkillLevel(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->getSkillLevel(skillType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillPercent(lua_State* L)
{
// player:getSkillPercent(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].percent);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSkillTries(lua_State* L)
{
// player:getSkillTries(skillType)
skills_t skillType = getNumber<skills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && skillType <= SKILL_LAST) {
lua_pushnumber(L, player->skills[skillType].tries);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSkillTries(lua_State* L)
{
// player:addSkillTries(skillType, tries)
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
player->addSkillAdvance(skillType, tries);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveSkillTries(lua_State* L)
{
// player:removeSkillTries(skillType, tries[, notify = true])
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
player->removeSkillTries(skillType, tries, getBoolean(L, 4, true));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSpecialSkill(lua_State* L)
{
// player:getSpecialSkill(specialSkillType)
SpecialSkills_t specialSkillType = getNumber<SpecialSkills_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player && specialSkillType <= SPECIALSKILL_LAST) {
lua_pushnumber(L, player->getSpecialSkill(specialSkillType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSpecialSkill(lua_State* L)
{
// player:addSpecialSkill(specialSkillType, value)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
SpecialSkills_t specialSkillType = getNumber<SpecialSkills_t>(L, 2);
if (specialSkillType > SPECIALSKILL_LAST) {
lua_pushnil(L);
return 1;
}
player->setVarSpecialSkill(specialSkillType, getNumber<int32_t>(L, 3));
player->sendSkills();
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerAddOfflineTrainingTime(lua_State* L)
{
// player:addOfflineTrainingTime(time)
Player* player = getUserdata<Player>(L, 1);
if (player) {
int32_t time = getNumber<int32_t>(L, 2);
player->addOfflineTrainingTime(time);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetOfflineTrainingTime(lua_State* L)
{
// player:getOfflineTrainingTime()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getOfflineTrainingTime());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime(lua_State* L)
{
// player:removeOfflineTrainingTime(time)
Player* player = getUserdata<Player>(L, 1);
if (player) {
int32_t time = getNumber<int32_t>(L, 2);
player->removeOfflineTrainingTime(time);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOfflineTrainingTries(lua_State* L)
{
// player:addOfflineTrainingTries(skillType, tries)
Player* player = getUserdata<Player>(L, 1);
if (player) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint64_t tries = getNumber<uint64_t>(L, 3);
pushBoolean(L, player->addOfflineTrainingTries(skillType, tries));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetOfflineTrainingSkill(lua_State* L)
{
// player:getOfflineTrainingSkill()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getOfflineTrainingSkill());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetOfflineTrainingSkill(lua_State* L)
{
// player:setOfflineTrainingSkill(skillId)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint32_t skillId = getNumber<uint32_t>(L, 2);
player->setOfflineTrainingSkill(skillId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetItemCount(lua_State* L)
{
// player:getItemCount(itemId or itemName[, subType = -1])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t subType = getNumber<int32_t>(L, 3, -1);
lua_pushnumber(L, player->getItemTypeCount(itemId, subType));
return 1;
}
int LuaScriptInterface::luaPlayerGetItemById(lua_State* L)
{
// player:getItemById(itemId or itemName, deepSearch[, subType = -1])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
bool deepSearch = getBoolean(L, 3);
int32_t subType = getNumber<int32_t>(L, 4, -1);
Item* item = g_game.findItemOfType(player, itemId, deepSearch, subType);
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetVocation(lua_State* L)
{
// player:getVocation()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Vocation>(L, player->getVocation());
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetVocation(lua_State* L)
{
// player:setVocation(id or name or userdata)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Vocation* vocation;
if (isNumber(L, 2)) {
vocation = g_vocations.getVocation(getNumber<uint16_t>(L, 2));
} else if (isString(L, 2)) {
vocation = g_vocations.getVocation(g_vocations.getVocationId(getString(L, 2)));
} else if (isUserdata(L, 2)) {
vocation = getUserdata<Vocation>(L, 2);
} else {
vocation = nullptr;
}
if (!vocation) {
pushBoolean(L, false);
return 1;
}
player->setVocation(vocation->getId());
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetSex(lua_State* L)
{
// player:getSex()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSex());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetSex(lua_State* L)
{
// player:setSex(newSex)
Player* player = getUserdata<Player>(L, 1);
if (player) {
PlayerSex_t newSex = getNumber<PlayerSex_t>(L, 2);
player->setSex(newSex);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetTown(lua_State* L)
{
// player:getTown()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Town>(L, player->getTown());
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetTown(lua_State* L)
{
// player:setTown(town)
Town* town = getUserdata<Town>(L, 2);
if (!town) {
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setTown(town);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGuild(lua_State* L)
{
// player:getGuild()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Guild* guild = player->getGuild();
if (!guild) {
lua_pushnil(L);
return 1;
}
pushUserdata<Guild>(L, guild);
setMetatable(L, -1, "Guild");
return 1;
}
int LuaScriptInterface::luaPlayerSetGuild(lua_State* L)
{
// player:setGuild(guild)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
player->setGuild(getUserdata<Guild>(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetGuildLevel(lua_State* L)
{
// player:getGuildLevel()
Player* player = getUserdata<Player>(L, 1);
if (player && player->getGuild()) {
lua_pushnumber(L, player->getGuildRank()->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGuildLevel(lua_State* L)
{
// player:setGuildLevel(level)
uint8_t level = getNumber<uint8_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (!player || !player->getGuild()) {
lua_pushnil(L);
return 1;
}
GuildRank_ptr rank = player->getGuild()->getRankByLevel(level);
if (!rank) {
pushBoolean(L, false);
} else {
player->setGuildRank(rank);
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGuildNick(lua_State* L)
{
// player:getGuildNick()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushString(L, player->getGuildNick());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGuildNick(lua_State* L)
{
// player:setGuildNick(nick)
const std::string& nick = getString(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setGuildNick(nick);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetGroup(lua_State* L)
{
// player:getGroup()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushUserdata<Group>(L, player->getGroup());
setMetatable(L, -1, "Group");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetGroup(lua_State* L)
{
// player:setGroup(group)
Group* group = getUserdata<Group>(L, 2);
if (!group) {
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->setGroup(group);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetStamina(lua_State* L)
{
// player:getStamina()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getStaminaMinutes());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetStamina(lua_State* L)
{
// player:setStamina(stamina)
uint16_t stamina = getNumber<uint16_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->staminaMinutes = std::min<uint16_t>(2520, stamina);
player->sendStats();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSoul(lua_State* L)
{
// player:getSoul()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getSoul());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddSoul(lua_State* L)
{
// player:addSoul(soulChange)
int32_t soulChange = getNumber<int32_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->changeSoul(soulChange);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetMaxSoul(lua_State* L)
{
// player:getMaxSoul()
Player* player = getUserdata<Player>(L, 1);
if (player && player->vocation) {
lua_pushnumber(L, player->vocation->getSoulMax());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetBankBalance(lua_State* L)
{
// player:getBankBalance()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getBankBalance());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetBankBalance(lua_State* L)
{
// player:setBankBalance(bankBalance)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int64_t balance = getNumber<int64_t>(L, 2);
if (balance < 0) {
reportErrorFunc(L, "Invalid bank balance value.");
lua_pushnil(L);
return 1;
}
player->setBankBalance(balance);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerGetStorageValue(lua_State* L)
{
// player:getStorageValue(key)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t key = getNumber<uint32_t>(L, 2);
int32_t value;
if (player->getStorageValue(key, value)) {
lua_pushnumber(L, value);
} else {
lua_pushnumber(L, -1);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetStorageValue(lua_State* L)
{
// player:setStorageValue(key, value)
int32_t value = getNumber<int32_t>(L, 3);
uint32_t key = getNumber<uint32_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
reportErrorFunc(L, fmt::format("Accessing reserved range: {:d}", key));
pushBoolean(L, false);
return 1;
}
if (player) {
player->addStorageValue(key, value);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
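// Storage sketch (Lua): unset keys read back as -1, and writes into the
// reserved range are rejected with a script error. The key below is an
// illustrative assumption:
//   local KEY_QUEST_STEP = 10001
//   if player:getStorageValue(KEY_QUEST_STEP) == -1 then
//       player:setStorageValue(KEY_QUEST_STEP, 1)
//   end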
int LuaScriptInterface::luaPlayerAddItem(lua_State* L)
{
// player:addItem(itemId or itemName[, count = 1[, canDropOnMap = true[, subType = 1[, slot = CONST_SLOT_WHEREEVER]]]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
pushBoolean(L, false);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
int32_t count = getNumber<int32_t>(L, 3, 1);
int32_t subType = getNumber<int32_t>(L, 5, 1);
const ItemType& it = Item::items[itemId];
int32_t itemCount = 1;
int parameters = lua_gettop(L);
if (parameters >= 4) {
itemCount = std::max<int32_t>(1, count);
} else if (it.hasSubType()) {
if (it.stackable) {
itemCount = std::ceil(count / 100.f);
}
subType = count;
} else {
itemCount = std::max<int32_t>(1, count);
}
bool hasTable = itemCount > 1;
if (hasTable) {
lua_newtable(L);
} else if (itemCount == 0) {
lua_pushnil(L);
return 1;
}
bool canDropOnMap = getBoolean(L, 4, true);
slots_t slot = getNumber<slots_t>(L, 6, CONST_SLOT_WHEREEVER);
for (int32_t i = 1; i <= itemCount; ++i) {
int32_t stackCount = subType;
if (it.stackable) {
stackCount = std::min<int32_t>(stackCount, 100);
subType -= stackCount;
}
Item* item = Item::CreateItem(itemId, stackCount);
if (!item) {
if (!hasTable) {
lua_pushnil(L);
}
return 1;
}
ReturnValue ret = g_game.internalPlayerAddItem(player, item, canDropOnMap, slot);
if (ret != RETURNVALUE_NOERROR) {
delete item;
if (!hasTable) {
lua_pushnil(L);
}
return 1;
}
if (hasTable) {
lua_pushnumber(L, i);
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_settable(L, -3);
} else {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
}
}
return 1;
}
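// Parameter note: with four or more arguments, count is the number of items to
// create; with only itemId and count on a stackable type, count becomes the
// subType and is split into stacks of up to 100. When more than one item is
// created, the results come back as a table. Lua sketch (2160 as a stackable
// coin id is an illustrative assumption):
//   local items = player:addItem(2160, 250) -- three stacks: 100 + 100 + 50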
int LuaScriptInterface::luaPlayerAddItemEx(lua_State* L)
{
// player:addItemEx(item[, canDropOnMap = false[, index = INDEX_WHEREEVER[, flags = 0]]])
// player:addItemEx(item[, canDropOnMap = true[, slot = CONST_SLOT_WHEREEVER]])
Item* item = getUserdata<Item>(L, 2);
if (!item) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
if (item->getParent() != VirtualCylinder::virtualCylinder) {
reportErrorFunc(L, "Item already has a parent");
pushBoolean(L, false);
return 1;
}
bool canDropOnMap = getBoolean(L, 3, false);
ReturnValue returnValue;
if (canDropOnMap) {
slots_t slot = getNumber<slots_t>(L, 4, CONST_SLOT_WHEREEVER);
returnValue = g_game.internalPlayerAddItem(player, item, true, slot);
} else {
int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
uint32_t flags = getNumber<uint32_t>(L, 5, 0);
returnValue = g_game.internalAddItem(player, item, index, flags);
}
if (returnValue == RETURNVALUE_NOERROR) {
ScriptEnvironment::removeTempItem(item);
}
lua_pushnumber(L, returnValue);
return 1;
}
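// Usage note: the item must still be script-owned (parented to the virtual
// cylinder), i.e. freshly created and not yet placed in the world, and the
// function returns a ReturnValue rather than a boolean. Lua sketch (item id is
// illustrative):
//   local item = Game.createItem(2400)
//   if player:addItemEx(item) ~= RETURNVALUE_NOERROR then
//       item:remove()
//   end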
int LuaScriptInterface::luaPlayerRemoveItem(lua_State* L)
{
// player:removeItem(itemId or itemName, count[, subType = -1[, ignoreEquipped = false]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t itemId;
if (isNumber(L, 2)) {
itemId = getNumber<uint16_t>(L, 2);
} else {
itemId = Item::items.getItemIdByName(getString(L, 2));
if (itemId == 0) {
lua_pushnil(L);
return 1;
}
}
uint32_t count = getNumber<uint32_t>(L, 3);
int32_t subType = getNumber<int32_t>(L, 4, -1);
bool ignoreEquipped = getBoolean(L, 5, false);
pushBoolean(L, player->removeItemOfType(itemId, count, subType, ignoreEquipped));
return 1;
}
int LuaScriptInterface::luaPlayerGetMoney(lua_State* L)
{
// player:getMoney()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getMoney());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMoney(lua_State* L)
{
// player:addMoney(money)
uint64_t money = getNumber<uint64_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
g_game.addMoney(player, money);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveMoney(lua_State* L)
{
// player:removeMoney(money)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint64_t money = getNumber<uint64_t>(L, 2);
pushBoolean(L, g_game.removeMoney(player, money));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerShowTextDialog(lua_State* L)
{
// player:showTextDialog(id or name or userdata[, text[, canWrite[, length]]])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int32_t length = getNumber<int32_t>(L, 5, -1);
bool canWrite = getBoolean(L, 4, false);
std::string text;
int parameters = lua_gettop(L);
if (parameters >= 3) {
text = getString(L, 3);
}
Item* item;
if (isNumber(L, 2)) {
item = Item::CreateItem(getNumber<uint16_t>(L, 2));
} else if (isString(L, 2)) {
item = Item::CreateItem(Item::items.getItemIdByName(getString(L, 2)));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Item) {
pushBoolean(L, false);
return 1;
}
item = getUserdata<Item>(L, 2);
} else {
item = nullptr;
}
if (!item) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
if (length < 0) {
length = Item::items[item->getID()].maxTextLen;
}
if (!text.empty()) {
item->setText(text);
length = std::max<int32_t>(text.size(), length);
}
item->setParent(player);
player->setWriteItem(item, length);
player->sendTextWindow(item, length, canWrite);
pushBoolean(L, true);
return 1;
}
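// Usage sketch (Lua): an id or name argument creates a transient item that
// exists only for the dialog, while userdata reuses an existing item; length
// defaults to the item type's maxTextLen. The id below is an illustrative
// assumption:
//   player:showTextDialog(1949, "Dear adventurer, ...", false)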
int LuaScriptInterface::luaPlayerSendTextMessage(lua_State* L)
{
// player:sendTextMessage(type, text[, position, primaryValue = 0, primaryColor = TEXTCOLOR_NONE[, secondaryValue = 0, secondaryColor = TEXTCOLOR_NONE]])
// player:sendTextMessage(type, text, channelId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
int parameters = lua_gettop(L);
TextMessage message(getNumber<MessageClasses>(L, 2), getString(L, 3));
if (parameters == 4) {
uint16_t channelId = getNumber<uint16_t>(L, 4);
ChatChannel* channel = g_chat->getChannel(*player, channelId);
if (!channel || !channel->hasUser(*player)) {
pushBoolean(L, false);
return 1;
}
message.channelId = channelId;
} else {
if (parameters >= 6) {
message.position = getPosition(L, 4);
message.primary.value = getNumber<int32_t>(L, 5);
message.primary.color = getNumber<TextColor_t>(L, 6);
}
if (parameters >= 8) {
message.secondary.value = getNumber<int32_t>(L, 7);
message.secondary.color = getNumber<TextColor_t>(L, 8);
}
}
player->sendTextMessage(message);
pushBoolean(L, true);
return 1;
}
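// Signature note: exactly four arguments select the channel form, which fails
// when the player is not in that channel; six or eight arguments attach a
// position plus one or two value/color pairs for on-screen numbers. Lua
// sketch:
//   player:sendTextMessage(MESSAGE_STATUS_DEFAULT, "Welcome back!")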
int LuaScriptInterface::luaPlayerSendChannelMessage(lua_State* L)
{
// player:sendChannelMessage(author, text, type, channelId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint16_t channelId = getNumber<uint16_t>(L, 5);
SpeakClasses type = getNumber<SpeakClasses>(L, 4);
const std::string& text = getString(L, 3);
const std::string& author = getString(L, 2);
player->sendChannelMessage(author, text, type, channelId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSendPrivateMessage(lua_State* L)
{
// player:sendPrivateMessage(speaker, text[, type])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
const Player* speaker = getUserdata<const Player>(L, 2);
const std::string& text = getString(L, 3);
SpeakClasses type = getNumber<SpeakClasses>(L, 4, TALKTYPE_PRIVATE_FROM);
player->sendPrivateMessage(speaker, type, text);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerChannelSay(lua_State* L)
{
// player:channelSay(speaker, type, text, channelId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Creature* speaker = getCreature(L, 2);
SpeakClasses type = getNumber<SpeakClasses>(L, 3);
const std::string& text = getString(L, 4);
uint16_t channelId = getNumber<uint16_t>(L, 5);
player->sendToChannel(speaker, type, text, channelId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerOpenChannel(lua_State* L)
{
// player:openChannel(channelId)
uint16_t channelId = getNumber<uint16_t>(L, 2);
Player* player = getUserdata<Player>(L, 1);
if (player) {
g_game.playerOpenChannel(player->getID(), channelId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetSlotItem(lua_State* L)
{
// player:getSlotItem(slot)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint32_t slot = getNumber<uint32_t>(L, 2);
Thing* thing = player->getThing(slot);
if (!thing) {
lua_pushnil(L);
return 1;
}
Item* item = thing->getItem();
if (item) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetParty(lua_State* L)
{
// player:getParty()
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Party* party = player->getParty();
if (party) {
pushUserdata<Party>(L, party);
setMetatable(L, -1, "Party");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOutfit(lua_State* L)
{
// player:addOutfit(lookType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->addOutfit(getNumber<uint16_t>(L, 2), 0);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddOutfitAddon(lua_State* L)
{
// player:addOutfitAddon(lookType, addon)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3);
player->addOutfit(lookType, addon);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOutfit(lua_State* L)
{
// player:removeOutfit(lookType)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
pushBoolean(L, player->removeOutfit(lookType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerRemoveOutfitAddon(lua_State* L)
{
// player:removeOutfitAddon(lookType, addon)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3);
pushBoolean(L, player->removeOutfitAddon(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasOutfit(lua_State* L)
{
// player:hasOutfit(lookType[, addon = 0])
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3, 0);
pushBoolean(L, player->hasOutfit(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerCanWearOutfit(lua_State* L)
{
// player:canWearOutfit(lookType[, addon = 0])
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint16_t lookType = getNumber<uint16_t>(L, 2);
uint8_t addon = getNumber<uint8_t>(L, 3, 0);
pushBoolean(L, player->canWear(lookType, addon));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendOutfitWindow(lua_State* L)
{
// player:sendOutfitWindow()
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->sendOutfitWindow();
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMount(lua_State* L)
{
// player:addMount(mountId or mountName)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t mountId;
if (isNumber(L, 2)) {
mountId = getNumber<uint8_t>(L, 2);
} else {
Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
if (!mount) {
lua_pushnil(L);
return 1;
}
mountId = mount->id;
}
pushBoolean(L, player->tameMount(mountId));
return 1;
}
int LuaScriptInterface::luaPlayerRemoveMount(lua_State* L)
{
// player:removeMount(mountId or mountName)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t mountId;
if (isNumber(L, 2)) {
mountId = getNumber<uint8_t>(L, 2);
} else {
Mount* mount = g_game.mounts.getMountByName(getString(L, 2));
if (!mount) {
lua_pushnil(L);
return 1;
}
mountId = mount->id;
}
pushBoolean(L, player->untameMount(mountId));
return 1;
}
int LuaScriptInterface::luaPlayerHasMount(lua_State* L)
{
// player:hasMount(mountId or mountName)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Mount* mount = nullptr;
if (isNumber(L, 2)) {
mount = g_game.mounts.getMountByID(getNumber<uint8_t>(L, 2));
} else {
mount = g_game.mounts.getMountByName(getString(L, 2));
}
if (mount) {
pushBoolean(L, player->hasMount(mount));
} else {
lua_pushnil(L);
}
return 1;
}
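// The mount helpers above accept either the numeric mount id or the mount
// name; unknown names yield nil rather than false. Lua sketch (the mount name
// is an illustrative assumption):
//   if player:hasMount("Widow Queen") == false then
//       player:addMount("Widow Queen")
//   end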
int LuaScriptInterface::luaPlayerGetPremiumEndsAt(lua_State* L)
{
// player:getPremiumEndsAt()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->premiumEndsAt);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSetPremiumEndsAt(lua_State* L)
{
// player:setPremiumEndsAt(timestamp)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
time_t timestamp = getNumber<time_t>(L, 2);
player->setPremiumTime(timestamp);
IOLoginData::updatePremiumTime(player->getAccount(), timestamp);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerHasBlessing(lua_State* L)
{
// player:hasBlessing(blessing)
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->hasBlessing(blessing));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddBlessing(lua_State* L)
{
// player:addBlessing(blessing)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
if (player->hasBlessing(blessing)) {
pushBoolean(L, false);
return 1;
}
player->addBlessing(blessing);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerRemoveBlessing(lua_State* L)
{
// player:removeBlessing(blessing)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
uint8_t blessing = getNumber<uint8_t>(L, 2) - 1;
if (!player->hasBlessing(blessing)) {
pushBoolean(L, false);
return 1;
}
player->removeBlessing(blessing);
pushBoolean(L, true);
return 1;
}
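// Index note: blessing numbers are 1-based on the Lua side and shifted to the
// 0-based internal index above; adding an already-owned blessing returns
// false. Lua sketch:
//   if not player:hasBlessing(1) then
//       player:addBlessing(1)
//   end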
int LuaScriptInterface::luaPlayerCanLearnSpell(lua_State* L)
{
// player:canLearnSpell(spellName)
const Player* player = getUserdata<const Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
const std::string& spellName = getString(L, 2);
InstantSpell* spell = g_spells->getInstantSpellByName(spellName);
if (!spell) {
reportErrorFunc(L, "Spell \"" + spellName + "\" not found");
pushBoolean(L, false);
return 1;
}
if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) {
pushBoolean(L, true);
return 1;
}
const auto& vocMap = spell->getVocMap();
if (vocMap.count(player->getVocationId()) == 0) {
pushBoolean(L, false);
} else if (player->getLevel() < spell->getLevel()) {
pushBoolean(L, false);
} else if (player->getMagicLevel() < spell->getMagicLevel()) {
pushBoolean(L, false);
} else {
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaPlayerLearnSpell(lua_State* L)
{
// player:learnSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
player->learnInstantSpell(spellName);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerForgetSpell(lua_State* L)
{
// player:forgetSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
player->forgetInstantSpell(spellName);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasLearnedSpell(lua_State* L)
{
// player:hasLearnedSpell(spellName)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& spellName = getString(L, 2);
pushBoolean(L, player->hasLearnedInstantSpell(spellName));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendTutorial(lua_State* L)
{
// player:sendTutorial(tutorialId)
Player* player = getUserdata<Player>(L, 1);
if (player) {
uint8_t tutorialId = getNumber<uint8_t>(L, 2);
player->sendTutorial(tutorialId);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerAddMapMark(lua_State* L)
{
// player:addMapMark(position, type, description)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const Position& position = getPosition(L, 2);
uint8_t type = getNumber<uint8_t>(L, 3);
const std::string& description = getString(L, 4);
player->sendAddMarker(position, type, description);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSave(lua_State* L)
{
// player:save()
Player* player = getUserdata<Player>(L, 1);
if (player) {
player->loginPosition = player->getPosition();
pushBoolean(L, IOLoginData::savePlayer(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerPopupFYI(lua_State* L)
{
// player:popupFYI(message)
Player* player = getUserdata<Player>(L, 1);
if (player) {
const std::string& message = getString(L, 2);
player->sendFYIBox(message);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerIsPzLocked(lua_State* L)
{
// player:isPzLocked()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->isPzLocked());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetClient(lua_State* L)
{
// player:getClient()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_createtable(L, 0, 2);
setField(L, "version", player->getProtocolVersion());
setField(L, "os", player->getOperatingSystem());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetHouse(lua_State* L)
{
// player:getHouse()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = g_game.map.houses.getHouseByPlayerId(player->getGUID());
if (house) {
pushUserdata<House>(L, house);
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerSendHouseWindow(lua_State* L)
{
// player:sendHouseWindow(house, listId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = getUserdata<House>(L, 2);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 3);
player->sendHouseWindow(house, listId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSetEditHouse(lua_State* L)
{
// player:setEditHouse(house, listId)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
House* house = getUserdata<House>(L, 2);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 3);
player->setEditHouse(house, listId);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaPlayerSetGhostMode(lua_State* L)
{
// player:setGhostMode(enabled[, showEffect = true])
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
bool enabled = getBoolean(L, 2);
if (player->isInGhostMode() == enabled) {
pushBoolean(L, true);
return 1;
}
bool showEffect = getBoolean(L, 3, true);
player->switchGhostMode();
Tile* tile = player->getTile();
const Position& position = player->getPosition();
const bool isInvisible = player->isInvisible();
SpectatorVec spectators;
g_game.map.getSpectators(spectators, position, true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer != player && !tmpPlayer->isAccessPlayer()) {
if (enabled) {
tmpPlayer->sendRemoveTileCreature(player, position, tile->getClientIndexOfCreature(tmpPlayer, player));
} else {
tmpPlayer->sendCreatureAppear(player, position, showEffect);
}
} else {
if (isInvisible) {
continue;
}
tmpPlayer->sendCreatureChangeVisible(player, !enabled);
}
}
if (player->isInGhostMode()) {
for (const auto& it : g_game.getPlayers()) {
if (!it.second->isAccessPlayer()) {
it.second->notifyStatusChange(player, VIPSTATUS_OFFLINE);
}
}
IOLoginData::updateOnlineStatus(player->getGUID(), false);
} else {
for (const auto& it : g_game.getPlayers()) {
if (!it.second->isAccessPlayer()) {
it.second->notifyStatusChange(player, VIPSTATUS_ONLINE);
}
}
IOLoginData::updateOnlineStatus(player->getGUID(), true);
}
pushBoolean(L, true);
return 1;
}
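// Behavior note: besides hiding the player from non-access spectators, ghost
// mode also flips VIP notifications and the database online flag, so regular
// players see a full logout/login; showEffect only matters when reappearing.
// Lua sketch:
//   player:setGhostMode(true)         -- vanish; others see a logout
//   player:setGhostMode(false, false) -- reappear without the appear effect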
int LuaScriptInterface::luaPlayerGetContainerId(lua_State* L)
{
// player:getContainerId(container)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* container = getUserdata<Container>(L, 2);
if (container) {
lua_pushnumber(L, player->getContainerID(container));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerById(lua_State* L)
{
// player:getContainerById(id)
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* container = player->getContainerByID(getNumber<uint8_t>(L, 2));
if (container) {
pushUserdata<Container>(L, container);
setMetatable(L, -1, "Container");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetContainerIndex(lua_State* L)
{
// player:getContainerIndex(id)
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->getContainerIndex(getNumber<uint8_t>(L, 2)));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetInstantSpells(lua_State* L)
{
// player:getInstantSpells()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
std::vector<const InstantSpell*> spells;
for (auto& spell : g_spells->getInstantSpells()) {
if (spell.second.canCast(player)) {
spells.push_back(&spell.second);
}
}
lua_createtable(L, spells.size(), 0);
int index = 0;
for (auto spell : spells) {
pushInstantSpell(L, *spell);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaPlayerCanCast(lua_State* L)
{
// player:canCast(spell)
Player* player = getUserdata<Player>(L, 1);
InstantSpell* spell = getUserdata<InstantSpell>(L, 2);
if (player && spell) {
pushBoolean(L, spell->canCast(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasChaseMode(lua_State* L)
{
// player:hasChaseMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->chaseMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerHasSecureMode(lua_State* L)
{
// player:hasSecureMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
pushBoolean(L, player->secureMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetFightMode(lua_State* L)
{
// player:getFightMode()
Player* player = getUserdata<Player>(L, 1);
if (player) {
lua_pushnumber(L, player->fightMode);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPlayerGetStoreInbox(lua_State* L)
{
// player:getStoreInbox()
Player* player = getUserdata<Player>(L, 1);
if (!player) {
lua_pushnil(L);
return 1;
}
Container* storeInbox = player->getStoreInbox();
if (!storeInbox) {
lua_pushnil(L);
return 1;
}
pushUserdata<Container>(L, storeInbox);
setMetatable(L, -1, "Container");
return 1;
}
// Monster
int LuaScriptInterface::luaMonsterCreate(lua_State* L)
{
// Monster(id or userdata)
Monster* monster;
if (isNumber(L, 2)) {
monster = g_game.getMonsterByID(getNumber<uint32_t>(L, 2));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Monster) {
lua_pushnil(L);
return 1;
}
monster = getUserdata<Monster>(L, 2);
} else {
monster = nullptr;
}
if (monster) {
pushUserdata<Monster>(L, monster);
setMetatable(L, -1, "Monster");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsMonster(lua_State* L)
{
// monster:isMonster()
pushBoolean(L, getUserdata<const Monster>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaMonsterGetType(lua_State* L)
{
// monster:getType()
const Monster* monster = getUserdata<const Monster>(L, 1);
if (monster) {
pushUserdata<MonsterType>(L, monster->mType);
setMetatable(L, -1, "MonsterType");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterRename(lua_State* L)
{
// monster:rename(name[, nameDescription])
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
monster->setName(getString(L, 2));
if (lua_gettop(L) >= 3) {
monster->setNameDescription(getString(L, 3));
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterGetSpawnPosition(lua_State* L)
{
// monster:getSpawnPosition()
const Monster* monster = getUserdata<const Monster>(L, 1);
if (monster) {
pushPosition(L, monster->getMasterPos());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsInSpawnRange(lua_State* L)
{
// monster:isInSpawnRange([position])
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->isInSpawnRange(lua_gettop(L) >= 2 ? getPosition(L, 2) : monster->getPosition()));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsIdle(lua_State* L)
{
// monster:isIdle()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->getIdleStatus());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSetIdle(lua_State* L)
{
// monster:setIdle(idle)
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
monster->setIdle(getBoolean(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterIsTarget(lua_State* L)
{
// monster:isTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, monster->isTarget(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsOpponent(lua_State* L)
{
// monster:isOpponent(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, monster->isOpponent(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsFriend(lua_State* L)
{
// monster:isFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
const Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, monster->isFriend(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterAddFriend(lua_State* L)
{
// monster:addFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
monster->addFriend(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterRemoveFriend(lua_State* L)
{
// monster:removeFriend(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
monster->removeFriend(creature);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetFriendList(lua_State* L)
{
// monster:getFriendList()
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
const auto& friendList = monster->getFriendList();
lua_createtable(L, friendList.size(), 0);
int index = 0;
for (Creature* creature : friendList) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetFriendCount(lua_State* L)
{
// monster:getFriendCount()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
lua_pushnumber(L, monster->getFriendList().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterAddTarget(lua_State* L)
{
// monster:addTarget(creature[, pushFront = false])
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
bool pushFront = getBoolean(L, 3, false);
monster->addTarget(creature, pushFront);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterRemoveTarget(lua_State* L)
{
// monster:removeTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
monster->removeTarget(creature);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMonsterGetTargetList(lua_State* L)
{
// monster:getTargetList()
Monster* monster = getUserdata<Monster>(L, 1);
if (!monster) {
lua_pushnil(L);
return 1;
}
const auto& targetList = monster->getTargetList();
lua_createtable(L, targetList.size(), 0);
int index = 0;
for (Creature* creature : targetList) {
pushUserdata<Creature>(L, creature);
setCreatureMetatable(L, -1, creature);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterGetTargetCount(lua_State* L)
{
// monster:getTargetCount()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
lua_pushnumber(L, monster->getTargetList().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSelectTarget(lua_State* L)
{
// monster:selectTarget(creature)
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
Creature* creature = getCreature(L, 2);
if (!creature) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
pushBoolean(L, monster->selectTarget(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSearchTarget(lua_State* L)
{
// monster:searchTarget([searchType = TARGETSEARCH_DEFAULT])
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
TargetSearchType_t searchType = getNumber<TargetSearchType_t>(L, 2, TARGETSEARCH_DEFAULT);
pushBoolean(L, monster->searchTarget(searchType));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterIsWalkingToSpawn(lua_State* L)
{
// monster:isWalkingToSpawn()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->isWalkingToSpawn());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterWalkToSpawn(lua_State* L)
{
// monster:walkToSpawn()
Monster* monster = getUserdata<Monster>(L, 1);
if (monster) {
pushBoolean(L, monster->walkToSpawn());
} else {
lua_pushnil(L);
}
return 1;
}
// Npc
int LuaScriptInterface::luaNpcCreate(lua_State* L)
{
// Npc([id or name or userdata])
Npc* npc;
if (lua_gettop(L) >= 2) {
if (isNumber(L, 2)) {
npc = g_game.getNpcByID(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
npc = g_game.getNpcByName(getString(L, 2));
} else if (isUserdata(L, 2)) {
if (getUserdataType(L, 2) != LuaData_Npc) {
lua_pushnil(L);
return 1;
}
npc = getUserdata<Npc>(L, 2);
} else {
npc = nullptr;
}
} else {
npc = getScriptEnv()->getNpc();
}
if (npc) {
pushUserdata<Npc>(L, npc);
setMetatable(L, -1, "Npc");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNpcIsNpc(lua_State* L)
{
// npc:isNpc()
pushBoolean(L, getUserdata<const Npc>(L, 1) != nullptr);
return 1;
}
int LuaScriptInterface::luaNpcSetMasterPos(lua_State* L)
{
// npc:setMasterPos(pos[, radius])
Npc* npc = getUserdata<Npc>(L, 1);
if (!npc) {
lua_pushnil(L);
return 1;
}
const Position& pos = getPosition(L, 2);
int32_t radius = getNumber<int32_t>(L, 3, 1);
npc->setMasterPos(pos, radius);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaNpcGetSpeechBubble(lua_State* L)
{
// npc:getSpeechBubble()
Npc* npc = getUserdata<Npc>(L, 1);
if (npc) {
lua_pushnumber(L, npc->getSpeechBubble());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaNpcSetSpeechBubble(lua_State* L)
{
// npc:setSpeechBubble(speechBubble)
Npc* npc = getUserdata<Npc>(L, 1);
if (npc) {
npc->setSpeechBubble(getNumber<uint8_t>(L, 2));
}
return 0;
}
// Guild
int LuaScriptInterface::luaGuildCreate(lua_State* L)
{
// Guild(id)
uint32_t id = getNumber<uint32_t>(L, 2);
Guild* guild = g_game.getGuild(id);
if (guild) {
pushUserdata<Guild>(L, guild);
setMetatable(L, -1, "Guild");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetId(lua_State* L)
{
// guild:getId()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
lua_pushnumber(L, guild->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetName(lua_State* L)
{
// guild:getName()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
pushString(L, guild->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetMembersOnline(lua_State* L)
{
// guild:getMembersOnline()
const Guild* guild = getUserdata<const Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
const auto& members = guild->getMembersOnline();
lua_createtable(L, members.size(), 0);
int index = 0;
for (Player* player : members) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaGuildAddRank(lua_State* L)
{
// guild:addRank(id, name, level)
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
uint32_t id = getNumber<uint32_t>(L, 2);
const std::string& name = getString(L, 3);
uint8_t level = getNumber<uint8_t>(L, 4);
guild->addRank(id, name, level);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetRankById(lua_State* L)
{
// guild:getRankById(id)
Guild* guild = getUserdata<Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
uint32_t id = getNumber<uint32_t>(L, 2);
GuildRank_ptr rank = guild->getRankById(id);
if (rank) {
lua_createtable(L, 0, 3);
setField(L, "id", rank->id);
setField(L, "name", rank->name);
setField(L, "level", rank->level);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildGetRankByLevel(lua_State* L)
{
// guild:getRankByLevel(level)
const Guild* guild = getUserdata<const Guild>(L, 1);
if (!guild) {
lua_pushnil(L);
return 1;
}
uint8_t level = getNumber<uint8_t>(L, 2);
GuildRank_ptr rank = guild->getRankByLevel(level);
if (rank) {
lua_createtable(L, 0, 3);
setField(L, "id", rank->id);
setField(L, "name", rank->name);
setField(L, "level", rank->level);
} else {
lua_pushnil(L);
}
return 1;
}
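// Both rank getters return a plain table with id/name/level keys, e.g. from
// Lua (illustrative; the rank id is assumed to exist):
//   local rank = guild:getRankById(1)
//   if rank then print(rank.id, rank.name, rank.level) end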
int LuaScriptInterface::luaGuildGetMotd(lua_State* L)
{
// guild:getMotd()
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
pushString(L, guild->getMotd());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGuildSetMotd(lua_State* L)
{
// guild:setMotd(motd)
const std::string& motd = getString(L, 2);
Guild* guild = getUserdata<Guild>(L, 1);
if (guild) {
guild->setMotd(motd);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Group
int LuaScriptInterface::luaGroupCreate(lua_State* L)
{
// Group(id)
uint32_t id = getNumber<uint32_t>(L, 2);
Group* group = g_game.groups.getGroup(id);
if (group) {
pushUserdata<Group>(L, group);
setMetatable(L, -1, "Group");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetId(lua_State* L)
{
// group:getId()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetName(lua_State* L)
{
// group:getName()
Group* group = getUserdata<Group>(L, 1);
if (group) {
pushString(L, group->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetFlags(lua_State* L)
{
// group:getFlags()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->flags);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetAccess(lua_State* L)
{
// group:getAccess()
Group* group = getUserdata<Group>(L, 1);
if (group) {
pushBoolean(L, group->access);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetMaxDepotItems(lua_State* L)
{
// group:getMaxDepotItems()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->maxDepotItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupGetMaxVipEntries(lua_State* L)
{
// group:getMaxVipEntries()
Group* group = getUserdata<Group>(L, 1);
if (group) {
lua_pushnumber(L, group->maxVipEntries);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGroupHasFlag(lua_State* L)
{
// group:hasFlag(flag)
Group* group = getUserdata<Group>(L, 1);
if (group) {
PlayerFlags flag = getNumber<PlayerFlags>(L, 2);
pushBoolean(L, (group->flags & flag) != 0);
} else {
lua_pushnil(L);
}
return 1;
}
// Vocation
int LuaScriptInterface::luaVocationCreate(lua_State* L)
{
// Vocation(id or name)
uint32_t id;
if (isNumber(L, 2)) {
id = getNumber<uint32_t>(L, 2);
} else {
id = g_vocations.getVocationId(getString(L, 2));
}
Vocation* vocation = g_vocations.getVocation(id);
if (vocation) {
pushUserdata<Vocation>(L, vocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetId(lua_State* L)
{
// vocation:getId()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetClientId(lua_State* L)
{
// vocation:getClientId()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getClientId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetName(lua_State* L)
{
// vocation:getName()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushString(L, vocation->getVocName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetDescription(lua_State* L)
{
// vocation:getDescription()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushString(L, vocation->getVocDescription());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetRequiredSkillTries(lua_State* L)
{
// vocation:getRequiredSkillTries(skillType, skillLevel)
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
skills_t skillType = getNumber<skills_t>(L, 2);
uint16_t skillLevel = getNumber<uint16_t>(L, 3);
lua_pushnumber(L, vocation->getReqSkillTries(skillType, skillLevel));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetRequiredManaSpent(lua_State* L)
{
// vocation:getRequiredManaSpent(magicLevel)
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
uint32_t magicLevel = getNumber<uint32_t>(L, 2);
lua_pushnumber(L, vocation->getReqMana(magicLevel));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetCapacityGain(lua_State* L)
{
// vocation:getCapacityGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getCapGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGain(lua_State* L)
{
// vocation:getHealthGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHPGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGainTicks(lua_State* L)
{
// vocation:getHealthGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHealthGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetHealthGainAmount(lua_State* L)
{
// vocation:getHealthGainAmount()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getHealthGainAmount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGain(lua_State* L)
{
// vocation:getManaGain()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGain());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGainTicks(lua_State* L)
{
// vocation:getManaGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetManaGainAmount(lua_State* L)
{
// vocation:getManaGainAmount()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getManaGainAmount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetMaxSoul(lua_State* L)
{
// vocation:getMaxSoul()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getSoulMax());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetSoulGainTicks(lua_State* L)
{
// vocation:getSoulGainTicks()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getSoulGainTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetAttackSpeed(lua_State* L)
{
// vocation:getAttackSpeed()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getAttackSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetBaseSpeed(lua_State* L)
{
// vocation:getBaseSpeed()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
lua_pushnumber(L, vocation->getBaseSpeed());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetDemotion(lua_State* L)
{
// vocation:getDemotion()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (!vocation) {
lua_pushnil(L);
return 1;
}
uint16_t fromId = vocation->getFromVocation();
if (fromId == VOCATION_NONE) {
lua_pushnil(L);
return 1;
}
Vocation* demotedVocation = g_vocations.getVocation(fromId);
if (demotedVocation && demotedVocation != vocation) {
pushUserdata<Vocation>(L, demotedVocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationGetPromotion(lua_State* L)
{
// vocation:getPromotion()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (!vocation) {
lua_pushnil(L);
return 1;
}
uint16_t promotedId = g_vocations.getPromotedVocation(vocation->getId());
if (promotedId == VOCATION_NONE) {
lua_pushnil(L);
return 1;
}
Vocation* promotedVocation = g_vocations.getVocation(promotedId);
if (promotedVocation && promotedVocation != vocation) {
pushUserdata<Vocation>(L, promotedVocation);
setMetatable(L, -1, "Vocation");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaVocationAllowsPvp(lua_State* L)
{
// vocation:allowsPvp()
Vocation* vocation = getUserdata<Vocation>(L, 1);
if (vocation) {
pushBoolean(L, vocation->allowsPvp());
} else {
lua_pushnil(L);
}
return 1;
}
// Town
int LuaScriptInterface::luaTownCreate(lua_State* L)
{
// Town(id or name)
Town* town;
if (isNumber(L, 2)) {
town = g_game.map.towns.getTown(getNumber<uint32_t>(L, 2));
} else if (isString(L, 2)) {
town = g_game.map.towns.getTown(getString(L, 2));
} else {
town = nullptr;
}
if (town) {
pushUserdata<Town>(L, town);
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetId(lua_State* L)
{
// town:getId()
Town* town = getUserdata<Town>(L, 1);
if (town) {
lua_pushnumber(L, town->getID());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetName(lua_State* L)
{
// town:getName()
Town* town = getUserdata<Town>(L, 1);
if (town) {
pushString(L, town->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTownGetTemplePosition(lua_State* L)
{
// town:getTemplePosition()
Town* town = getUserdata<Town>(L, 1);
if (town) {
pushPosition(L, town->getTemplePosition());
} else {
lua_pushnil(L);
}
return 1;
}
// House
int LuaScriptInterface::luaHouseCreate(lua_State* L)
{
// House(id)
House* house = g_game.map.houses.getHouse(getNumber<uint32_t>(L, 2));
if (house) {
pushUserdata<House>(L, house);
setMetatable(L, -1, "House");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetId(lua_State* L)
{
// house:getId()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetName(lua_State* L)
{
// house:getName()
House* house = getUserdata<House>(L, 1);
if (house) {
pushString(L, house->getName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTown(lua_State* L)
{
// house:getTown()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
Town* town = g_game.map.towns.getTown(house->getTownId());
if (town) {
pushUserdata<Town>(L, town);
setMetatable(L, -1, "Town");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetExitPosition(lua_State* L)
{
// house:getExitPosition()
House* house = getUserdata<House>(L, 1);
if (house) {
pushPosition(L, house->getEntryPosition());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetRent(lua_State* L)
{
// house:getRent()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getRent());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetOwnerGuid(lua_State* L)
{
// house:getOwnerGuid()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getOwner());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseSetOwnerGuid(lua_State* L)
{
// house:setOwnerGuid(guid[, updateDatabase = true])
House* house = getUserdata<House>(L, 1);
if (house) {
uint32_t guid = getNumber<uint32_t>(L, 2);
bool updateDatabase = getBoolean(L, 3, true);
house->setOwner(guid, updateDatabase);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseStartTrade(lua_State* L)
{
// house:startTrade(player, tradePartner)
House* house = getUserdata<House>(L, 1);
Player* player = getUserdata<Player>(L, 2);
Player* tradePartner = getUserdata<Player>(L, 3);
if (!player || !tradePartner || !house) {
lua_pushnil(L);
return 1;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERFARAWAY);
return 1;
}
if (house->getOwner() != player->getGUID()) {
lua_pushnumber(L, RETURNVALUE_YOUDONTOWNTHISHOUSE);
return 1;
}
if (g_game.map.houses.getHouseByPlayerId(tradePartner->getGUID())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERALREADYOWNSAHOUSE);
return 1;
}
if (IOLoginData::hasBiddedOnHouse(tradePartner->getGUID())) {
lua_pushnumber(L, RETURNVALUE_TRADEPLAYERHIGHESTBIDDER);
return 1;
}
Item* transferItem = house->getTransferItem();
if (!transferItem) {
lua_pushnumber(L, RETURNVALUE_YOUCANNOTTRADETHISHOUSE);
return 1;
}
transferItem->getParent()->setParent(player);
if (!g_game.internalStartTrade(player, tradePartner, transferItem)) {
house->resetTransferItem();
}
lua_pushnumber(L, RETURNVALUE_NOERROR);
return 1;
}
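// Unlike the boolean setters above, house:startTrade() reports a
// RETURNVALUE_* constant. Illustrative Lua check (a sketch):
//   local ret = house:startTrade(player, tradePartner)
//   if ret ~= RETURNVALUE_NOERROR then
//       player:sendCancelMessage(ret)
//   end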
int LuaScriptInterface::luaHouseGetBeds(lua_State* L)
{
// house:getBeds()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& beds = house->getBeds();
lua_createtable(L, beds.size(), 0);
int index = 0;
for (BedItem* bedItem : beds) {
pushUserdata<Item>(L, bedItem);
setItemMetatable(L, -1, bedItem);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetBedCount(lua_State* L)
{
// house:getBedCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getBedCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoors(lua_State* L)
{
// house:getDoors()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& doors = house->getDoors();
lua_createtable(L, doors.size(), 0);
int index = 0;
for (Door* door : doors) {
pushUserdata<Item>(L, door);
setItemMetatable(L, -1, door);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoorCount(lua_State* L)
{
// house:getDoorCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getDoors().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetDoorIdByPosition(lua_State* L)
{
// house:getDoorIdByPosition(position)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
Door* door = house->getDoorByPosition(getPosition(L, 2));
if (door) {
lua_pushnumber(L, door->getDoorId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseGetTiles(lua_State* L)
{
// house:getTiles()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& tiles = house->getTiles();
lua_createtable(L, tiles.size(), 0);
int index = 0;
for (Tile* tile : tiles) {
pushUserdata<Tile>(L, tile);
setMetatable(L, -1, "Tile");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaHouseGetItems(lua_State* L)
{
// house:getItems()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
const auto& tiles = house->getTiles();
lua_newtable(L);
int index = 0;
for (Tile* tile : tiles) {
TileItemVector* itemVector = tile->getItemList();
if (itemVector) {
for (Item* item : *itemVector) {
pushUserdata<Item>(L, item);
setItemMetatable(L, -1, item);
lua_rawseti(L, -2, ++index);
}
}
}
return 1;
}
int LuaScriptInterface::luaHouseGetTileCount(lua_State* L)
{
// house:getTileCount()
House* house = getUserdata<House>(L, 1);
if (house) {
lua_pushnumber(L, house->getTiles().size());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaHouseCanEditAccessList(lua_State* L)
{
// house:canEditAccessList(listId, player)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 2);
Player* player = getPlayer(L, 3);
pushBoolean(L, house->canEditAccessList(listId, player));
return 1;
}
int LuaScriptInterface::luaHouseGetAccessList(lua_State* L)
{
// house:getAccessList(listId)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
std::string list;
uint32_t listId = getNumber<uint32_t>(L, 2);
if (house->getAccessList(listId, list)) {
pushString(L, list);
} else {
pushBoolean(L, false);
}
return 1;
}
int LuaScriptInterface::luaHouseSetAccessList(lua_State* L)
{
// house:setAccessList(listId, list)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
uint32_t listId = getNumber<uint32_t>(L, 2);
const std::string& list = getString(L, 3);
house->setAccessList(listId, list);
pushBoolean(L, true);
return 1;
}
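// The listId is a door id, or one of the special list ids from house.h
// (GUEST_LIST/SUBOWNER_LIST; the raw value below is an assumption for
// illustration):
//   house:setAccessList(0x100, "Player One\nPlayer Two") -- guest list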
int LuaScriptInterface::luaHouseKickPlayer(lua_State* L)
{
// house:kickPlayer(player, targetPlayer)
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
pushBoolean(L, house->kickPlayer(getPlayer(L, 2), getPlayer(L, 3)));
return 1;
}
int LuaScriptInterface::luaHouseSave(lua_State* L)
{
// house:save()
House* house = getUserdata<House>(L, 1);
if (!house) {
lua_pushnil(L);
return 1;
}
pushBoolean(L, IOMapSerialize::saveHouse(house));
return 1;
}
// ItemType
int LuaScriptInterface::luaItemTypeCreate(lua_State* L)
{
// ItemType(id or name)
uint32_t id;
if (isNumber(L, 2)) {
id = getNumber<uint32_t>(L, 2);
} else if (isString(L, 2)) {
id = Item::items.getItemIdByName(getString(L, 2));
} else {
lua_pushnil(L);
return 1;
}
const ItemType& itemType = Item::items[id];
pushUserdata<const ItemType>(L, &itemType);
setMetatable(L, -1, "ItemType");
return 1;
}
int LuaScriptInterface::luaItemTypeIsCorpse(lua_State* L)
{
// itemType:isCorpse()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->corpseType != RACE_NONE);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsDoor(lua_State* L)
{
// itemType:isDoor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isDoor());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsContainer(lua_State* L)
{
// itemType:isContainer()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isContainer());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsFluidContainer(lua_State* L)
{
// itemType:isFluidContainer()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isFluidContainer());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsMovable(lua_State* L)
{
// itemType:isMovable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->moveable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsRune(lua_State* L)
{
// itemType:isRune()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isRune());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsStackable(lua_State* L)
{
// itemType:isStackable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->stackable);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsReadable(lua_State* L)
{
// itemType:isReadable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->canReadText);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsWritable(lua_State* L)
{
// itemType:isWritable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->canWriteText);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsBlocking(lua_State* L)
{
// itemType:isBlocking()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->blockProjectile || itemType->blockSolid);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsGroundTile(lua_State* L)
{
// itemType:isGroundTile()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isGroundTile());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsMagicField(lua_State* L)
{
// itemType:isMagicField()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isMagicField());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsUseable(lua_State* L)
{
// itemType:isUseable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isUseable());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsPickupable(lua_State* L)
{
// itemType:isPickupable()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->isPickupable());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetType(lua_State* L)
{
// itemType:getType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->type);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetGroup(lua_State* L)
{
// itemType:getGroup()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->group);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetId(lua_State* L)
{
// itemType:getId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->id);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetClientId(lua_State* L)
{
// itemType:getClientId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->clientId);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetName(lua_State* L)
{
// itemType:getName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->name);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetPluralName(lua_State* L)
{
// itemType:getPluralName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->getPluralName());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetArticle(lua_State* L)
{
// itemType:getArticle()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->article);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDescription(lua_State* L)
{
// itemType:getDescription()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->description);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetSlotPosition(lua_State *L)
{
// itemType:getSlotPosition()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->slotPosition);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCharges(lua_State* L)
{
// itemType:getCharges()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->charges);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetFluidSource(lua_State* L)
{
// itemType:getFluidSource()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->fluidSource);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCapacity(lua_State* L)
{
// itemType:getCapacity()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->maxItems);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWeight(lua_State* L)
{
// itemType:getWeight([count = 1])
uint16_t count = getNumber<uint16_t>(L, 2, 1);
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
uint64_t weight = static_cast<uint64_t>(itemType->weight) * std::max<int32_t>(1, count);
lua_pushnumber(L, weight);
return 1;
}
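// The result is the raw integer weight from items.xml scaled by count.
// Illustrative Lua (item id assumed for illustration):
//   local it = ItemType(2160)
//   print(it:getWeight(100)) -- weight of a stack of 100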
int LuaScriptInterface::luaItemTypeGetHitChance(lua_State* L)
{
// itemType:getHitChance()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->hitChance);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetShootRange(lua_State* L)
{
// itemType:getShootRange()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->shootRange);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAttack(lua_State* L)
{
// itemType:getAttack()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->attack);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAttackSpeed(lua_State* L)
{
// itemType:getAttackSpeed()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->attackSpeed);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDefense(lua_State* L)
{
// itemType:getDefense()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->defense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetExtraDefense(lua_State* L)
{
// itemType:getExtraDefense()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->extraDefense);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetArmor(lua_State* L)
{
// itemType:getArmor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->armor);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWeaponType(lua_State* L)
{
// itemType:getWeaponType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->weaponType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAmmoType(lua_State* L)
{
// itemType:getAmmoType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->ammoType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetCorpseType(lua_State* L)
{
// itemType:getCorpseType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->corpseType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetAbilities(lua_State* L)
{
// itemType:getAbilities()
ItemType* itemType = getUserdata<ItemType>(L, 1);
if (itemType) {
Abilities& abilities = itemType->getAbilities();
lua_createtable(L, 0, 18);
setField(L, "healthGain", abilities.healthGain);
setField(L, "healthTicks", abilities.healthTicks);
setField(L, "manaGain", abilities.manaGain);
setField(L, "manaTicks", abilities.manaTicks);
setField(L, "conditionImmunities", abilities.conditionImmunities);
setField(L, "conditionSuppressions", abilities.conditionSuppressions);
setField(L, "speed", abilities.speed);
setField(L, "elementDamage", abilities.elementDamage);
setField(L, "elementType", abilities.elementType);
lua_pushboolean(L, abilities.manaShield);
lua_setfield(L, -2, "manaShield");
lua_pushboolean(L, abilities.invisible);
lua_setfield(L, -2, "invisible");
lua_pushboolean(L, abilities.regeneration);
lua_setfield(L, -2, "regeneration");
// Stats
lua_createtable(L, 0, STAT_LAST + 1);
for (int32_t i = STAT_FIRST; i <= STAT_LAST; i++) {
lua_pushnumber(L, abilities.stats[i]);
lua_rawseti(L, -2, i + 1);
}
lua_setfield(L, -2, "stats");
// Stats percent
lua_createtable(L, 0, STAT_LAST + 1);
for (int32_t i = STAT_FIRST; i <= STAT_LAST; i++) {
lua_pushnumber(L, abilities.statsPercent[i]);
lua_rawseti(L, -2, i + 1);
}
lua_setfield(L, -2, "statsPercent");
// Skills
lua_createtable(L, 0, SKILL_LAST + 1);
for (int32_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
lua_pushnumber(L, abilities.skills[i]);
lua_rawseti(L, -2, i + 1);
}
lua_setfield(L, -2, "skills");
// Special skills
lua_createtable(L, 0, SPECIALSKILL_LAST + 1);
for (int32_t i = SPECIALSKILL_FIRST; i <= SPECIALSKILL_LAST; i++) {
lua_pushnumber(L, abilities.specialSkills[i]);
lua_rawseti(L, -2, i + 1);
}
lua_setfield(L, -2, "specialSkills");
// Field absorb percent
lua_createtable(L, 0, COMBAT_COUNT);
for (int32_t i = 0; i < COMBAT_COUNT; i++) {
lua_pushnumber(L, abilities.fieldAbsorbPercent[i]);
lua_rawseti(L, -2, i + 1);
}
lua_setfield(L, -2, "fieldAbsorbPercent");
// Absorb percent
lua_createtable(L, 0, COMBAT_COUNT);
for (int32_t i = 0; i < COMBAT_COUNT; i++) {
lua_pushnumber(L, abilities.absorbPercent[i]);
lua_rawseti(L, -2, i + 1);
}
lua_setfield(L, -2, "absorbPercent");
}
return 1;
}
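// The returned table mirrors the Abilities struct: scalar fields plus
// 1-based numeric arrays ("stats", "skills", "absorbPercent", ...).
// Illustrative Lua iteration (a sketch; item id assumed):
//   local ab = ItemType(2494):getAbilities()
//   for i, pct in ipairs(ab.absorbPercent) do
//       print(i, pct)
//   end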
int LuaScriptInterface::luaItemTypeHasShowAttributes(lua_State* L)
{
// itemType:hasShowAttributes()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->showAttributes);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasShowCount(lua_State* L)
{
// itemType:hasShowCount()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->showCount);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasShowCharges(lua_State* L)
{
// itemType:hasShowCharges()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->showCharges);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasShowDuration(lua_State* L)
{
// itemType:hasShowDuration()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->showDuration);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasAllowDistRead(lua_State* L)
{
// itemType:hasAllowDistRead()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->allowDistRead);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetWieldInfo(lua_State* L)
{
// itemType:getWieldInfo()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushinteger(L, itemType->wieldInfo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDuration(lua_State* L)
{
// itemType:getDuration()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushinteger(L, itemType->decayTime);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetLevelDoor(lua_State* L)
{
// itemType:getLevelDoor()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushinteger(L, itemType->levelDoor);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetRuneSpellName(lua_State* L)
{
// itemType:getRuneSpellName()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType && itemType->isRune()) {
pushString(L, itemType->runeSpellName);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetVocationString(lua_State* L)
{
// itemType:getVocationString()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushString(L, itemType->vocationString);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetMinReqLevel(lua_State* L)
{
// itemType:getMinReqLevel()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushinteger(L, itemType->minReqLevel);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetMinReqMagicLevel(lua_State* L)
{
// itemType:getMinReqMagicLevel()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushinteger(L, itemType->minReqMagicLevel);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetElementType(lua_State* L)
{
// itemType:getElementType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
auto& abilities = itemType->abilities;
if (abilities) {
lua_pushnumber(L, abilities->elementType);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetElementDamage(lua_State* L)
{
// itemType:getElementDamage()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (!itemType) {
lua_pushnil(L);
return 1;
}
auto& abilities = itemType->abilities;
if (abilities) {
lua_pushnumber(L, abilities->elementDamage);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetTransformEquipId(lua_State* L)
{
// itemType:getTransformEquipId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->transformEquipTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetTransformDeEquipId(lua_State* L)
{
// itemType:getTransformDeEquipId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->transformDeEquipTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDestroyId(lua_State* L)
{
// itemType:getDestroyId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->destroyTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetDecayId(lua_State* L)
{
// itemType:getDecayId()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->decayTo);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeGetRequiredLevel(lua_State* L)
{
// itemType:getRequiredLevel()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
lua_pushnumber(L, itemType->minReqLevel);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeHasSubType(lua_State* L)
{
// itemType:hasSubType()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->hasSubType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaItemTypeIsStoreItem(lua_State* L)
{
// itemType:isStoreItem()
const ItemType* itemType = getUserdata<const ItemType>(L, 1);
if (itemType) {
pushBoolean(L, itemType->storeItem);
} else {
lua_pushnil(L);
}
return 1;
}
// Combat
int LuaScriptInterface::luaCombatCreate(lua_State* L)
{
// Combat()
pushSharedPtr(L, g_luaEnvironment.createCombatObject(getScriptEnv()->getScriptInterface()));
setMetatable(L, -1, "Combat");
return 1;
}
int LuaScriptInterface::luaCombatDelete(lua_State* L)
{
Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (combat) {
combat.reset();
}
return 0;
}
int LuaScriptInterface::luaCombatSetParameter(lua_State* L)
{
// combat:setParameter(key, value)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
CombatParam_t key = getNumber<CombatParam_t>(L, 2);
uint32_t value;
if (isBoolean(L, 3)) {
value = getBoolean(L, 3) ? 1 : 0;
} else {
value = getNumber<uint32_t>(L, 3);
}
combat->setParam(key, value);
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatGetParameter(lua_State* L)
{
// combat:getParameter(key)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
int32_t value = combat->getParam(getNumber<CombatParam_t>(L, 2));
if (value == std::numeric_limits<int32_t>::max()) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, value);
return 1;
}
int LuaScriptInterface::luaCombatSetFormula(lua_State* L)
{
// combat:setFormula(type, mina, minb, maxa, maxb)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
formulaType_t type = getNumber<formulaType_t>(L, 2);
double mina = getNumber<double>(L, 3);
double minb = getNumber<double>(L, 4);
double maxa = getNumber<double>(L, 5);
double maxb = getNumber<double>(L, 6);
combat->setPlayerCombatValues(type, mina, minb, maxa, maxb);
pushBoolean(L, true);
return 1;
}
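// For COMBAT_FORMULA_LEVELMAGIC the engine derives the roll from
// (level / 5 + magicLevel) * a + b for the min and max pairs (a sketch of
// the stock behaviour; the values below are illustrative):
//   combat:setFormula(COMBAT_FORMULA_LEVELMAGIC, -0.5, -10, -1.0, -20)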
int LuaScriptInterface::luaCombatSetArea(lua_State* L)
{
// combat:setArea(area)
if (getScriptEnv()->getScriptId() != EVENT_ID_LOADING) {
reportErrorFunc(L, "This function can only be used while loading the script.");
lua_pushnil(L);
return 1;
}
const AreaCombat* area = g_luaEnvironment.getAreaObject(getNumber<uint32_t>(L, 2));
if (!area) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
lua_pushnil(L);
return 1;
}
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
combat->setArea(new AreaCombat(*area));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatAddCondition(lua_State* L)
{
// combat:addCondition(condition)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
Condition* condition = getUserdata<Condition>(L, 2);
if (condition) {
combat->addCondition(condition->clone());
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCombatClearConditions(lua_State* L)
{
// combat:clearConditions()
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
combat->clearConditions();
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatSetCallback(lua_State* L)
{
// combat:setCallback(key, function)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
CallBackParam_t key = getNumber<CallBackParam_t>(L, 2);
if (!combat->setCallback(key)) {
lua_pushnil(L);
return 1;
}
CallBack* callback = combat->getCallback(key);
if (!callback) {
lua_pushnil(L);
return 1;
}
const std::string& function = getString(L, 3);
pushBoolean(L, callback->loadCallBack(getScriptEnv()->getScriptInterface(), function));
return 1;
}
int LuaScriptInterface::luaCombatSetOrigin(lua_State* L)
{
// combat:setOrigin(origin)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
combat->setOrigin(getNumber<CombatOrigin>(L, 2));
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaCombatExecute(lua_State* L)
{
// combat:execute(creature, variant)
const Combat_ptr& combat = getSharedPtr<Combat>(L, 1);
if (!combat) {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_COMBAT_NOT_FOUND));
lua_pushnil(L);
return 1;
}
if (isUserdata(L, 2)) {
LuaDataType type = getUserdataType(L, 2);
if (type != LuaData_Player && type != LuaData_Monster && type != LuaData_Npc) {
pushBoolean(L, false);
return 1;
}
}
Creature* creature = getCreature(L, 2);
const LuaVariant& variant = getVariant(L, 3);
switch (variant.type) {
case VARIANT_NUMBER: {
Creature* target = g_game.getCreatureByID(variant.number);
if (!target) {
pushBoolean(L, false);
return 1;
}
if (combat->hasArea()) {
combat->doCombat(creature, target->getPosition());
} else {
combat->doCombat(creature, target);
}
break;
}
case VARIANT_POSITION: {
combat->doCombat(creature, variant.pos);
break;
}
case VARIANT_TARGETPOSITION: {
if (combat->hasArea()) {
combat->doCombat(creature, variant.pos);
} else {
combat->postCombatEffects(creature, variant.pos);
g_game.addMagicEffect(variant.pos, CONST_ME_POFF);
}
break;
}
case VARIANT_STRING: {
Player* target = g_game.getPlayerByName(variant.text);
if (!target) {
pushBoolean(L, false);
return 1;
}
combat->doCombat(creature, target);
break;
}
case VARIANT_NONE: {
reportErrorFunc(L, getErrorDesc(LUA_ERROR_VARIANT_NOT_FOUND));
pushBoolean(L, false);
return 1;
}
default: {
break;
}
}
pushBoolean(L, true);
return 1;
}
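// Canonical spell callback built on this binding (illustrative; `combat` is
// configured once at script load time, as combat:setArea requires):
//   function onCastSpell(creature, variant)
//       return combat:execute(creature, variant)
//   end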
// Condition
int LuaScriptInterface::luaConditionCreate(lua_State* L)
{
// Condition(conditionType[, conditionId = CONDITIONID_COMBAT])
ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
Condition* condition = Condition::createCondition(conditionId, conditionType, 0, 0);
if (condition) {
pushUserdata<Condition>(L, condition);
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionDelete(lua_State* L)
{
// condition:delete()
Condition** conditionPtr = getRawUserdata<Condition>(L, 1);
if (conditionPtr && *conditionPtr) {
delete *conditionPtr;
*conditionPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaConditionGetId(lua_State* L)
{
// condition:getId()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetSubId(lua_State* L)
{
// condition:getSubId()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getSubId());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetType(lua_State* L)
{
// condition:getType()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getType());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetIcons(lua_State* L)
{
// condition:getIcons()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getIcons());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetEndTime(lua_State* L)
{
// condition:getEndTime()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getEndTime());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionClone(lua_State* L)
{
// condition:clone()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
pushUserdata<Condition>(L, condition->clone());
setMetatable(L, -1, "Condition");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionGetTicks(lua_State* L)
{
// condition:getTicks()
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
lua_pushnumber(L, condition->getTicks());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetTicks(lua_State* L)
{
// condition:setTicks(ticks)
int32_t ticks = getNumber<int32_t>(L, 2);
Condition* condition = getUserdata<Condition>(L, 1);
if (condition) {
condition->setTicks(ticks);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetParameter(lua_State* L)
{
// condition:setParameter(key, value)
Condition* condition = getUserdata<Condition>(L, 1);
if (!condition) {
lua_pushnil(L);
return 1;
}
ConditionParam_t key = getNumber<ConditionParam_t>(L, 2);
int32_t value;
if (isBoolean(L, 3)) {
value = getBoolean(L, 3) ? 1 : 0;
} else {
value = getNumber<int32_t>(L, 3);
}
condition->setParam(key, value);
pushBoolean(L, true);
return 1;
}
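// Illustrative parameter round trip from Lua (a sketch):
//   condition:setParameter(CONDITION_PARAM_TICKS, 6000)
//   print(condition:getParameter(CONDITION_PARAM_TICKS)) -- 6000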
int LuaScriptInterface::luaConditionGetParameter(lua_State* L)
{
// condition:getParameter(key)
Condition* condition = getUserdata<Condition>(L, 1);
if (!condition) {
lua_pushnil(L);
return 1;
}
int32_t value = condition->getParam(getNumber<ConditionParam_t>(L, 2));
if (value == std::numeric_limits<int32_t>::max()) {
lua_pushnil(L);
return 1;
}
lua_pushnumber(L, value);
return 1;
}
int LuaScriptInterface::luaConditionSetFormula(lua_State* L)
{
// condition:setFormula(mina, minb, maxa, maxb)
double maxb = getNumber<double>(L, 5);
double maxa = getNumber<double>(L, 4);
double minb = getNumber<double>(L, 3);
double mina = getNumber<double>(L, 2);
ConditionSpeed* condition = dynamic_cast<ConditionSpeed*>(getUserdata<Condition>(L, 1));
if (condition) {
condition->setFormulaVars(mina, minb, maxa, maxb);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionSetOutfit(lua_State* L)
{
// condition:setOutfit(outfit)
// condition:setOutfit(lookTypeEx, lookType, lookHead, lookBody, lookLegs, lookFeet[, lookAddons[, lookMount]])
Outfit_t outfit;
if (isTable(L, 2)) {
outfit = getOutfit(L, 2);
} else {
outfit.lookMount = getNumber<uint16_t>(L, 9, outfit.lookMount);
outfit.lookAddons = getNumber<uint8_t>(L, 8, outfit.lookAddons);
outfit.lookFeet = getNumber<uint8_t>(L, 7);
outfit.lookLegs = getNumber<uint8_t>(L, 6);
outfit.lookBody = getNumber<uint8_t>(L, 5);
outfit.lookHead = getNumber<uint8_t>(L, 4);
outfit.lookType = getNumber<uint16_t>(L, 3);
outfit.lookTypeEx = getNumber<uint16_t>(L, 2);
}
ConditionOutfit* condition = dynamic_cast<ConditionOutfit*>(getUserdata<Condition>(L, 1));
if (condition) {
condition->setOutfit(outfit);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaConditionAddDamage(lua_State* L)
{
// condition:addDamage(rounds, time, value)
int32_t value = getNumber<int32_t>(L, 4);
int32_t time = getNumber<int32_t>(L, 3);
int32_t rounds = getNumber<int32_t>(L, 2);
ConditionDamage* condition = dynamic_cast<ConditionDamage*>(getUserdata<Condition>(L, 1));
if (condition) {
pushBoolean(L, condition->addDamage(rounds, time, value));
} else {
lua_pushnil(L);
}
return 1;
}
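// Illustrative damage-over-time condition assembled from Lua (a sketch):
//   local condition = Condition(CONDITION_POISON)
//   condition:addDamage(10, 2000, -5) -- 10 rounds, one every 2000 ms, 5 hp each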
// Outfit
int LuaScriptInterface::luaOutfitCreate(lua_State* L)
{
// Outfit(looktype)
const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(getNumber<uint16_t>(L, 2));
if (outfit) {
pushOutfit(L, outfit);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaOutfitCompare(lua_State* L)
{
// outfit == outfitEx
Outfit outfitEx = getOutfitClass(L, 2);
Outfit outfit = getOutfitClass(L, 1);
pushBoolean(L, outfit == outfitEx);
return 1;
}
// MonsterType
int LuaScriptInterface::luaMonsterTypeCreate(lua_State* L)
{
// MonsterType(name)
MonsterType* monsterType = g_monsters.getMonsterType(getString(L, 2));
if (monsterType) {
pushUserdata<MonsterType>(L, monsterType);
setMetatable(L, -1, "MonsterType");
} else {
lua_pushnil(L);
}
return 1;
}
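// The MonsterType accessors below double as getters and setters: with no
// argument they read the field, with one argument they write it.
// Illustrative Lua (monster name assumed to be loaded):
//   local mt = MonsterType("rat")
//   print(mt:isAttackable())
//   mt:isAttackable(false)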
int LuaScriptInterface::luaMonsterTypeIsAttackable(lua_State* L)
{
// get: monsterType:isAttackable() set: monsterType:isAttackable(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isAttackable);
} else {
monsterType->info.isAttackable = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsChallengeable(lua_State* L)
{
// get: monsterType:isChallengeable() set: monsterType:isChallengeable(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isChallengeable);
} else {
monsterType->info.isChallengeable = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsConvinceable(lua_State* L)
{
// get: monsterType:isConvinceable() set: monsterType:isConvinceable(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isConvinceable);
} else {
monsterType->info.isConvinceable = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsSummonable(lua_State* L)
{
// get: monsterType:isSummonable() set: monsterType:isSummonable(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isSummonable);
} else {
monsterType->info.isSummonable = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsIgnoringSpawnBlock(lua_State* L)
{
// get: monsterType:isIgnoringSpawnBlock() set: monsterType:isIgnoringSpawnBlock(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isIgnoringSpawnBlock);
} else {
monsterType->info.isIgnoringSpawnBlock = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsIllusionable(lua_State* L)
{
// get: monsterType:isIllusionable() set: monsterType:isIllusionable(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isIllusionable);
} else {
monsterType->info.isIllusionable = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsHostile(lua_State* L)
{
// get: monsterType:isHostile() set: monsterType:isHostile(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isHostile);
} else {
monsterType->info.isHostile = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsPushable(lua_State* L)
{
// get: monsterType:isPushable() set: monsterType:isPushable(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.pushable);
} else {
monsterType->info.pushable = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsHealthHidden(lua_State* L)
{
// get: monsterType:isHealthHidden() set: monsterType:isHealthHidden(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.hiddenHealth);
} else {
monsterType->info.hiddenHealth = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeIsBoss(lua_State* L)
{
// get: monsterType:isBoss() set: monsterType:isBoss(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.isBoss);
} else {
monsterType->info.isBoss = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanPushItems(lua_State* L)
{
// get: monsterType:canPushItems() set: monsterType:canPushItems(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.canPushItems);
} else {
monsterType->info.canPushItems = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanPushCreatures(lua_State* L)
{
// get: monsterType:canPushCreatures() set: monsterType:canPushCreatures(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.canPushCreatures);
} else {
monsterType->info.canPushCreatures = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanWalkOnEnergy(lua_State* L)
{
// get: monsterType:canWalkOnEnergy() set: monsterType:canWalkOnEnergy(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.canWalkOnEnergy);
} else {
monsterType->info.canWalkOnEnergy = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanWalkOnFire(lua_State* L)
{
// get: monsterType:canWalkOnFire() set: monsterType:canWalkOnFire(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.canWalkOnFire);
} else {
monsterType->info.canWalkOnFire = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCanWalkOnPoison(lua_State* L)
{
// get: monsterType:canWalkOnPoison() set: monsterType:canWalkOnPoison(bool)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushBoolean(L, monsterType->info.canWalkOnPoison);
} else {
monsterType->info.canWalkOnPoison = getBoolean(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
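// Usage sketch (illustrative only): every boolean flag above follows the same
// convention -- call with no argument to read, pass a boolean to write (the
// setter returns true on success, nil on an invalid userdata). Assuming a
// MonsterType userdata "mType" is in scope:
//   mType:isHostile(true)
//   mType:canPushItems(false)
//   print(mType:isHostile())   -- prints: true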
int LuaScriptInterface::luaMonsterTypeName(lua_State* L)
{
// get: monsterType:name() set: monsterType:name(name)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushString(L, monsterType->name);
} else {
monsterType->name = getString(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeNameDescription(lua_State* L)
{
// get: monsterType:nameDescription() set: monsterType:nameDescription(desc)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushString(L, monsterType->nameDescription);
} else {
monsterType->nameDescription = getString(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeHealth(lua_State* L)
{
// get: monsterType:health() set: monsterType:health(health)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.health);
} else {
monsterType->info.health = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeMaxHealth(lua_State* L)
{
// get: monsterType:maxHealth() set: monsterType:maxHealth(health)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.healthMax);
} else {
monsterType->info.healthMax = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeRunHealth(lua_State* L)
{
// get: monsterType:runHealth() set: monsterType:runHealth(health)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.runAwayHealth);
} else {
monsterType->info.runAwayHealth = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeExperience(lua_State* L)
{
// get: monsterType:experience() set: monsterType:experience(exp)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.experience);
} else {
monsterType->info.experience = getNumber<uint64_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeSkull(lua_State* L)
{
// get: monsterType:skull() set: monsterType:skull(str/constant)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.skull);
} else {
if (isNumber(L, 2)) {
monsterType->info.skull = getNumber<Skulls_t>(L, 2);
} else {
monsterType->info.skull = getSkullType(getString(L, 2));
}
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCombatImmunities(lua_State* L)
{
// get: monsterType:combatImmunities() set: monsterType:combatImmunities(immunity)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.damageImmunities);
} else {
std::string immunity = getString(L, 2);
if (immunity == "physical") {
monsterType->info.damageImmunities |= COMBAT_PHYSICALDAMAGE;
pushBoolean(L, true);
} else if (immunity == "energy") {
monsterType->info.damageImmunities |= COMBAT_ENERGYDAMAGE;
pushBoolean(L, true);
} else if (immunity == "fire") {
monsterType->info.damageImmunities |= COMBAT_FIREDAMAGE;
pushBoolean(L, true);
} else if (immunity == "poison" || immunity == "earth") {
monsterType->info.damageImmunities |= COMBAT_EARTHDAMAGE;
pushBoolean(L, true);
} else if (immunity == "drown") {
monsterType->info.damageImmunities |= COMBAT_DROWNDAMAGE;
pushBoolean(L, true);
} else if (immunity == "ice") {
monsterType->info.damageImmunities |= COMBAT_ICEDAMAGE;
pushBoolean(L, true);
} else if (immunity == "holy") {
monsterType->info.damageImmunities |= COMBAT_HOLYDAMAGE;
pushBoolean(L, true);
} else if (immunity == "death") {
monsterType->info.damageImmunities |= COMBAT_DEATHDAMAGE;
pushBoolean(L, true);
} else if (immunity == "lifedrain") {
monsterType->info.damageImmunities |= COMBAT_LIFEDRAIN;
pushBoolean(L, true);
} else if (immunity == "manadrain") {
monsterType->info.damageImmunities |= COMBAT_MANADRAIN;
pushBoolean(L, true);
} else {
std::cout << "[Warning - Monsters::loadMonster] Unknown immunity name " << immunity << " for monster: " << monsterType->name << std::endl;
lua_pushnil(L);
}
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeConditionImmunities(lua_State* L)
{
// get: monsterType:conditionImmunities() set: monsterType:conditionImmunities(immunity)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.conditionImmunities);
} else {
std::string immunity = getString(L, 2);
if (immunity == "physical") {
monsterType->info.conditionImmunities |= CONDITION_BLEEDING;
pushBoolean(L, true);
} else if (immunity == "energy") {
monsterType->info.conditionImmunities |= CONDITION_ENERGY;
pushBoolean(L, true);
} else if (immunity == "fire") {
monsterType->info.conditionImmunities |= CONDITION_FIRE;
pushBoolean(L, true);
} else if (immunity == "poison" || immunity == "earth") {
monsterType->info.conditionImmunities |= CONDITION_POISON;
pushBoolean(L, true);
} else if (immunity == "drown") {
monsterType->info.conditionImmunities |= CONDITION_DROWN;
pushBoolean(L, true);
} else if (immunity == "ice") {
monsterType->info.conditionImmunities |= CONDITION_FREEZING;
pushBoolean(L, true);
} else if (immunity == "holy") {
monsterType->info.conditionImmunities |= CONDITION_DAZZLED;
pushBoolean(L, true);
} else if (immunity == "death") {
monsterType->info.conditionImmunities |= CONDITION_CURSED;
pushBoolean(L, true);
} else if (immunity == "paralyze") {
monsterType->info.conditionImmunities |= CONDITION_PARALYZE;
pushBoolean(L, true);
} else if (immunity == "outfit") {
monsterType->info.conditionImmunities |= CONDITION_OUTFIT;
pushBoolean(L, true);
} else if (immunity == "drunk") {
monsterType->info.conditionImmunities |= CONDITION_DRUNK;
pushBoolean(L, true);
} else if (immunity == "invisible" || immunity == "invisibility") {
monsterType->info.conditionImmunities |= CONDITION_INVISIBLE;
pushBoolean(L, true);
} else if (immunity == "bleed") {
monsterType->info.conditionImmunities |= CONDITION_BLEEDING;
pushBoolean(L, true);
} else {
std::cout << "[Warning - Monsters::loadMonster] Unknown immunity name " << immunity << " for monster: " << monsterType->name << std::endl;
lua_pushnil(L);
}
}
} else {
lua_pushnil(L);
}
return 1;
}
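// Usage sketch (illustrative only): immunities accumulate one name per call;
// unknown names log a warning and return nil. Assuming "mType" is a
// MonsterType userdata:
//   mType:combatImmunities("fire")        -- adds COMBAT_FIREDAMAGE
//   mType:combatImmunities("lifedrain")
//   mType:conditionImmunities("paralyze")
//   print(mType:combatImmunities())       -- reads the combined bitmask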
int LuaScriptInterface::luaMonsterTypeGetAttackList(lua_State* L)
{
// monsterType:getAttackList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.attackSpells.size(), 0);
int index = 0;
for (const auto& spellBlock : monsterType->info.attackSpells) {
lua_createtable(L, 0, 8);
setField(L, "chance", spellBlock.chance);
setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
setField(L, "minCombatValue", spellBlock.minCombatValue);
setField(L, "maxCombatValue", spellBlock.maxCombatValue);
setField(L, "range", spellBlock.range);
setField(L, "speed", spellBlock.speed);
pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
lua_setfield(L, -2, "spell");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeAddAttack(lua_State* L)
{
// monsterType:addAttack(monsterspell)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 2);
if (spell) {
spellBlock_t sb;
if (g_monsters.deserializeSpell(spell, sb, monsterType->name)) {
monsterType->info.attackSpells.push_back(std::move(sb));
} else {
std::cout << "[Warning - MonsterType::addAttack] Can't load spell " << spell->name << " for monster " << monsterType->name << std::endl;
}
} else {
lua_pushnil(L);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetDefenseList(lua_State* L)
{
// monsterType:getDefenseList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.defenseSpells.size(), 0);
int index = 0;
for (const auto& spellBlock : monsterType->info.defenseSpells) {
lua_createtable(L, 0, 8);
setField(L, "chance", spellBlock.chance);
setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0);
setField(L, "isMelee", spellBlock.isMelee ? 1 : 0);
setField(L, "minCombatValue", spellBlock.minCombatValue);
setField(L, "maxCombatValue", spellBlock.maxCombatValue);
setField(L, "range", spellBlock.range);
setField(L, "speed", spellBlock.speed);
pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell));
lua_setfield(L, -2, "spell");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeAddDefense(lua_State* L)
{
// monsterType:addDefense(monsterspell)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 2);
if (spell) {
spellBlock_t sb;
if (g_monsters.deserializeSpell(spell, sb, monsterType->name)) {
monsterType->info.defenseSpells.push_back(std::move(sb));
} else {
std::cout << "[Warning - MonsterType::addDefense] Can't load spell " << spell->name << " for monster " << monsterType->name << std::endl;
}
} else {
lua_pushnil(L);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetElementList(lua_State* L)
{
// monsterType:getElementList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
lua_createtable(L, monsterType->info.elementMap.size(), 0);
for (const auto& elementEntry : monsterType->info.elementMap) {
lua_pushnumber(L, elementEntry.second);
lua_rawseti(L, -2, elementEntry.first);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeAddElement(lua_State* L)
{
// monsterType:addElement(type, percent)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
CombatType_t element = getNumber<CombatType_t>(L, 2);
monsterType->info.elementMap[element] = getNumber<int32_t>(L, 3);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetVoices(lua_State* L)
{
// monsterType:getVoices()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.voiceVector.size(), 0);
for (const auto& voiceBlock : monsterType->info.voiceVector) {
lua_createtable(L, 0, 2);
setField(L, "text", voiceBlock.text);
setField(L, "yellText", voiceBlock.yellText);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeAddVoice(lua_State* L)
{
// monsterType:addVoice(sentence, interval, chance, yell)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
voiceBlock_t voice;
voice.text = getString(L, 2);
monsterType->info.yellSpeedTicks = getNumber<uint32_t>(L, 3);
monsterType->info.yellChance = getNumber<uint32_t>(L, 4);
voice.yellText = getBoolean(L, 5);
monsterType->info.voiceVector.push_back(voice);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
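// Usage sketch (illustrative only): addVoice stores the sentence and yell flag
// per voice, while the interval and chance arguments overwrite the monster-wide
// yellSpeedTicks/yellChance on every call. Assuming "mType" is in scope:
//   mType:addVoice("I smell fear!", 5000, 10, false)  -- sentence, interval, chance, yell
//   for _, v in ipairs(mType:getVoices()) do
//       print(v.text, v.yellText)
//   end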
int LuaScriptInterface::luaMonsterTypeGetLoot(lua_State* L)
{
// monsterType:getLoot()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
pushLoot(L, monsterType->info.lootItems);
return 1;
}
int LuaScriptInterface::luaMonsterTypeAddLoot(lua_State* L)
{
// monsterType:addLoot(loot)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
Loot* loot = getUserdata<Loot>(L, 2);
if (loot) {
monsterType->loadLoot(monsterType, loot->lootBlock);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetCreatureEvents(lua_State* L)
{
// monsterType:getCreatureEvents()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.scripts.size(), 0);
for (const std::string& creatureEvent : monsterType->info.scripts) {
pushString(L, creatureEvent);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeRegisterEvent(lua_State* L)
{
// monsterType:registerEvent(name)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
monsterType->info.scripts.push_back(getString(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeEventOnCallback(lua_State* L)
{
// monsterType:onThink(callback)
// monsterType:onAppear(callback)
// monsterType:onDisappear(callback)
// monsterType:onMove(callback)
// monsterType:onSay(callback)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (monsterType->loadCallback(&g_scripts->getScriptInterface())) {
pushBoolean(L, true);
return 1;
}
pushBoolean(L, false);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeEventType(lua_State* L)
{
// monstertype:eventType(event)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
monsterType->info.eventType = getNumber<MonstersEvent_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeGetSummonList(lua_State* L)
{
// monsterType:getSummonList()
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, monsterType->info.summons.size(), 0);
for (const auto& summonBlock : monsterType->info.summons) {
lua_createtable(L, 0, 3);
setField(L, "name", summonBlock.name);
setField(L, "speed", summonBlock.speed);
setField(L, "chance", summonBlock.chance);
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeAddSummon(lua_State* L)
{
// monsterType:addSummon(name, interval, chance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
summonBlock_t summon;
summon.name = getString(L, 2);
summon.speed = getNumber<int32_t>(L, 3);
summon.chance = getNumber<int32_t>(L, 4);
monsterType->info.summons.push_back(summon);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
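// Usage sketch (illustrative only): summon entries take a name, an interval
// (stored as speed) and a chance; maxSummons caps how many may be alive at
// once. Assuming "mType" is in scope:
//   mType:addSummon("fire elemental", 2000, 20)  -- name, interval, chance
//   mType:maxSummons(2)
//   for _, s in ipairs(mType:getSummonList()) do
//       print(s.name, s.chance, s.speed)
//   end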
int LuaScriptInterface::luaMonsterTypeMaxSummons(lua_State* L)
{
// get: monsterType:maxSummons() set: monsterType:maxSummons(amount)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.maxSummons);
} else {
monsterType->info.maxSummons = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeArmor(lua_State* L)
{
// get: monsterType:armor() set: monsterType:armor(armor)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.armor);
} else {
monsterType->info.armor = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeDefense(lua_State* L)
{
// get: monsterType:defense() set: monsterType:defense(defense)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.defense);
} else {
monsterType->info.defense = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeOutfit(lua_State* L)
{
// get: monsterType:outfit() set: monsterType:outfit(outfit)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
pushOutfit(L, monsterType->info.outfit);
} else {
monsterType->info.outfit = getOutfit(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeRace(lua_State* L)
{
// get: monsterType:race() set: monsterType:race(race)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.race);
} else {
std::string race = getString(L, 2); // only read the argument in the setter path
if (race == "venom") {
monsterType->info.race = RACE_VENOM;
} else if (race == "blood") {
monsterType->info.race = RACE_BLOOD;
} else if (race == "undead") {
monsterType->info.race = RACE_UNDEAD;
} else if (race == "fire") {
monsterType->info.race = RACE_FIRE;
} else if (race == "energy") {
monsterType->info.race = RACE_ENERGY;
} else {
std::cout << "[Warning - Monsters::loadMonster] Unknown race type " << race << "." << std::endl;
lua_pushnil(L);
return 1;
}
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeCorpseId(lua_State* L)
{
// get: monsterType:corpseId() set: monsterType:corpseId(id)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.lookcorpse);
} else {
monsterType->info.lookcorpse = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeManaCost(lua_State* L)
{
// get: monsterType:manaCost() set: monsterType:manaCost(mana)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.manaCost);
} else {
monsterType->info.manaCost = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeBaseSpeed(lua_State* L)
{
// get: monsterType:baseSpeed() set: monsterType:baseSpeed(speed)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.baseSpeed);
} else {
monsterType->info.baseSpeed = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeLight(lua_State* L)
{
// get: monsterType:light() set: monsterType:light(color, level)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (!monsterType) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.light.level);
lua_pushnumber(L, monsterType->info.light.color);
return 2;
} else {
monsterType->info.light.color = getNumber<uint8_t>(L, 2);
monsterType->info.light.level = getNumber<uint8_t>(L, 3);
pushBoolean(L, true);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeStaticAttackChance(lua_State* L)
{
// get: monsterType:staticAttackChance() set: monsterType:staticAttackChance(chance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.staticAttackChance);
} else {
monsterType->info.staticAttackChance = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeTargetDistance(lua_State* L)
{
// get: monsterType:targetDistance() set: monsterType:targetDistance(distance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.targetDistance);
} else {
monsterType->info.targetDistance = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeYellChance(lua_State* L)
{
// get: monsterType:yellChance() set: monsterType:yellChance(chance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.yellChance);
} else {
monsterType->info.yellChance = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeYellSpeedTicks(lua_State* L)
{
// get: monsterType:yellSpeedTicks() set: monsterType:yellSpeedTicks(rate)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.yellSpeedTicks);
} else {
monsterType->info.yellSpeedTicks = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeChangeTargetChance(lua_State* L)
{
// get: monsterType:changeTargetChance() set: monsterType:changeTargetChance(chance)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.changeTargetChance);
} else {
monsterType->info.changeTargetChance = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterTypeChangeTargetSpeed(lua_State* L)
{
// get: monsterType:changeTargetSpeed() set: monsterType:changeTargetSpeed(speed)
MonsterType* monsterType = getUserdata<MonsterType>(L, 1);
if (monsterType) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, monsterType->info.changeTargetSpeed);
} else {
monsterType->info.changeTargetSpeed = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// Loot
int LuaScriptInterface::luaCreateLoot(lua_State* L)
{
// Loot() will create a new loot item
Loot* loot = new Loot(); // operator new throws on failure, so no null check is needed
pushUserdata<Loot>(L, loot);
setMetatable(L, -1, "Loot");
return 1;
}
int LuaScriptInterface::luaDeleteLoot(lua_State* L)
{
// loot:delete() loot:__gc()
Loot** lootPtr = getRawUserdata<Loot>(L, 1);
if (lootPtr && *lootPtr) {
delete *lootPtr;
*lootPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaLootSetId(lua_State* L)
{
// loot:setId(id or name)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
if (isNumber(L, 2)) {
loot->lootBlock.id = getNumber<uint16_t>(L, 2);
} else {
auto name = getString(L, 2);
auto ids = Item::items.nameToItems.equal_range(asLowerCaseString(name));
if (ids.first == Item::items.nameToItems.cend()) {
std::cout << "[Warning - Loot:setId] Unknown loot item \"" << name << "\". " << std::endl;
pushBoolean(L, false);
return 1;
}
if (std::next(ids.first) != ids.second) {
std::cout << "[Warning - Loot:setId] Non-unique loot item \"" << name << "\". " << std::endl;
pushBoolean(L, false);
return 1;
}
loot->lootBlock.id = ids.first->second;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaLootSetSubType(lua_State* L)
{
// loot:setSubType(type)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
loot->lootBlock.subType = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaLootSetChance(lua_State* L)
{
// loot:setChance(chance)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
loot->lootBlock.chance = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaLootSetMaxCount(lua_State* L)
{
// loot:setMaxCount(max)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
loot->lootBlock.countmax = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaLootSetActionId(lua_State* L)
{
// loot:setActionId(actionid)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
loot->lootBlock.actionId = getNumber<uint32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaLootSetDescription(lua_State* L)
{
// loot:setDescription(desc)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
loot->lootBlock.text = getString(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaLootAddChildLoot(lua_State* L)
{
// loot:addChildLoot(loot)
Loot* loot = getUserdata<Loot>(L, 1);
if (loot) {
Loot* childLoot = getUserdata<Loot>(L, 2);
if (childLoot) {
loot->lootBlock.childLoot.push_back(childLoot->lootBlock);
pushBoolean(L, true);
} else {
pushBoolean(L, false);
}
} else {
lua_pushnil(L);
}
return 1;
}
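// Usage sketch (illustrative only): loot entries can be nested via child loot
// (e.g. coins inside a bag) before being attached to a monster type. Item ids
// below are placeholders. Assuming "mType" is a MonsterType userdata:
//   local bag = Loot()
//   bag:setId(1987)          -- by id; setId also accepts a unique item name
//   bag:setChance(100000)
//   local coins = Loot()
//   coins:setId("gold coin")
//   coins:setMaxCount(100)
//   coins:setChance(50000)
//   bag:addChildLoot(coins)
//   mType:addLoot(bag)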
// MonsterSpell
int LuaScriptInterface::luaCreateMonsterSpell(lua_State* L)
{
// MonsterSpell() will create a new Monster Spell
MonsterSpell* spell = new MonsterSpell(); // operator new throws on failure, so no null check is needed
pushUserdata<MonsterSpell>(L, spell);
setMetatable(L, -1, "MonsterSpell");
return 1;
}
int LuaScriptInterface::luaDeleteMonsterSpell(lua_State* L)
{
// monsterSpell:delete() monsterSpell:__gc()
MonsterSpell** monsterSpellPtr = getRawUserdata<MonsterSpell>(L, 1);
if (monsterSpellPtr && *monsterSpellPtr) {
delete *monsterSpellPtr;
*monsterSpellPtr = nullptr;
}
return 0;
}
int LuaScriptInterface::luaMonsterSpellSetType(lua_State* L)
{
// monsterSpell:setType(type)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->name = getString(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetScriptName(lua_State* L)
{
// monsterSpell:setScriptName(name)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->scriptName = getString(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetChance(lua_State* L)
{
// monsterSpell:setChance(chance)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->chance = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetInterval(lua_State* L)
{
// monsterSpell:setInterval(interval)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->interval = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetRange(lua_State* L)
{
// monsterSpell:setRange(range)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->range = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatValue(lua_State* L)
{
// monsterSpell:setCombatValue(min, max)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->minCombatValue = getNumber<int32_t>(L, 2);
spell->maxCombatValue = getNumber<int32_t>(L, 3);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatType(lua_State* L)
{
// monsterSpell:setCombatType(combatType_t)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->combatType = getNumber<CombatType_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetAttackValue(lua_State* L)
{
// monsterSpell:setAttackValue(attack, skill)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->attack = getNumber<int32_t>(L, 2);
spell->skill = getNumber<int32_t>(L, 3);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetNeedTarget(lua_State* L)
{
// monsterSpell:setNeedTarget(bool)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->needTarget = getBoolean(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetNeedDirection(lua_State* L)
{
// monsterSpell:setNeedDirection(bool)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->needDirection = getBoolean(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatLength(lua_State* L)
{
// monsterSpell:setCombatLength(length)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->length = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatSpread(lua_State* L)
{
// monsterSpell:setCombatSpread(spread)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->spread = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatRadius(lua_State* L)
{
// monsterSpell:setCombatRadius(radius)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->radius = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatRing(lua_State* L)
{
// monsterSpell:setCombatRing(ring)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->ring = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionType(lua_State* L)
{
// monsterSpell:setConditionType(type)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->conditionType = getNumber<ConditionType_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionDamage(lua_State* L)
{
// monsterSpell:setConditionDamage(min, max, start)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->conditionMinDamage = getNumber<int32_t>(L, 2);
spell->conditionMaxDamage = getNumber<int32_t>(L, 3);
spell->conditionStartDamage = getNumber<int32_t>(L, 4);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionSpeedChange(lua_State* L)
{
// monsterSpell:setConditionSpeedChange(minSpeed[, maxSpeed])
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->minSpeedChange = getNumber<int32_t>(L, 2);
spell->maxSpeedChange = getNumber<int32_t>(L, 3, 0);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionDuration(lua_State* L)
{
// monsterSpell:setConditionDuration(duration)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->duration = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionDrunkenness(lua_State* L)
{
// monsterSpell:setConditionDrunkenness(drunkenness)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->drunkenness = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetConditionTickInterval(lua_State* L)
{
// monsterSpell:setConditionTickInterval(interval)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->tickInterval = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatShootEffect(lua_State* L)
{
// monsterSpell:setCombatShootEffect(effect)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->shoot = getNumber<ShootType_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMonsterSpellSetCombatEffect(lua_State* L)
{
// monsterSpell:setCombatEffect(effect)
MonsterSpell* spell = getUserdata<MonsterSpell>(L, 1);
if (spell) {
spell->effect = getNumber<MagicEffectClasses>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
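// Usage sketch (illustrative only): a MonsterSpell is configured through the
// setters above and then handed to monsterType:addAttack/addDefense, which run
// it through g_monsters.deserializeSpell. A simple melee attack, assuming a
// MonsterType userdata "mType" is in scope:
//   local melee = MonsterSpell()
//   melee:setType("melee")           -- spell name consumed by the deserializer
//   melee:setInterval(2000)
//   melee:setAttackValue(60, 70)     -- attack, skill
//   mType:addAttack(melee)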
// Party
int LuaScriptInterface::luaPartyCreate(lua_State* L)
{
// Party(userdata)
Player* player = getUserdata<Player>(L, 2);
if (!player) {
lua_pushnil(L);
return 1;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
g_game.updatePlayerShield(player);
player->sendCreatureSkull(player);
pushUserdata<Party>(L, party);
setMetatable(L, -1, "Party");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyDisband(lua_State* L)
{
// party:disband()
Party** partyPtr = getRawUserdata<Party>(L, 1);
if (partyPtr && *partyPtr) {
Party*& party = *partyPtr;
party->disband();
party = nullptr;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetLeader(lua_State* L)
{
// party:getLeader()
Party* party = getUserdata<Party>(L, 1);
if (!party) {
lua_pushnil(L);
return 1;
}
Player* leader = party->getLeader();
if (leader) {
pushUserdata<Player>(L, leader);
setMetatable(L, -1, "Player");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartySetLeader(lua_State* L)
{
// party:setLeader(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->passPartyLeadership(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetMembers(lua_State* L)
{
// party:getMembers()
Party* party = getUserdata<Party>(L, 1);
if (!party) {
lua_pushnil(L);
return 1;
}
int index = 0;
lua_createtable(L, party->getMemberCount(), 0);
for (Player* player : party->getMembers()) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
return 1;
}
int LuaScriptInterface::luaPartyGetMemberCount(lua_State* L)
{
// party:getMemberCount()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_pushnumber(L, party->getMemberCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetInvitees(lua_State* L)
{
// party:getInvitees()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_createtable(L, party->getInvitationCount(), 0);
int index = 0;
for (Player* player : party->getInvitees()) {
pushUserdata<Player>(L, player);
setMetatable(L, -1, "Player");
lua_rawseti(L, -2, ++index);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyGetInviteeCount(lua_State* L)
{
// party:getInviteeCount()
Party* party = getUserdata<Party>(L, 1);
if (party) {
lua_pushnumber(L, party->getInvitationCount());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyAddInvite(lua_State* L)
{
// party:addInvite(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->invitePlayer(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyRemoveInvite(lua_State* L)
{
// party:removeInvite(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->removeInvite(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyAddMember(lua_State* L)
{
// party:addMember(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->joinParty(*player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyRemoveMember(lua_State* L)
{
// party:removeMember(player)
Player* player = getPlayer(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party && player) {
pushBoolean(L, party->leaveParty(player));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceActive(lua_State* L)
{
// party:isSharedExperienceActive()
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->isSharedExperienceActive());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyIsSharedExperienceEnabled(lua_State* L)
{
// party:isSharedExperienceEnabled()
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->isSharedExperienceEnabled());
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartyShareExperience(lua_State* L)
{
// party:shareExperience(experience)
uint64_t experience = getNumber<uint64_t>(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party) {
party->shareExperience(experience);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaPartySetSharedExperience(lua_State* L)
{
// party:setSharedExperience(active)
bool active = getBoolean(L, 2);
Party* party = getUserdata<Party>(L, 1);
if (party) {
pushBoolean(L, party->setSharedExperience(party->getLeader(), active));
} else {
lua_pushnil(L);
}
return 1;
}
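// Usage sketch (illustrative only): Party(player) creates a party for a player
// who is not already in one (otherwise it returns nil); players must be invited
// before they can join. Assuming "leader" and "member" are Player userdata:
//   local party = Party(leader)
//   party:addInvite(member)
//   party:addMember(member)
//   party:setSharedExperience(true)
//   print(party:getMemberCount())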
// Spells
int LuaScriptInterface::luaSpellCreate(lua_State* L)
{
// Spell(words, name or id) to get an existing spell
// Spell(type) ex: Spell(SPELL_INSTANT) or Spell(SPELL_RUNE) to create a new spell
if (lua_gettop(L) == 1) {
std::cout << "[Error - Spell::luaSpellCreate] There is no parameter set!" << std::endl;
lua_pushnil(L);
return 1;
}
SpellType_t spellType = SPELL_UNDEFINED;
if (isNumber(L, 2)) {
int32_t id = getNumber<int32_t>(L, 2);
RuneSpell* rune = g_spells->getRuneSpell(id);
if (rune) {
pushUserdata<Spell>(L, rune);
setMetatable(L, -1, "Spell");
return 1;
}
spellType = static_cast<SpellType_t>(id);
} else if (isString(L, 2)) {
std::string arg = getString(L, 2);
InstantSpell* instant = g_spells->getInstantSpellByName(arg);
if (instant) {
pushUserdata<Spell>(L, instant);
setMetatable(L, -1, "Spell");
return 1;
}
instant = g_spells->getInstantSpell(arg);
if (instant) {
pushUserdata<Spell>(L, instant);
setMetatable(L, -1, "Spell");
return 1;
}
RuneSpell* rune = g_spells->getRuneSpellByName(arg);
if (rune) {
pushUserdata<Spell>(L, rune);
setMetatable(L, -1, "Spell");
return 1;
}
std::string tmp = asLowerCaseString(arg);
if (tmp == "instant") {
spellType = SPELL_INSTANT;
} else if (tmp == "rune") {
spellType = SPELL_RUNE;
}
}
if (spellType == SPELL_INSTANT) {
InstantSpell* spell = new InstantSpell(getScriptEnv()->getScriptInterface());
spell->fromLua = true;
pushUserdata<Spell>(L, spell);
setMetatable(L, -1, "Spell");
spell->spellType = SPELL_INSTANT;
return 1;
} else if (spellType == SPELL_RUNE) {
RuneSpell* spell = new RuneSpell(getScriptEnv()->getScriptInterface());
spell->fromLua = true;
pushUserdata<Spell>(L, spell);
setMetatable(L, -1, "Spell");
spell->spellType = SPELL_RUNE;
return 1;
}
lua_pushnil(L);
return 1;
}
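// Usage sketch (illustrative only): Spell() doubles as a lookup and a factory --
// passing existing words, a name or a rune id returns the registered spell,
// while SPELL_INSTANT/SPELL_RUNE (or "instant"/"rune") create a fresh one:
//   local existing = Spell("exura")     -- lookup by words or name
//   local fresh = Spell(SPELL_INSTANT)  -- new, unregistered instant spell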
int LuaScriptInterface::luaSpellOnCastSpell(lua_State* L)
{
// spell:onCastSpell(callback)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (spell->spellType == SPELL_INSTANT) {
InstantSpell* instant = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (!instant->loadCallback()) {
pushBoolean(L, false);
return 1;
}
instant->scripted = true;
pushBoolean(L, true);
} else if (spell->spellType == SPELL_RUNE) {
RuneSpell* rune = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (!rune->loadCallback()) {
pushBoolean(L, false);
return 1;
}
rune->scripted = true;
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellRegister(lua_State* L)
{
// spell:register()
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (spell->spellType == SPELL_INSTANT) {
InstantSpell* instant = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (!instant->isScripted()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, g_spells->registerInstantLuaEvent(instant));
} else if (spell->spellType == SPELL_RUNE) {
RuneSpell* rune = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (rune->getMagicLevel() != 0 || rune->getLevel() != 0) {
//Change information in the ItemType to get accurate description
ItemType& iType = Item::items.getItemType(rune->getRuneItemId());
iType.name = rune->getName();
iType.runeMagLevel = rune->getMagicLevel();
iType.runeLevel = rune->getLevel();
iType.charges = rune->getCharges();
}
if (!rune->isScripted()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, g_spells->registerRuneLuaEvent(rune));
}
} else {
lua_pushnil(L);
}
return 1;
}
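// Usage sketch (illustrative only): a freshly created spell needs its callback
// assigned and register() called before it is live; the name and words below
// are hypothetical:
//   local spell = Spell(SPELL_INSTANT)
//   spell:name("Demo Heal")
//   spell:words("exana demo")
//   spell:level(20)
//   spell:mana(100)
//   function spell.onCastSpell(creature, variant)
//       return true
//   end
//   spell:register()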
int LuaScriptInterface::luaSpellName(lua_State* L)
{
// spell:name(name)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushString(L, spell->getName());
} else {
spell->setName(getString(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellId(lua_State* L)
{
// spell:id(id)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getId());
} else {
spell->setId(getNumber<uint8_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellGroup(lua_State* L)
{
// spell:group(primaryGroup[, secondaryGroup])
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getGroup());
lua_pushnumber(L, spell->getSecondaryGroup());
return 2;
} else if (lua_gettop(L) == 2) {
SpellGroup_t group = getNumber<SpellGroup_t>(L, 2);
if (group) {
spell->setGroup(group);
pushBoolean(L, true);
} else if (isString(L, 2)) {
group = stringToSpellGroup(getString(L, 2));
if (group != SPELLGROUP_NONE) {
spell->setGroup(group);
} else {
std::cout << "[Warning - Spell::group] Unknown group: " << getString(L, 2) << std::endl;
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
std::cout << "[Warning - Spell::group] Unknown group: " << getString(L, 2) << std::endl;
pushBoolean(L, false);
return 1;
}
} else {
SpellGroup_t primaryGroup = getNumber<SpellGroup_t>(L, 2);
SpellGroup_t secondaryGroup = getNumber<SpellGroup_t>(L, 3);
if (primaryGroup && secondaryGroup) {
spell->setGroup(primaryGroup);
spell->setSecondaryGroup(secondaryGroup);
pushBoolean(L, true);
} else if (isString(L, 2) && isString(L, 3)) {
primaryGroup = stringToSpellGroup(getString(L, 2));
if (primaryGroup != SPELLGROUP_NONE) {
spell->setGroup(primaryGroup);
} else {
std::cout << "[Warning - Spell::group] Unknown primaryGroup: " << getString(L, 2) << std::endl;
pushBoolean(L, false);
return 1;
}
secondaryGroup = stringToSpellGroup(getString(L, 3));
if (secondaryGroup != SPELLGROUP_NONE) {
spell->setSecondaryGroup(secondaryGroup);
} else {
std::cout << "[Warning - Spell::group] Unknown secondaryGroup: " << getString(L, 3) << std::endl;
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
std::cout << "[Warning - Spell::group] Unknown primaryGroup: " << getString(L, 2) << " or secondaryGroup: " << getString(L, 3) << std::endl;
pushBoolean(L, false);
return 1;
}
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellCooldown(lua_State* L)
{
// spell:cooldown(cooldown)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getCooldown());
} else {
spell->setCooldown(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellGroupCooldown(lua_State* L)
{
// spell:groupCooldown(primaryGroupCd[, secondaryGroupCd])
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getGroupCooldown());
lua_pushnumber(L, spell->getSecondaryCooldown());
return 2;
} else if (lua_gettop(L) == 2) {
spell->setGroupCooldown(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
spell->setGroupCooldown(getNumber<uint32_t>(L, 2));
spell->setSecondaryCooldown(getNumber<uint32_t>(L, 3));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellLevel(lua_State* L)
{
// spell:level(lvl)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getLevel());
} else {
spell->setLevel(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellMagicLevel(lua_State* L)
{
// spell:magicLevel(lvl)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getMagicLevel());
} else {
spell->setMagicLevel(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellMana(lua_State* L)
{
// spell:mana(mana)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getMana());
} else {
spell->setMana(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellManaPercent(lua_State* L)
{
// spell:manaPercent(percent)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getManaPercent());
} else {
spell->setManaPercent(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellSoul(lua_State* L)
{
// spell:soul(soul)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getSoulCost());
} else {
spell->setSoulCost(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellRange(lua_State* L)
{
// spell:range(range)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getRange());
} else {
spell->setRange(getNumber<int32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellPremium(lua_State* L)
{
// spell:isPremium(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->isPremium());
} else {
spell->setPremium(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellEnabled(lua_State* L)
{
// spell:isEnabled(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->isEnabled());
} else {
spell->setEnabled(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellNeedTarget(lua_State* L)
{
// spell:needTarget(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getNeedTarget());
} else {
spell->setNeedTarget(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellNeedWeapon(lua_State* L)
{
// spell:needWeapon(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getNeedWeapon());
} else {
spell->setNeedWeapon(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellNeedLearn(lua_State* L)
{
// spell:needLearn(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getNeedLearn());
} else {
spell->setNeedLearn(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellSelfTarget(lua_State* L)
{
// spell:isSelfTarget(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getSelfTarget());
} else {
spell->setSelfTarget(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellBlocking(lua_State* L)
{
// spell:isBlocking(blockingSolid, blockingCreature)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getBlockingSolid());
pushBoolean(L, spell->getBlockingCreature());
return 2;
} else {
spell->setBlockingSolid(getBoolean(L, 2));
spell->setBlockingCreature(getBoolean(L, 3));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellAggressive(lua_State* L)
{
// spell:isAggressive(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getAggressive());
} else {
spell->setAggressive(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellPzLock(lua_State* L)
{
// spell:isPzLock(bool)
Spell* spell = getUserdata<Spell>(L, 1);
if (spell) {
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getPzLock());
} else {
spell->setPzLock(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaSpellVocation(lua_State* L)
{
// spell:vocation(vocation)
Spell* spell = getUserdata<Spell>(L, 1);
if (!spell) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
lua_createtable(L, 0, 0);
int i = 0;
for (auto& voc : spell->getVocMap()) {
std::string name = g_vocations.getVocation(voc.first)->getVocName();
pushString(L, name);
lua_rawseti(L, -2, ++i);
}
} else {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
for (int i = 0; i < parameters; ++i) {
std::vector<std::string> vocList = explodeString(getString(L, 2 + i), ";");
spell->addVocMap(g_vocations.getVocationId(vocList[0]), vocList.size() > 1 ? booleanString(vocList[1]) : false);
}
pushBoolean(L, true);
}
return 1;
}
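// Usage sketch (illustrative only): each vocation argument is a string of the
// form "name" or "name;true"; per addVocMap above, the optional boolean is
// forwarded as the second argument (our reading: whether the vocation is shown
// in the spell description). Example:
//   spell:vocation("sorcerer;true", "master sorcerer")
//   for _, name in ipairs(spell:vocation()) do print(name) end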
// only for InstantSpells
int LuaScriptInterface::luaSpellWords(lua_State* L)
{
// spell:words(words[, separator = ""])
InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_INSTANT, this is not actually an InstantSpell, so we return nil
if (spell->spellType != SPELL_INSTANT) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushString(L, spell->getWords());
pushString(L, spell->getSeparator());
return 2;
} else {
std::string sep = "";
if (lua_gettop(L) == 3) {
sep = getString(L, 3);
}
spell->setWords(getString(L, 2));
spell->setSeparator(sep);
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellNeedDirection(lua_State* L)
{
// spell:needDirection(bool)
InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_INSTANT, this is not actually an InstantSpell, so we return nil
if (spell->spellType != SPELL_INSTANT) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getNeedDirection());
} else {
spell->setNeedDirection(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellHasParams(lua_State* L)
{
// spell:hasParams(bool)
InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_INSTANT, this is not actually an InstantSpell, so we return nil
if (spell->spellType != SPELL_INSTANT) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getHasParam());
} else {
spell->setHasParam(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellHasPlayerNameParam(lua_State* L)
{
// spell:hasPlayerNameParam(bool)
InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_INSTANT, this is not actually an InstantSpell, so we return nil
if (spell->spellType != SPELL_INSTANT) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getHasPlayerNameParam());
} else {
spell->setHasPlayerNameParam(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellNeedCasterTargetOrDirection(lua_State* L)
{
// spell:needCasterTargetOrDirection(bool)
InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_INSTANT, this is not actually an InstantSpell, so we return nil
if (spell->spellType != SPELL_INSTANT) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getNeedCasterTargetOrDirection());
} else {
spell->setNeedCasterTargetOrDirection(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for InstantSpells
int LuaScriptInterface::luaSpellIsBlockingWalls(lua_State* L)
{
// spell:blockWalls(bool)
InstantSpell* spell = dynamic_cast<InstantSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_INSTANT, this is not actually an InstantSpell, so we return nil
if (spell->spellType != SPELL_INSTANT) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getBlockWalls());
} else {
spell->setBlockWalls(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellRuneLevel(lua_State* L)
{
// spell:runeLevel(level)
RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (spell->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getLevel());
} else {
spell->setLevel(getNumber<int32_t>(L, 2)); // only read the argument in the setter path
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellRuneMagicLevel(lua_State* L)
{
// spell:runeMagicLevel(magLevel)
RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (spell->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getMagicLevel());
} else {
spell->setMagicLevel(getNumber<int32_t>(L, 2)); // only read the argument in the setter path
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellRuneId(lua_State* L)
{
// spell:runeId(id)
RuneSpell* rune = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (rune) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (rune->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
lua_pushnumber(L, rune->getRuneItemId());
} else {
rune->setRuneItemId(getNumber<uint16_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellCharges(lua_State* L)
{
// spell:charges(charges)
RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (spell->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
lua_pushnumber(L, spell->getCharges());
} else {
spell->setCharges(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellAllowFarUse(lua_State* L)
{
// spell:allowFarUse(bool)
RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (spell->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getAllowFarUse());
} else {
spell->setAllowFarUse(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellBlockWalls(lua_State* L)
{
// spell:blockWalls(bool)
RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (spell->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getCheckLineOfSight());
} else {
spell->setCheckLineOfSight(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
// only for RuneSpells
int LuaScriptInterface::luaSpellCheckFloor(lua_State* L)
{
// spell:checkFloor(bool)
RuneSpell* spell = dynamic_cast<RuneSpell*>(getUserdata<Spell>(L, 1));
if (spell) {
// if spellType != SPELL_RUNE, this is not actually a RuneSpell, so we return nil
if (spell->spellType != SPELL_RUNE) {
lua_pushnil(L);
return 1;
}
if (lua_gettop(L) == 1) {
pushBoolean(L, spell->getCheckFloor());
} else {
spell->setCheckFloor(getBoolean(L, 2));
pushBoolean(L, true);
}
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreateAction(lua_State* L)
{
// Action()
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "Actions can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
Action* action = new Action(getScriptEnv()->getScriptInterface());
if (action) {
action->fromLua = true;
pushUserdata<Action>(L, action);
setMetatable(L, -1, "Action");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionOnUse(lua_State* L)
{
// action:onUse(callback)
Action* action = getUserdata<Action>(L, 1);
if (action) {
if (!action->loadCallback()) {
pushBoolean(L, false);
return 1;
}
action->scripted = true;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionRegister(lua_State* L)
{
// action:register()
Action* action = getUserdata<Action>(L, 1);
if (action) {
if (!action->isScripted()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, g_actions->registerLuaEvent(action));
action->clearActionIdRange();
action->clearItemIdRange();
action->clearUniqueIdRange();
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionItemId(lua_State* L)
{
// action:id(ids)
Action* action = getUserdata<Action>(L, 1);
if (action) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
action->addItemId(getNumber<uint32_t>(L, 2 + i));
}
} else {
action->addItemId(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionActionId(lua_State* L)
{
// action:aid(aids)
Action* action = getUserdata<Action>(L, 1);
if (action) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
action->addActionId(getNumber<uint32_t>(L, 2 + i));
}
} else {
action->addActionId(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionUniqueId(lua_State* L)
{
// action:uid(uids)
Action* action = getUserdata<Action>(L, 1);
if (action) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
action->addUniqueId(getNumber<uint32_t>(L, 2 + i));
}
} else {
action->addUniqueId(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionAllowFarUse(lua_State* L)
{
// action:allowFarUse(bool)
Action* action = getUserdata<Action>(L, 1);
if (action) {
action->setAllowFarUse(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionBlockWalls(lua_State* L)
{
// action:blockWalls(bool)
Action* action = getUserdata<Action>(L, 1);
if (action) {
action->setCheckLineOfSight(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaActionCheckFloor(lua_State* L)
{
// action:checkFloor(bool)
Action* action = getUserdata<Action>(L, 1);
if (action) {
action->setCheckFloor(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreateTalkaction(lua_State* L)
{
// TalkAction(words)
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "TalkActions can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
TalkAction* talk = new TalkAction(getScriptEnv()->getScriptInterface());
if (talk) {
for (int i = 2; i <= lua_gettop(L); i++) {
talk->setWords(getString(L, i));
}
talk->fromLua = true;
pushUserdata<TalkAction>(L, talk);
setMetatable(L, -1, "TalkAction");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTalkactionOnSay(lua_State* L)
{
// talkAction:onSay(callback)
TalkAction* talk = getUserdata<TalkAction>(L, 1);
if (talk) {
if (!talk->loadCallback()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTalkactionRegister(lua_State* L)
{
// talkAction:register()
TalkAction* talk = getUserdata<TalkAction>(L, 1);
if (talk) {
if (!talk->isScripted()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, g_talkActions->registerLuaEvent(talk));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTalkactionSeparator(lua_State* L)
{
// talkAction:separator(sep)
TalkAction* talk = getUserdata<TalkAction>(L, 1);
if (talk) {
talk->setSeparator(getString(L, 2).c_str());
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTalkactionAccess(lua_State* L)
{
// talkAction:access(needAccess = false)
TalkAction* talk = getUserdata<TalkAction>(L, 1);
if (talk) {
talk->setNeedAccess(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaTalkactionAccountType(lua_State* L)
{
// talkAction:accountType(AccountType_t = ACCOUNT_TYPE_NORMAL)
TalkAction* talk = getUserdata<TalkAction>(L, 1);
if (talk) {
talk->setRequiredAccountType(getNumber<AccountType_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreateCreatureEvent(lua_State* L)
{
// CreatureEvent(eventName)
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "CreatureEvents can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
CreatureEvent* creature = new CreatureEvent(getScriptEnv()->getScriptInterface());
if (creature) {
creature->setName(getString(L, 2));
creature->fromLua = true;
pushUserdata<CreatureEvent>(L, creature);
setMetatable(L, -1, "CreatureEvent");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureEventType(lua_State* L)
{
// creatureevent:type(callback)
CreatureEvent* creature = getUserdata<CreatureEvent>(L, 1);
if (creature) {
std::string typeName = getString(L, 2);
std::string tmpStr = asLowerCaseString(typeName);
if (tmpStr == "login") {
creature->setEventType(CREATURE_EVENT_LOGIN);
} else if (tmpStr == "logout") {
creature->setEventType(CREATURE_EVENT_LOGOUT);
} else if (tmpStr == "think") {
creature->setEventType(CREATURE_EVENT_THINK);
} else if (tmpStr == "preparedeath") {
creature->setEventType(CREATURE_EVENT_PREPAREDEATH);
} else if (tmpStr == "death") {
creature->setEventType(CREATURE_EVENT_DEATH);
} else if (tmpStr == "kill") {
creature->setEventType(CREATURE_EVENT_KILL);
} else if (tmpStr == "advance") {
creature->setEventType(CREATURE_EVENT_ADVANCE);
} else if (tmpStr == "modalwindow") {
creature->setEventType(CREATURE_EVENT_MODALWINDOW);
} else if (tmpStr == "textedit") {
creature->setEventType(CREATURE_EVENT_TEXTEDIT);
} else if (tmpStr == "healthchange") {
creature->setEventType(CREATURE_EVENT_HEALTHCHANGE);
} else if (tmpStr == "manachange") {
creature->setEventType(CREATURE_EVENT_MANACHANGE);
} else if (tmpStr == "extendedopcode") {
creature->setEventType(CREATURE_EVENT_EXTENDED_OPCODE);
} else {
std::cout << "[Error - CreatureEvent::configureLuaEvent] Invalid type for creature event: " << typeName << std::endl;
pushBoolean(L, false);
return 1;
}
creature->setLoaded(true);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureEventRegister(lua_State* L)
{
// creatureevent:register()
CreatureEvent* creature = getUserdata<CreatureEvent>(L, 1);
if (creature) {
if (!creature->isScripted()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, g_creatureEvents->registerLuaEvent(creature));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreatureEventOnCallback(lua_State* L)
{
// creatureevent:onLogin / logout / etc. (callback)
CreatureEvent* creature = getUserdata<CreatureEvent>(L, 1);
if (creature) {
if (!creature->loadCallback()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreateMoveEvent(lua_State* L)
{
// MoveEvent()
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "MoveEvents can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
MoveEvent* moveevent = new MoveEvent(getScriptEnv()->getScriptInterface());
if (moveevent) {
moveevent->fromLua = true;
pushUserdata<MoveEvent>(L, moveevent);
setMetatable(L, -1, "MoveEvent");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventType(lua_State* L)
{
// moveevent:type(callback)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
std::string typeName = getString(L, 2);
std::string tmpStr = asLowerCaseString(typeName);
if (tmpStr == "stepin") {
moveevent->setEventType(MOVE_EVENT_STEP_IN);
moveevent->stepFunction = moveevent->StepInField;
} else if (tmpStr == "stepout") {
moveevent->setEventType(MOVE_EVENT_STEP_OUT);
moveevent->stepFunction = moveevent->StepOutField;
} else if (tmpStr == "equip") {
moveevent->setEventType(MOVE_EVENT_EQUIP);
moveevent->equipFunction = moveevent->EquipItem;
} else if (tmpStr == "deequip") {
moveevent->setEventType(MOVE_EVENT_DEEQUIP);
moveevent->equipFunction = moveevent->DeEquipItem;
} else if (tmpStr == "additem") {
moveevent->setEventType(MOVE_EVENT_ADD_ITEM);
moveevent->moveFunction = moveevent->AddItemField;
} else if (tmpStr == "removeitem") {
moveevent->setEventType(MOVE_EVENT_REMOVE_ITEM);
moveevent->moveFunction = moveevent->RemoveItemField;
} else {
std::cout << "Error: [MoveEvent::configureMoveEvent] No valid event name " << typeName << std::endl;
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventRegister(lua_State* L)
{
// moveevent:register()
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
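// equip/deequip events registered without an explicit slot derive it from the first item id's slotPosition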
if ((moveevent->getEventType() == MOVE_EVENT_EQUIP || moveevent->getEventType() == MOVE_EVENT_DEEQUIP) && moveevent->getSlot() == SLOTP_WHEREEVER) {
uint32_t id = moveevent->getItemIdRange().at(0);
ItemType& it = Item::items.getItemType(id);
moveevent->setSlot(it.slotPosition);
}
if (!moveevent->isScripted()) {
pushBoolean(L, g_moveEvents->registerLuaFunction(moveevent));
return 1;
}
pushBoolean(L, g_moveEvents->registerLuaEvent(moveevent));
moveevent->clearItemIdRange();
moveevent->clearActionIdRange();
moveevent->clearUniqueIdRange();
moveevent->clearPosList();
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventOnCallback(lua_State* L)
{
// moveevent:onEquip / deEquip / etc. (callback)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
if (!moveevent->loadCallback()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventSlot(lua_State* L)
{
// moveevent:slot(slot)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (!moveevent) {
lua_pushnil(L);
return 1;
}
if (moveevent->getEventType() == MOVE_EVENT_EQUIP || moveevent->getEventType() == MOVE_EVENT_DEEQUIP) {
std::string slotName = asLowerCaseString(getString(L, 2));
if (slotName == "head") {
moveevent->setSlot(SLOTP_HEAD);
} else if (slotName == "necklace") {
moveevent->setSlot(SLOTP_NECKLACE);
} else if (slotName == "backpack") {
moveevent->setSlot(SLOTP_BACKPACK);
} else if (slotName == "armor" || slotName == "body") {
moveevent->setSlot(SLOTP_ARMOR);
} else if (slotName == "right-hand") {
moveevent->setSlot(SLOTP_RIGHT);
} else if (slotName == "left-hand") {
moveevent->setSlot(SLOTP_LEFT);
} else if (slotName == "hand" || slotName == "shield") {
moveevent->setSlot(SLOTP_RIGHT | SLOTP_LEFT);
} else if (slotName == "legs") {
moveevent->setSlot(SLOTP_LEGS);
} else if (slotName == "feet") {
moveevent->setSlot(SLOTP_FEET);
} else if (slotName == "ring") {
moveevent->setSlot(SLOTP_RING);
} else if (slotName == "ammo") {
moveevent->setSlot(SLOTP_AMMO);
} else {
std::cout << "[Warning - MoveEvent::configureMoveEvent] Unknown slot type: " << slotName << std::endl;
pushBoolean(L, false);
return 1;
}
}
pushBoolean(L, true);
return 1;
}
int LuaScriptInterface::luaMoveEventLevel(lua_State* L)
{
// moveevent:level(lvl)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
moveevent->setRequiredLevel(getNumber<uint32_t>(L, 2));
moveevent->setWieldInfo(WIELDINFO_LEVEL);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventMagLevel(lua_State* L)
{
// moveevent:magicLevel(lvl)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
moveevent->setRequiredMagLevel(getNumber<uint32_t>(L, 2));
moveevent->setWieldInfo(WIELDINFO_MAGLV);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventPremium(lua_State* L)
{
// moveevent:premium(bool)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
moveevent->setNeedPremium(getBoolean(L, 2));
moveevent->setWieldInfo(WIELDINFO_PREMIUM);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventVocation(lua_State* L)
{
// moveevent:vocation(vocName[, showInDescription = false, lastVoc = false])
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
moveevent->addVocEquipMap(getString(L, 2));
moveevent->setWieldInfo(WIELDINFO_VOCREQ);
std::string tmp;
bool showInDescription = getBoolean(L, 3, false);
bool lastVoc = getBoolean(L, 4, false);
if (showInDescription) {
if (moveevent->getVocationString().empty()) {
tmp = asLowerCaseString(getString(L, 2));
tmp += "s";
moveevent->setVocationString(tmp);
} else {
tmp = moveevent->getVocationString();
if (lastVoc) {
tmp += " and ";
} else {
tmp += ", ";
}
tmp += asLowerCaseString(getString(L, 2));
tmp += "s";
moveevent->setVocationString(tmp);
}
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventTileItem(lua_State* L)
{
// moveevent:tileItem(bool)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
moveevent->setTileItem(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventItemId(lua_State* L)
{
// moveevent:id(ids)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
moveevent->addItemId(getNumber<uint32_t>(L, 2 + i));
}
} else {
moveevent->addItemId(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventActionId(lua_State* L)
{
// moveevent:aid(ids)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
moveevent->addActionId(getNumber<uint32_t>(L, 2 + i));
}
} else {
moveevent->addActionId(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventUniqueId(lua_State* L)
{
// moveevent:uid(ids)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
moveevent->addUniqueId(getNumber<uint32_t>(L, 2 + i));
}
} else {
moveevent->addUniqueId(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaMoveEventPosition(lua_State* L)
{
// moveevent:position(positions)
MoveEvent* moveevent = getUserdata<MoveEvent>(L, 1);
if (moveevent) {
int parameters = lua_gettop(L) - 1; // - 1 because self is a parameter as well, which we want to skip
if (parameters > 1) {
for (int i = 0; i < parameters; ++i) {
moveevent->addPosList(getPosition(L, 2 + i));
}
} else {
moveevent->addPosList(getPosition(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaCreateGlobalEvent(lua_State* L)
{
// GlobalEvent(eventName)
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "GlobalEvents can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
GlobalEvent* global = new GlobalEvent(getScriptEnv()->getScriptInterface());
if (global) {
global->setName(getString(L, 2));
global->setEventType(GLOBALEVENT_NONE);
global->fromLua = true;
pushUserdata<GlobalEvent>(L, global);
setMetatable(L, -1, "GlobalEvent");
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGlobalEventType(lua_State* L)
{
// globalevent:type(callback)
GlobalEvent* global = getUserdata<GlobalEvent>(L, 1);
if (global) {
std::string typeName = getString(L, 2);
std::string tmpStr = asLowerCaseString(typeName);
if (tmpStr == "startup") {
global->setEventType(GLOBALEVENT_STARTUP);
} else if (tmpStr == "shutdown") {
global->setEventType(GLOBALEVENT_SHUTDOWN);
} else if (tmpStr == "record") {
global->setEventType(GLOBALEVENT_RECORD);
} else {
std::cout << "[Error - GlobalEvent::configureLuaEvent] Invalid type for global event: " << typeName << std::endl;
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGlobalEventRegister(lua_State* L)
{
// globalevent:register()
GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
if (globalevent) {
if (!globalevent->isScripted()) {
pushBoolean(L, false);
return 1;
}
if (globalevent->getEventType() == GLOBALEVENT_NONE && globalevent->getInterval() == 0) {
std::cout << "[Error - LuaScriptInterface::luaGlobalEventRegister] No interval for globalevent with name " << globalevent->getName() << std::endl;
pushBoolean(L, false);
return 1;
}
pushBoolean(L, g_globalEvents->registerLuaEvent(globalevent));
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGlobalEventOnCallback(lua_State* L)
{
// globalevent:onThink / record / etc. (callback)
GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
if (globalevent) {
if (!globalevent->loadCallback()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGlobalEventTime(lua_State* L)
{
// globalevent:time(time)
GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
if (globalevent) {
std::string timer = getString(L, 2);
std::vector<int32_t> params = vectorAtoi(explodeString(timer, ":"));
int32_t hour = params.front();
if (hour < 0 || hour > 23) {
std::cout << "[Error - GlobalEvent::configureEvent] Invalid hour \"" << timer << "\" for globalevent with name: " << globalevent->getName() << std::endl;
pushBoolean(L, false);
return 1;
}
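// the scheduled hour is packed into the upper 16 bits of the interval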
globalevent->setInterval(hour << 16);
int32_t min = 0;
int32_t sec = 0;
if (params.size() > 1) {
min = params[1];
if (min < 0 || min > 59) {
std::cout << "[Error - GlobalEvent::configureEvent] Invalid minute \"" << timer << "\" for globalevent with name: " << globalevent->getName() << std::endl;
pushBoolean(L, false);
return 1;
}
if (params.size() > 2) {
sec = params[2];
if (sec < 0 || sec > 59) {
std::cout << "[Error - GlobalEvent::configureEvent] Invalid second \"" << timer << "\" for globalevent with name: " << globalevent->getName() << std::endl;
pushBoolean(L, false);
return 1;
}
}
}
time_t current_time = time(nullptr);
tm* timeinfo = localtime(&current_time);
timeinfo->tm_hour = hour;
timeinfo->tm_min = min;
timeinfo->tm_sec = sec;
time_t difference = static_cast<time_t>(difftime(mktime(timeinfo), current_time));
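// a negative difference means the target time already passed today, so push execution to tomorrow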
if (difference < 0) {
difference += 86400;
}
globalevent->setNextExecution(current_time + difference);
globalevent->setEventType(GLOBALEVENT_TIMER);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaGlobalEventInterval(lua_State* L)
{
// globalevent:interval(interval)
GlobalEvent* globalevent = getUserdata<GlobalEvent>(L, 1);
if (globalevent) {
globalevent->setInterval(getNumber<uint32_t>(L, 2));
globalevent->setNextExecution(OTSYS_TIME() + getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// Weapon
int LuaScriptInterface::luaCreateWeapon(lua_State* L)
{
// Weapon(type)
if (getScriptEnv()->getScriptInterface() != &g_scripts->getScriptInterface()) {
reportErrorFunc(L, "Weapons can only be registered in the Scripts interface.");
lua_pushnil(L);
return 1;
}
WeaponType_t type = getNumber<WeaponType_t>(L, 2);
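// construct the concrete weapon subclass that matches the requested type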
switch (type) {
case WEAPON_SWORD:
case WEAPON_AXE:
case WEAPON_CLUB: {
WeaponMelee* weapon = new WeaponMelee(getScriptEnv()->getScriptInterface());
if (weapon) {
pushUserdata<WeaponMelee>(L, weapon);
setMetatable(L, -1, "Weapon");
weapon->weaponType = type;
weapon->fromLua = true;
} else {
lua_pushnil(L);
}
break;
}
case WEAPON_DISTANCE:
case WEAPON_AMMO: {
WeaponDistance* weapon = new WeaponDistance(getScriptEnv()->getScriptInterface());
if (weapon) {
pushUserdata<WeaponDistance>(L, weapon);
setMetatable(L, -1, "Weapon");
weapon->weaponType = type;
weapon->fromLua = true;
} else {
lua_pushnil(L);
}
break;
}
case WEAPON_WAND: {
WeaponWand* weapon = new WeaponWand(getScriptEnv()->getScriptInterface());
if (weapon) {
pushUserdata<WeaponWand>(L, weapon);
setMetatable(L, -1, "Weapon");
weapon->weaponType = type;
weapon->fromLua = true;
} else {
lua_pushnil(L);
}
break;
}
default: {
lua_pushnil(L);
break;
}
}
return 1;
}
int LuaScriptInterface::luaWeaponAction(lua_State* L)
{
// weapon:action(callback)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
std::string typeName = getString(L, 2);
std::string tmpStr = asLowerCaseString(typeName);
if (tmpStr == "removecount") {
weapon->action = WEAPONACTION_REMOVECOUNT;
} else if (tmpStr == "removecharge") {
weapon->action = WEAPONACTION_REMOVECHARGE;
} else if (tmpStr == "move") {
weapon->action = WEAPONACTION_MOVE;
} else {
std::cout << "Error: [Weapon::action] No valid action " << typeName << std::endl;
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponRegister(lua_State* L)
{
// weapon:register()
Weapon** weaponPtr = getRawUserdata<Weapon>(L, 1);
if (!weaponPtr) {
lua_pushnil(L);
return 1;
}
if (auto* weapon = *weaponPtr) {
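// fetch the userdata again as the concrete subclass that matches the weapon's type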
if (weapon->weaponType == WEAPON_DISTANCE || weapon->weaponType == WEAPON_AMMO) {
weapon = getUserdata<WeaponDistance>(L, 1);
} else if (weapon->weaponType == WEAPON_WAND) {
weapon = getUserdata<WeaponWand>(L, 1);
} else {
weapon = getUserdata<WeaponMelee>(L, 1);
}
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.weaponType = weapon->weaponType;
if (weapon->getWieldInfo() != 0) {
it.wieldInfo = weapon->getWieldInfo();
it.vocationString = weapon->getVocationString();
it.minReqLevel = weapon->getReqLevel();
it.minReqMagicLevel = weapon->getReqMagLv();
}
weapon->configureWeapon(it);
pushBoolean(L, g_weapons->registerLuaEvent(weapon));
*weaponPtr = nullptr; // Remove luascript reference
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponOnUseWeapon(lua_State* L)
{
// weapon:onUseWeapon(callback)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
if (!weapon->loadCallback()) {
pushBoolean(L, false);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponUnproperly(lua_State* L)
{
// weapon:wieldedUnproperly(bool)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setWieldUnproperly(getBoolean(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponLevel(lua_State* L)
{
// weapon:level(lvl)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setRequiredLevel(getNumber<uint32_t>(L, 2));
weapon->setWieldInfo(WIELDINFO_LEVEL);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponMagicLevel(lua_State* L)
{
// weapon:magicLevel(lvl)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setRequiredMagLevel(getNumber<uint32_t>(L, 2));
weapon->setWieldInfo(WIELDINFO_MAGLV);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponMana(lua_State* L)
{
// weapon:mana(mana)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setMana(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponManaPercent(lua_State* L)
{
// weapon:manaPercent(percent)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setManaPercent(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponHealth(lua_State* L)
{
// weapon:health(health)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setHealth(getNumber<int32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponHealthPercent(lua_State* L)
{
// weapon:healthPercent(percent)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setHealthPercent(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponSoul(lua_State* L)
{
// weapon:soul(soul)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setSoul(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponBreakChance(lua_State* L)
{
// weapon:breakChance(percent)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setBreakChance(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponWandDamage(lua_State* L)
{
// weapon:damage(min[, max]) - only use this if the weapon is a wand!
WeaponWand* weapon = getUserdata<WeaponWand>(L, 1);
if (weapon) {
weapon->setMinChange(getNumber<uint32_t>(L, 2));
if (lua_gettop(L) > 2) {
weapon->setMaxChange(getNumber<uint32_t>(L, 3));
} else {
weapon->setMaxChange(getNumber<uint32_t>(L, 2));
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponElement(lua_State* L)
{
// weapon:element(combatType)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
if (!getNumber<CombatType_t>(L, 2)) {
std::string element = getString(L, 2);
std::string tmpStrValue = asLowerCaseString(element);
if (tmpStrValue == "earth") {
weapon->params.combatType = COMBAT_EARTHDAMAGE;
} else if (tmpStrValue == "ice") {
weapon->params.combatType = COMBAT_ICEDAMAGE;
} else if (tmpStrValue == "energy") {
weapon->params.combatType = COMBAT_ENERGYDAMAGE;
} else if (tmpStrValue == "fire") {
weapon->params.combatType = COMBAT_FIREDAMAGE;
} else if (tmpStrValue == "death") {
weapon->params.combatType = COMBAT_DEATHDAMAGE;
} else if (tmpStrValue == "holy") {
weapon->params.combatType = COMBAT_HOLYDAMAGE;
} else {
std::cout << "[Warning - weapon:element] Type \"" << element << "\" does not exist." << std::endl;
}
} else {
weapon->params.combatType = getNumber<CombatType_t>(L, 2);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponPremium(lua_State* L)
{
// weapon:premium(bool)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setNeedPremium(getBoolean(L, 2));
weapon->setWieldInfo(WIELDINFO_PREMIUM);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponVocation(lua_State* L)
{
// weapon:vocation(vocName[, showInDescription = false, lastVoc = false])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->addVocWeaponMap(getString(L, 2));
weapon->setWieldInfo(WIELDINFO_VOCREQ);
std::string tmp;
bool showInDescription = getBoolean(L, 3, false);
bool lastVoc = getBoolean(L, 4, false);
if (showInDescription) {
if (weapon->getVocationString().empty()) {
tmp = asLowerCaseString(getString(L, 2));
tmp += "s";
weapon->setVocationString(tmp);
} else {
tmp = weapon->getVocationString();
if (lastVoc) {
tmp += " and ";
} else {
tmp += ", ";
}
tmp += asLowerCaseString(getString(L, 2));
tmp += "s";
weapon->setVocationString(tmp);
}
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponId(lua_State* L)
{
// weapon:id(id)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
weapon->setID(getNumber<uint32_t>(L, 2));
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponAttack(lua_State* L)
{
// weapon:attack(atk)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.attack = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponDefense(lua_State* L)
{
// weapon:defense(defense[, extraDefense])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.defense = getNumber<int32_t>(L, 2);
if (lua_gettop(L) > 2) {
it.extraDefense = getNumber<int32_t>(L, 3);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponRange(lua_State* L)
{
// weapon:range(range)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.shootRange = getNumber<uint8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponCharges(lua_State* L)
{
// weapon:charges(charges[, showCharges = true])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
bool showCharges = getBoolean(L, 3, true);
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.charges = getNumber<uint8_t>(L, 2);
it.showCharges = showCharges;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponDuration(lua_State* L)
{
// weapon:duration(duration[, showDuration = true])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
bool showDuration = getBoolean(L, 3, true);
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.decayTime = getNumber<uint32_t>(L, 2);
it.showDuration = showDuration;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponDecayTo(lua_State* L)
{
// weapon:decayTo([itemid = 0])
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t itemid = getNumber<uint16_t>(L, 2, 0);
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.decayTo = itemid;
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponTransformEquipTo(lua_State* L)
{
// weapon:transformEquipTo(itemid)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.transformEquipTo = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponTransformDeEquipTo(lua_State* L)
{
// weapon:transformDeEquipTo(itemid)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.transformDeEquipTo = getNumber<uint16_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponShootType(lua_State* L)
{
// weapon:shootType(type)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.shootType = getNumber<ShootType_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponSlotType(lua_State* L)
{
// weapon:slotType(slot)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
std::string slot = getString(L, 2);
if (slot == "two-handed") {
it.slotPosition |= SLOTP_TWO_HAND;
} else {
it.slotPosition |= SLOTP_HAND;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponAmmoType(lua_State* L)
{
// weapon:ammoType(type)
WeaponDistance* weapon = getUserdata<WeaponDistance>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
std::string type = getString(L, 2);
if (type == "arrow") {
it.ammoType = AMMO_ARROW;
} else if (type == "bolt") {
it.ammoType = AMMO_BOLT;
} else {
std::cout << "[Warning - weapon:ammoType] Type \"" << type << "\" does not exist." << std::endl;
lua_pushnil(L);
return 1;
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponHitChance(lua_State* L)
{
// weapon:hitChance(chance)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.hitChance = getNumber<int8_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponMaxHitChance(lua_State* L)
{
// weapon:maxHitChance(max)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.maxHitChance = getNumber<int32_t>(L, 2);
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
int LuaScriptInterface::luaWeaponExtraElement(lua_State* L)
{
// weapon:extraElement(atk, combatType)
Weapon* weapon = getUserdata<Weapon>(L, 1);
if (weapon) {
uint16_t id = weapon->getID();
ItemType& it = Item::items.getItemType(id);
it.abilities.get()->elementDamage = getNumber<uint16_t>(L, 2);
if (!getNumber<CombatType_t>(L, 3)) {
std::string element = getString(L, 3);
std::string tmpStrValue = asLowerCaseString(element);
if (tmpStrValue == "earth") {
it.abilities.get()->elementType = COMBAT_EARTHDAMAGE;
} else if (tmpStrValue == "ice") {
it.abilities.get()->elementType = COMBAT_ICEDAMAGE;
} else if (tmpStrValue == "energy") {
it.abilities.get()->elementType = COMBAT_ENERGYDAMAGE;
} else if (tmpStrValue == "fire") {
it.abilities.get()->elementType = COMBAT_FIREDAMAGE;
} else if (tmpStrValue == "death") {
it.abilities.get()->elementType = COMBAT_DEATHDAMAGE;
} else if (tmpStrValue == "holy") {
it.abilities.get()->elementType = COMBAT_HOLYDAMAGE;
} else {
std::cout << "[Warning - weapon:extraElement] Type \"" << element << "\" does not exist." << std::endl;
}
} else {
it.abilities.get()->elementType = getNumber<CombatType_t>(L, 3);
}
pushBoolean(L, true);
} else {
lua_pushnil(L);
}
return 1;
}
// LuaEnvironment
LuaEnvironment::LuaEnvironment() : LuaScriptInterface("Main Interface") {}
LuaEnvironment::~LuaEnvironment()
{
delete testInterface;
closeState();
}
bool LuaEnvironment::initState()
{
luaState = luaL_newstate();
if (!luaState) {
return false;
}
luaL_openlibs(luaState);
registerFunctions();
runningEventId = EVENT_ID_USER;
return true;
}
bool LuaEnvironment::reInitState()
{
// TODO: get children, reload children
closeState();
return initState();
}
bool LuaEnvironment::closeState()
{
if (!luaState) {
return false;
}
for (const auto& combatEntry : combatIdMap) {
clearCombatObjects(combatEntry.first);
}
for (const auto& areaEntry : areaIdMap) {
clearAreaObjects(areaEntry.first);
}
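// release every pending timer callback and its captured parameters from the Lua registry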
for (auto& timerEntry : timerEvents) {
LuaTimerEventDesc timerEventDesc = std::move(timerEntry.second);
for (int32_t parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
}
combatIdMap.clear();
areaIdMap.clear();
timerEvents.clear();
cacheFiles.clear();
lua_close(luaState);
luaState = nullptr;
return true;
}
LuaScriptInterface* LuaEnvironment::getTestInterface()
{
if (!testInterface) {
testInterface = new LuaScriptInterface("Test Interface");
testInterface->initState();
}
return testInterface;
}
Combat_ptr LuaEnvironment::getCombatObject(uint32_t id) const
{
auto it = combatMap.find(id);
if (it == combatMap.end()) {
return nullptr;
}
return it->second;
}
Combat_ptr LuaEnvironment::createCombatObject(LuaScriptInterface* interface)
{
Combat_ptr combat = std::make_shared<Combat>();
combatMap[++lastCombatId] = combat;
combatIdMap[interface].push_back(lastCombatId);
return combat;
}
void LuaEnvironment::clearCombatObjects(LuaScriptInterface* interface)
{
auto it = combatIdMap.find(interface);
if (it == combatIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = combatMap.find(id);
if (itt != combatMap.end()) {
combatMap.erase(itt);
}
}
it->second.clear();
}
AreaCombat* LuaEnvironment::getAreaObject(uint32_t id) const
{
auto it = areaMap.find(id);
if (it == areaMap.end()) {
return nullptr;
}
return it->second;
}
uint32_t LuaEnvironment::createAreaObject(LuaScriptInterface* interface)
{
areaMap[++lastAreaId] = new AreaCombat;
areaIdMap[interface].push_back(lastAreaId);
return lastAreaId;
}
void LuaEnvironment::clearAreaObjects(LuaScriptInterface* interface)
{
auto it = areaIdMap.find(interface);
if (it == areaIdMap.end()) {
return;
}
for (uint32_t id : it->second) {
auto itt = areaMap.find(id);
if (itt != areaMap.end()) {
delete itt->second;
areaMap.erase(itt);
}
}
it->second.clear();
}
void LuaEnvironment::executeTimerEvent(uint32_t eventIndex)
{
auto it = timerEvents.find(eventIndex);
if (it == timerEvents.end()) {
return;
}
LuaTimerEventDesc timerEventDesc = std::move(it->second);
timerEvents.erase(it);
//push function
lua_rawgeti(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
//push parameters
for (auto parameter : boost::adaptors::reverse(timerEventDesc.parameters)) {
lua_rawgeti(luaState, LUA_REGISTRYINDEX, parameter);
}
//call the function
if (reserveScriptEnv()) {
ScriptEnvironment* env = getScriptEnv();
env->setTimerEvent();
env->setScriptId(timerEventDesc.scriptId, this);
callFunction(timerEventDesc.parameters.size());
} else {
std::cout << "[Error - LuaScriptInterface::executeTimerEvent] Call stack overflow" << std::endl;
}
//free resources
luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function);
for (auto parameter : timerEventDesc.parameters) {
luaL_unref(luaState, LUA_REGISTRYINDEX, parameter);
}
}
| 1 | 19,519 | it pushes a boolean, so it has to be explicitly cast to boolean with != nullptr; same with all other similar places | otland-forgottenserver | cpp |
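A minimal sketch of the explicit cast the reviewer asks for, assuming the patch under review hands a raw pointer to pushBoolean (the exact call site is in the patch, not shown here):

// implicit pointer-to-bool conversion the reviewer objects to:
//   pushBoolean(L, weapon);
// explicit cast, as requested:
Weapon* weapon = getUserdata<Weapon>(L, 1); // hypothetical call site
pushBoolean(L, weapon != nullptr);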
@@ -237,6 +237,7 @@ Publisher* ParticipantImpl::createPublisher(
delete(pubimpl);
return nullptr;
}
+ pubimpl->m_history.rebuild_instances();
pubimpl->mp_writer = writer;
//SAVE THE PUBLISHER PAIR
t_p_PublisherPair pubpair; | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file ParticipantImpl.cpp
*
*/
#include <fastrtps_deprecated/participant/ParticipantImpl.h>
#include <fastrtps/participant/Participant.h>
#include <fastdds/rtps/participant/ParticipantDiscoveryInfo.h>
#include <fastdds/rtps/reader/ReaderDiscoveryInfo.h>
#include <fastdds/rtps/writer/WriterDiscoveryInfo.h>
#include <fastrtps/participant/ParticipantListener.h>
#include <fastdds/dds/topic/TopicDataType.hpp>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastrtps/attributes/PublisherAttributes.h>
#include <fastrtps_deprecated/publisher/PublisherImpl.h>
#include <fastrtps/publisher/Publisher.h>
#include <fastrtps/attributes/SubscriberAttributes.h>
#include <fastrtps_deprecated/subscriber/SubscriberImpl.h>
#include <fastrtps/subscriber/Subscriber.h>
#include <fastdds/rtps/RTPSDomain.h>
#include <fastdds/rtps/transport/UDPv4Transport.h>
#include <fastdds/rtps/transport/UDPv6Transport.h>
#include <fastdds/rtps/transport/test_UDPv4Transport.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
#include <fastdds/dds/log/Log.hpp>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
using eprosima::fastdds::dds::TopicDataType;
ParticipantImpl::ParticipantImpl(
const ParticipantAttributes& patt,
Participant* pspart,
ParticipantListener* listen)
: m_att(patt)
, mp_rtpsParticipant(nullptr)
, mp_participant(pspart)
, mp_listener(listen)
#pragma warning (disable : 4355 )
, m_rtps_listener(this)
{
mp_participant->mp_impl = this;
}
ParticipantImpl::~ParticipantImpl()
{
while (m_publishers.size() > 0)
{
this->removePublisher(m_publishers.begin()->first);
}
while (m_subscribers.size() > 0)
{
this->removeSubscriber(m_subscribers.begin()->first);
}
if (this->mp_rtpsParticipant != nullptr)
{
RTPSDomain::removeRTPSParticipant(this->mp_rtpsParticipant);
}
delete(mp_participant);
}
bool ParticipantImpl::removePublisher(
Publisher* pub)
{
for (auto pit = this->m_publishers.begin(); pit != m_publishers.end(); ++pit)
{
if (pit->second->getGuid() == pub->getGuid())
{
delete(pit->second);
m_publishers.erase(pit);
return true;
}
}
return false;
}
bool ParticipantImpl::removeSubscriber(
Subscriber* sub)
{
for (auto sit = m_subscribers.begin(); sit != m_subscribers.end(); ++sit)
{
if (sit->second->getGuid() == sub->getGuid())
{
delete(sit->second);
m_subscribers.erase(sit);
return true;
}
}
return false;
}
const GUID_t& ParticipantImpl::getGuid() const
{
return this->mp_rtpsParticipant->getGuid();
}
Publisher* ParticipantImpl::createPublisher(
const PublisherAttributes& att,
PublisherListener* listen)
{
logInfo(PARTICIPANT, "CREATING PUBLISHER IN TOPIC: " << att.topic.getTopicName());
//Look for the correct type registration
TopicDataType* p_type = nullptr;
/// Preconditions
// Check the type was registered.
if (!getRegisteredType(att.topic.getTopicDataType().c_str(), &p_type))
{
logError(PARTICIPANT, "Type : " << att.topic.getTopicDataType() << " Not Registered");
return nullptr;
}
// Check the type supports keys.
if (att.topic.topicKind == WITH_KEY && !p_type->m_isGetKeyDefined)
{
logError(PARTICIPANT, "Keyed Topic needs getKey function");
return nullptr;
}
if (m_att.rtps.builtin.discovery_config.use_STATIC_EndpointDiscoveryProtocol)
{
if (att.getUserDefinedID() <= 0)
{
logError(PARTICIPANT, "Static EDP requires user defined Id");
return nullptr;
}
}
if (!att.unicastLocatorList.isValid())
{
logError(PARTICIPANT, "Unicast Locator List for Publisher contains invalid Locator");
return nullptr;
}
if (!att.multicastLocatorList.isValid())
{
logError(PARTICIPANT, " Multicast Locator List for Publisher contains invalid Locator");
return nullptr;
}
if (!att.remoteLocatorList.isValid())
{
logError(PARTICIPANT, "Remote Locator List for Publisher contains invalid Locator");
return nullptr;
}
if (!att.qos.checkQos() || !att.topic.checkQos())
{
return nullptr;
}
//TODO Build the implementation inside the user object.
PublisherImpl* pubimpl = new PublisherImpl(this, p_type, att, listen);
Publisher* pub = new Publisher(pubimpl);
pubimpl->mp_userPublisher = pub;
pubimpl->mp_rtpsParticipant = this->mp_rtpsParticipant;
WriterAttributes watt;
watt.throughputController = att.throughputController;
watt.endpoint.durabilityKind = att.qos.m_durability.durabilityKind();
watt.endpoint.endpointKind = WRITER;
watt.endpoint.multicastLocatorList = att.multicastLocatorList;
watt.endpoint.reliabilityKind = att.qos.m_reliability.kind == RELIABLE_RELIABILITY_QOS ? RELIABLE : BEST_EFFORT;
watt.endpoint.topicKind = att.topic.topicKind;
watt.endpoint.unicastLocatorList = att.unicastLocatorList;
watt.endpoint.remoteLocatorList = att.remoteLocatorList;
watt.endpoint.persistence_guid = att.persistence_guid;
watt.mode = att.qos.m_publishMode.kind ==
eprosima::fastrtps::SYNCHRONOUS_PUBLISH_MODE ? SYNCHRONOUS_WRITER : ASYNCHRONOUS_WRITER;
watt.endpoint.properties = att.properties;
if (att.getEntityID() > 0)
{
watt.endpoint.setEntityID((uint8_t)att.getEntityID());
}
if (att.getUserDefinedID() > 0)
{
watt.endpoint.setUserDefinedID((uint8_t)att.getUserDefinedID());
}
watt.times = att.times;
watt.liveliness_kind = att.qos.m_liveliness.kind;
watt.liveliness_lease_duration = att.qos.m_liveliness.lease_duration;
watt.liveliness_announcement_period = att.qos.m_liveliness.announcement_period;
watt.matched_readers_allocation = att.matched_subscriber_allocation;
// TODO(Ricardo) Remove in future
// Insert topic_name and partitions
Property property;
property.name("topic_name");
property.value(att.topic.getTopicName().c_str());
watt.endpoint.properties.properties().push_back(std::move(property));
if (att.qos.m_partition.names().size() > 0)
{
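// flatten all partition names into a single semicolon-separated property value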
property.name("partitions");
std::string partitions;
for (auto partition : att.qos.m_partition.names())
{
partitions += partition + ";";
}
property.value(std::move(partitions));
watt.endpoint.properties.properties().push_back(std::move(property));
}
if (att.qos.m_disablePositiveACKs.enabled &&
att.qos.m_disablePositiveACKs.duration != c_TimeInfinite)
{
watt.disable_positive_acks = true;
watt.keep_duration = att.qos.m_disablePositiveACKs.duration;
}
RTPSWriter* writer = RTPSDomain::createRTPSWriter(
this->mp_rtpsParticipant,
watt,
(WriterHistory*)&pubimpl->m_history,
(WriterListener*)&pubimpl->m_writerListener);
if (writer == nullptr)
{
logError(PARTICIPANT, "Problem creating associated Writer");
delete(pubimpl);
return nullptr;
}
pubimpl->mp_writer = writer;
//SAVE THE PUBLISHER PAIR
t_p_PublisherPair pubpair;
pubpair.first = pub;
pubpair.second = pubimpl;
m_publishers.push_back(pubpair);
//REGISTER THE WRITER
this->mp_rtpsParticipant->registerWriter(writer, att.topic, att.qos);
return pub;
}
std::vector<std::string> ParticipantImpl::getParticipantNames() const
{
return mp_rtpsParticipant->getParticipantNames();
}
Subscriber* ParticipantImpl::createSubscriber(
const SubscriberAttributes& att,
SubscriberListener* listen)
{
logInfo(PARTICIPANT, "CREATING SUBSCRIBER IN TOPIC: " << att.topic.getTopicName());
//Look for the correct type registration
TopicDataType* p_type = nullptr;
if (!getRegisteredType(att.topic.getTopicDataType().c_str(), &p_type))
{
logError(PARTICIPANT, "Type : " << att.topic.getTopicDataType() << " Not Registered");
return nullptr;
}
if (att.topic.topicKind == WITH_KEY && !p_type->m_isGetKeyDefined)
{
logError(PARTICIPANT, "Keyed Topic needs getKey function");
return nullptr;
}
if (m_att.rtps.builtin.discovery_config.use_STATIC_EndpointDiscoveryProtocol)
{
if (att.getUserDefinedID() <= 0)
{
logError(PARTICIPANT, "Static EDP requires user defined Id");
return nullptr;
}
}
if (!att.unicastLocatorList.isValid())
{
logError(PARTICIPANT, "Unicast Locator List for Subscriber contains invalid Locator");
return nullptr;
}
if (!att.multicastLocatorList.isValid())
{
logError(PARTICIPANT, " Multicast Locator List for Subscriber contains invalid Locator");
return nullptr;
}
if (!att.remoteLocatorList.isValid())
{
logError(PARTICIPANT, "Output Locator List for Subscriber contains invalid Locator");
return nullptr;
}
if (!att.qos.checkQos() || !att.topic.checkQos())
{
return nullptr;
}
SubscriberImpl* subimpl = new SubscriberImpl(this, p_type, att, listen);
Subscriber* sub = new Subscriber(subimpl);
subimpl->mp_userSubscriber = sub;
subimpl->mp_rtpsParticipant = this->mp_rtpsParticipant;
ReaderAttributes ratt;
ratt.endpoint.durabilityKind = att.qos.m_durability.durabilityKind();
ratt.endpoint.endpointKind = READER;
ratt.endpoint.multicastLocatorList = att.multicastLocatorList;
ratt.endpoint.reliabilityKind = att.qos.m_reliability.kind == RELIABLE_RELIABILITY_QOS ? RELIABLE : BEST_EFFORT;
ratt.endpoint.topicKind = att.topic.topicKind;
ratt.endpoint.unicastLocatorList = att.unicastLocatorList;
ratt.endpoint.remoteLocatorList = att.remoteLocatorList;
ratt.expectsInlineQos = att.expectsInlineQos;
ratt.endpoint.persistence_guid = att.persistence_guid;
ratt.endpoint.properties = att.properties;
if (att.getEntityID() > 0)
{
ratt.endpoint.setEntityID((uint8_t)att.getEntityID());
}
if (att.getUserDefinedID() > 0)
{
ratt.endpoint.setUserDefinedID((uint8_t)att.getUserDefinedID());
}
ratt.times = att.times;
ratt.matched_writers_allocation = att.matched_publisher_allocation;
ratt.liveliness_kind_ = att.qos.m_liveliness.kind;
ratt.liveliness_lease_duration = att.qos.m_liveliness.lease_duration;
// TODO(Ricardo) Remove in future
// Insert topic_name and partitions
Property property;
property.name("topic_name");
property.value(att.topic.getTopicName().c_str());
ratt.endpoint.properties.properties().push_back(std::move(property));
if (att.qos.m_partition.names().size() > 0)
{
property.name("partitions");
std::string partitions;
for (auto partition : att.qos.m_partition.names())
{
partitions += partition + ";";
}
property.value(std::move(partitions));
ratt.endpoint.properties.properties().push_back(std::move(property));
}
if (att.qos.m_disablePositiveACKs.enabled)
{
ratt.disable_positive_acks = true;
}
RTPSReader* reader = RTPSDomain::createRTPSReader(this->mp_rtpsParticipant,
ratt,
(ReaderHistory*)&subimpl->m_history,
(ReaderListener*)&subimpl->m_readerListener);
if (reader == nullptr)
{
logError(PARTICIPANT, "Problem creating associated Reader");
delete(subimpl);
return nullptr;
}
subimpl->mp_reader = reader;
//SAVE THE SUBSCRIBER PAIR
t_p_SubscriberPair subpair;
subpair.first = sub;
subpair.second = subimpl;
m_subscribers.push_back(subpair);
//REGISTER THE READER
this->mp_rtpsParticipant->registerReader(reader, att.topic, att.qos);
return sub;
}
bool ParticipantImpl::getRegisteredType(
const char* typeName,
TopicDataType** type)
{
for (std::vector<TopicDataType*>::iterator it = m_types.begin();
it != m_types.end(); ++it)
{
if (strcmp((*it)->getName(), typeName) == 0)
{
*type = *it;
return true;
}
}
return false;
}
bool ParticipantImpl::registerType(
TopicDataType* type)
{
if (type->m_typeSize <= 0)
{
logError(PARTICIPANT, "Registered Type must have maximum byte size > 0");
return false;
}
const char* name = type->getName();
if (strlen(name) <= 0)
{
logError(PARTICIPANT, "Registered Type must have a name");
return false;
}
for (auto ty = m_types.begin(); ty != m_types.end(); ++ty)
{
if (strcmp((*ty)->getName(), type->getName()) == 0)
{
logError(PARTICIPANT, "Type with the same name already exists:" << type->getName());
return false;
}
}
m_types.push_back(type);
logInfo(PARTICIPANT, "Type " << type->getName() << " registered.");
return true;
}
bool ParticipantImpl::unregisterType(
const char* typeName)
{
bool retValue = true;
std::vector<TopicDataType*>::iterator typeit;
for (typeit = m_types.begin(); typeit != m_types.end(); ++typeit)
{
if (strcmp((*typeit)->getName(), typeName) == 0)
{
break;
}
}
if (typeit != m_types.end())
{
bool inUse = false;
for (auto sit = m_subscribers.begin(); !inUse && sit != m_subscribers.end(); ++sit)
{
if (strcmp(sit->second->getType()->getName(), typeName) == 0)
{
inUse = true;
}
}
for (auto pit = m_publishers.begin(); pit != m_publishers.end(); ++pit)
{
if (strcmp(pit->second->getType()->getName(), typeName) == 0)
{
inUse = true;
}
}
if (!inUse)
{
m_types.erase(typeit);
}
else
{
retValue = false;
}
}
return retValue;
}
void ParticipantImpl::MyRTPSParticipantListener::onParticipantDiscovery(
RTPSParticipant*,
rtps::ParticipantDiscoveryInfo&& info)
{
if (this->mp_participantimpl->mp_listener != nullptr)
{
this->mp_participantimpl->mp_listener->onParticipantDiscovery(mp_participantimpl->mp_participant, std::move(
info));
}
}
#if HAVE_SECURITY
void ParticipantImpl::MyRTPSParticipantListener::onParticipantAuthentication(
RTPSParticipant*,
ParticipantAuthenticationInfo&& info)
{
if (this->mp_participantimpl->mp_listener != nullptr)
{
this->mp_participantimpl->mp_listener->onParticipantAuthentication(mp_participantimpl->mp_participant, std::move(
info));
}
}
#endif
void ParticipantImpl::MyRTPSParticipantListener::onReaderDiscovery(
RTPSParticipant*,
rtps::ReaderDiscoveryInfo&& info)
{
if (this->mp_participantimpl->mp_listener != nullptr)
{
this->mp_participantimpl->mp_listener->onSubscriberDiscovery(mp_participantimpl->mp_participant,
std::move(info));
}
}
void ParticipantImpl::MyRTPSParticipantListener::onWriterDiscovery(
RTPSParticipant*,
rtps::WriterDiscoveryInfo&& info)
{
if (this->mp_participantimpl->mp_listener != nullptr)
{
this->mp_participantimpl->mp_listener->onPublisherDiscovery(mp_participantimpl->mp_participant,
std::move(info));
}
}
bool ParticipantImpl::newRemoteEndpointDiscovered(
const GUID_t& partguid,
uint16_t endpointId,
EndpointKind_t kind)
{
if (kind == WRITER)
{
return this->mp_rtpsParticipant->newRemoteWriterDiscovered(partguid, endpointId);
}
else
{
return this->mp_rtpsParticipant->newRemoteReaderDiscovered(partguid, endpointId);
}
}
ResourceEvent& ParticipantImpl::get_resource_event() const
{
return mp_rtpsParticipant->get_resource_event();
}
void ParticipantImpl::assert_liveliness()
{
if (mp_rtpsParticipant->wlp() != nullptr)
{
mp_rtpsParticipant->wlp()->assert_liveliness_manual_by_participant();
}
else
{
logError(PARTICIPANT, "Invalid WLP, cannot assert liveliness of participant");
}
}
const Participant* ParticipantImpl::get_participant() const
{
return mp_participant;
}
| 1 | 19,094 | It would be nice to have an equivalent of this call on `DataWriterImpl::enable` | eProsima-Fast-DDS | cpp |
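A rough sketch of the equivalent call on the DDS-layer writer that the reviewer suggests; DataWriterImpl's member name history_ and the enable() body here are assumptions for illustration, not the actual Fast-DDS API:

ReturnCode_t DataWriterImpl::enable()
{
    // ... existing code that creates the underlying RTPS writer ...
    history_.rebuild_instances(); // assumed member; mirrors pubimpl->m_history.rebuild_instances() in createPublisher above
    return ReturnCode_t::RETCODE_OK;
}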
@@ -1112,6 +1112,7 @@ func (a *PipedAPI) CreateDeploymentChain(ctx context.Context, req *pipedservice.
},
},
},
+ Status: model.ChainBlockStatus_DEPLOYMENT_BLOCK_RUNNING,
StartedAt: time.Now().Unix(),
})
| 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcapi
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"errors"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/analysisresultstore"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/service/pipedservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/filestore"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth"
)
// PipedAPI implements the behaviors for the gRPC definitions of PipedAPI.
type PipedAPI struct {
applicationStore datastore.ApplicationStore
deploymentStore datastore.DeploymentStore
deploymentChainStore datastore.DeploymentChainStore
environmentStore datastore.EnvironmentStore
pipedStore datastore.PipedStore
projectStore datastore.ProjectStore
eventStore datastore.EventStore
stageLogStore stagelogstore.Store
applicationLiveStateStore applicationlivestatestore.Store
analysisResultStore analysisresultstore.Store
commandStore commandstore.Store
commandOutputPutter commandOutputPutter
appPipedCache cache.Cache
deploymentPipedCache cache.Cache
envProjectCache cache.Cache
pipedStatCache cache.Cache
redis redis.Redis
webBaseURL string
logger *zap.Logger
}
// NewPipedAPI creates a new PipedAPI instance.
func NewPipedAPI(ctx context.Context, ds datastore.DataStore, sls stagelogstore.Store, alss applicationlivestatestore.Store, las analysisresultstore.Store, cs commandstore.Store, hc cache.Cache, rd redis.Redis, cop commandOutputPutter, webBaseURL string, logger *zap.Logger) *PipedAPI {
a := &PipedAPI{
applicationStore: datastore.NewApplicationStore(ds),
deploymentStore: datastore.NewDeploymentStore(ds),
deploymentChainStore: datastore.NewDeploymentChainStore(ds),
environmentStore: datastore.NewEnvironmentStore(ds),
pipedStore: datastore.NewPipedStore(ds),
projectStore: datastore.NewProjectStore(ds),
eventStore: datastore.NewEventStore(ds),
stageLogStore: sls,
applicationLiveStateStore: alss,
analysisResultStore: las,
commandStore: cs,
commandOutputPutter: cop,
appPipedCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
deploymentPipedCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
envProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
pipedStatCache: hc,
redis: rd,
webBaseURL: webBaseURL,
logger: logger.Named("piped-api"),
}
return a
}
// Register registers all handling of this service into the specified gRPC server.
func (a *PipedAPI) Register(server *grpc.Server) {
pipedservice.RegisterPipedServiceServer(server, a)
}
// Ping is periodically sent to report its realtime status/stats to control-plane.
// The received stats will be pushed to the metrics collector.
// Note: This rpc is deprecated, use ReportStat instead.
func (a *PipedAPI) Ping(ctx context.Context, req *pipedservice.PingRequest) (*pipedservice.PingResponse, error) {
return &pipedservice.PingResponse{}, nil
// return nil, status.Error(codes.Unimplemented, "")
}
// ReportStat is periodically sent to report its realtime status/stats to control-plane.
// The received stats will be pushed to the metrics collector.
func (a *PipedAPI) ReportStat(ctx context.Context, req *pipedservice.ReportStatRequest) (*pipedservice.ReportStatResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
now := time.Now().Unix()
val, err := json.Marshal(model.PipedStat{PipedId: pipedID, Metrics: req.PipedStats, Timestamp: now})
if err != nil {
a.logger.Error("failed to store the reported piped stat",
zap.String("piped-id", pipedID),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to encode the reported piped stat")
}
if err := a.pipedStatCache.Put(pipedID, val); err != nil {
a.logger.Error("failed to store the reported piped stat",
zap.String("piped-id", pipedID),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to store the reported piped stat")
}
return &pipedservice.ReportStatResponse{}, nil
}
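
// A minimal illustrative counterpart (an assumption, not part of this file's
// API surface): the collector side could decode the cached stat back like
// this, mirroring the json.Marshal call above.
func decodePipedStat(b []byte) (*model.PipedStat, error) {
	var ps model.PipedStat
	if err := json.Unmarshal(b, &ps); err != nil {
		return nil, err
	}
	return &ps, nil
}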
// ReportPipedMeta is sent by piped while starting up to report its metadata
// such as configured cloud providers.
func (a *PipedAPI) ReportPipedMeta(ctx context.Context, req *pipedservice.ReportPipedMetaRequest) (*pipedservice.ReportPipedMetaResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
now := time.Now().Unix()
connStatus := model.Piped_ONLINE
if err = a.pipedStore.UpdatePiped(ctx, pipedID, datastore.PipedMetadataUpdater(req.CloudProviders, req.Repositories, connStatus, req.SecretEncryption, req.Version, now)); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "piped is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update the piped metadata",
zap.String("piped-id", pipedID),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update the piped metadata")
}
}
piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
if err != nil {
return nil, err
}
return &pipedservice.ReportPipedMetaResponse{
Name: piped.Name,
WebBaseUrl: a.webBaseURL,
}, nil
}
// GetEnvironment finds and returns the environment for the specified ID.
func (a *PipedAPI) GetEnvironment(ctx context.Context, req *pipedservice.GetEnvironmentRequest) (*pipedservice.GetEnvironmentResponse, error) {
projectID, _, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateEnvBelongsToProject(ctx, req.Id, projectID); err != nil {
return nil, err
}
env, err := a.environmentStore.GetEnvironment(ctx, req.Id)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "environment is not found")
}
if err != nil {
a.logger.Error("failed to get environment", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to get environment")
}
return &pipedservice.GetEnvironmentResponse{
Environment: env,
}, nil
}
// ListApplications returns a list of registered applications
// that should be managed by the requested piped.
// Disabled applications should not be included in the response.
// Piped uses this RPC to fetch and sync the application configuration into its local database.
func (a *PipedAPI) ListApplications(ctx context.Context, req *pipedservice.ListApplicationsRequest) (*pipedservice.ListApplicationsResponse, error) {
projectID, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: projectID,
},
{
Field: "PipedId",
Operator: datastore.OperatorEqual,
Value: pipedID,
},
{
Field: "Disabled",
Operator: datastore.OperatorEqual,
Value: false,
},
},
}
// TODO: Support pagination in ListApplications
apps, _, err := a.applicationStore.ListApplications(ctx, opts)
if err != nil {
a.logger.Error("failed to fetch applications", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to fetch applications")
}
return &pipedservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
// ReportApplicationSyncState is used to update the sync status of an application.
func (a *PipedAPI) ReportApplicationSyncState(ctx context.Context, req *pipedservice.ReportApplicationSyncStateRequest) (*pipedservice.ReportApplicationSyncStateResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil {
return nil, err
}
err = a.applicationStore.PutApplicationSyncState(ctx, req.ApplicationId, req.State)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update application sync state",
zap.String("application-id", req.ApplicationId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update the application sync state")
}
}
return &pipedservice.ReportApplicationSyncStateResponse{}, nil
}
// ReportApplicationDeployingStatus is used to report whether the specified application is deploying or not.
func (a *PipedAPI) ReportApplicationDeployingStatus(ctx context.Context, req *pipedservice.ReportApplicationDeployingStatusRequest) (*pipedservice.ReportApplicationDeployingStatusResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil {
return nil, err
}
err = a.applicationStore.UpdateApplication(ctx, req.ApplicationId, func(app *model.Application) error {
app.Deploying = req.Deploying
return nil
})
if err == nil {
return &pipedservice.ReportApplicationDeployingStatusResponse{}, nil
}
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update deploying status of application",
zap.String("application-id", req.ApplicationId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update deploying status of application")
}
}
// ReportApplicationMostRecentDeployment is used to update the basic information about
// the most recent deployment of a specific application.
func (a *PipedAPI) ReportApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.ReportApplicationMostRecentDeploymentRequest) (*pipedservice.ReportApplicationMostRecentDeploymentResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil {
return nil, err
}
err = a.applicationStore.PutApplicationMostRecentDeployment(ctx, req.ApplicationId, req.Status, req.Deployment)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "application is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update application completed deployment",
zap.String("application-id", req.ApplicationId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update the application completed deployment")
}
}
return &pipedservice.ReportApplicationMostRecentDeploymentResponse{}, nil
}
// GetApplicationMostRecentDeployment returns the most recent deployment of the given application.
func (a *PipedAPI) GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil {
return nil, err
}
app, err := a.applicationStore.GetApplication(ctx, req.ApplicationId)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "application is not found")
}
if err != nil {
a.logger.Error("failed to get application", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to get application")
}
if req.Status == model.DeploymentStatus_DEPLOYMENT_SUCCESS && app.MostRecentlySuccessfulDeployment != nil {
return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlySuccessfulDeployment}, nil
}
if req.Status == model.DeploymentStatus_DEPLOYMENT_PENDING && app.MostRecentlyTriggeredDeployment != nil {
return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlyTriggeredDeployment}, nil
}
return nil, status.Error(codes.NotFound, "deployment is not found")
}
func (a *PipedAPI) GetDeployment(ctx context.Context, req *pipedservice.GetDeploymentRequest) (*pipedservice.GetDeploymentResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
deployment, err := getDeployment(ctx, a.deploymentStore, req.Id, a.logger)
if err != nil {
return nil, err
}
if deployment.PipedId != pipedID {
return nil, status.Error(codes.PermissionDenied, "requested deployment doesn't belong to the piped")
}
return &pipedservice.GetDeploymentResponse{
Deployment: deployment,
}, nil
}
// ListNotCompletedDeployments returns a list of not completed deployments
// which are managed by this piped.
// DeploymentController component uses this RPC to spawn/sync its local deployment executors.
func (a *PipedAPI) ListNotCompletedDeployments(ctx context.Context, req *pipedservice.ListNotCompletedDeploymentsRequest) (*pipedservice.ListNotCompletedDeploymentsResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "PipedId",
Operator: datastore.OperatorEqual,
Value: pipedID,
},
// TODO: Change to a simple conditional clause without using an OR clause for portability.
// Note: Firestore does not support the OR operator.
// See more: https://firebase.google.com/docs/firestore/query-data/queries?hl=en
{
Field: "Status",
Operator: datastore.OperatorIn,
Value: model.GetNotCompletedDeploymentStatuses(),
},
},
}
deployments, cursor, err := a.deploymentStore.ListDeployments(ctx, opts)
if err != nil {
a.logger.Error("failed to fetch deployments", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to fetch deployments")
}
return &pipedservice.ListNotCompletedDeploymentsResponse{
Deployments: deployments,
Cursor: cursor,
}, nil
}
// CreateDeployment creates/triggers a new deployment for an application
// that is managed by this piped.
// This will be used by DeploymentTrigger component.
func (a *PipedAPI) CreateDeployment(ctx context.Context, req *pipedservice.CreateDeploymentRequest) (*pipedservice.CreateDeploymentResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.Deployment.ApplicationId, pipedID); err != nil {
return nil, err
}
err = a.deploymentStore.AddDeployment(ctx, req.Deployment)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "deployment already exists")
}
if err != nil {
a.logger.Error("failed to create deployment", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to create deployment")
}
// If the deployment doesn't belong to a deployment chain, return immediately.
if req.Deployment.DeploymentChainId == "" {
return &pipedservice.CreateDeploymentResponse{}, nil
}
// Otherwise, add the created deployment ref to its chain block model.
if err = a.deploymentChainStore.UpdateDeploymentChain(ctx, req.Deployment.DeploymentChainId, datastore.DeploymentChainAddDeploymentToBlock(req.Deployment)); err != nil {
return nil, status.Error(codes.Internal, "failed to add deployment ref to its chain block")
}
return &pipedservice.CreateDeploymentResponse{}, nil
}
// ReportDeploymentPlanned used by piped to update the status
// of a specific deployment to PLANNED.
func (a *PipedAPI) ReportDeploymentPlanned(ctx context.Context, req *pipedservice.ReportDeploymentPlannedRequest) (*pipedservice.ReportDeploymentPlannedResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
updater := datastore.DeploymentToPlannedUpdater(req.Summary, req.StatusReason, req.RunningCommitHash, req.Version, req.Stages)
err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "deployment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update deployment to be planned",
zap.String("deployment-id", req.DeploymentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update deployment to be planned")
}
}
if err = a.updateDeploymentRefStatusIfNecessary(ctx, req.DeploymentChainId, req.DeploymentChainBlockIndex, req.DeploymentId, model.DeploymentStatus_DEPLOYMENT_PLANNED, req.StatusReason); err != nil {
return nil, status.Error(codes.Internal, "unable to update deployment ref status of the deployment chain this deployment belongs to")
}
return &pipedservice.ReportDeploymentPlannedResponse{}, nil
}
// ReportDeploymentStatusChanged is used to update the status
// of a specific deployment to RUNNING or ROLLING_BACK.
func (a *PipedAPI) ReportDeploymentStatusChanged(ctx context.Context, req *pipedservice.ReportDeploymentStatusChangedRequest) (*pipedservice.ReportDeploymentStatusChangedResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
updater := datastore.DeploymentStatusUpdater(req.Status, req.StatusReason)
err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "deployment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update deployment status",
zap.String("deployment-id", req.DeploymentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update deployment status")
}
}
if err = a.updateDeploymentRefStatusIfNecessary(ctx, req.DeploymentChainId, req.DeploymentChainBlockIndex, req.DeploymentId, req.Status, req.StatusReason); err != nil {
return nil, status.Error(codes.Internal, "unable to update deployment ref status of the deployment chain this deployment belongs to")
}
return &pipedservice.ReportDeploymentStatusChangedResponse{}, nil
}
// ReportDeploymentCompleted used by piped to update the status
// of a specific deployment to SUCCESS | FAILURE | CANCELLED.
func (a *PipedAPI) ReportDeploymentCompleted(ctx context.Context, req *pipedservice.ReportDeploymentCompletedRequest) (*pipedservice.ReportDeploymentCompletedResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
updater := datastore.DeploymentToCompletedUpdater(req.Status, req.StageStatuses, req.StatusReason, req.CompletedAt)
err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "deployment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update deployment to be completed",
zap.String("deployment-id", req.DeploymentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update deployment to be completed")
}
}
if err = a.updateDeploymentRefStatusIfNecessary(ctx, req.DeploymentChainId, req.DeploymentChainBlockIndex, req.DeploymentId, req.Status, req.StatusReason); err != nil {
return nil, status.Error(codes.Internal, "unable to update deployment ref status of the deployment chain this deployment belongs to")
}
return &pipedservice.ReportDeploymentCompletedResponse{}, nil
}
func (a *PipedAPI) updateDeploymentRefStatusIfNecessary(ctx context.Context, deploymentChainID string, blockIndex uint32, deploymentID string, status model.DeploymentStatus, reason string) error {
// If the deployment does not belong to any deployment chain, there is no need to update anything.
if deploymentChainID == "" {
return nil
}
dcUpdater := datastore.DeploymentChainNodeDeploymentStatusUpdater(blockIndex, deploymentID, status, reason)
if err := a.deploymentChainStore.UpdateDeploymentChain(ctx, deploymentChainID, dcUpdater); err != nil {
return err
}
return nil
}
// SaveDeploymentMetadata used by piped to persist the metadata of a specific deployment.
func (a *PipedAPI) SaveDeploymentMetadata(ctx context.Context, req *pipedservice.SaveDeploymentMetadataRequest) (*pipedservice.SaveDeploymentMetadataResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
err = a.deploymentStore.PutDeploymentMetadata(ctx, req.DeploymentId, req.Metadata)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.InvalidArgument, "deployment is not found")
}
if err != nil {
a.logger.Error("failed to save deployment metadata",
zap.String("deployment-id", req.DeploymentId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to save deployment metadata")
}
return &pipedservice.SaveDeploymentMetadataResponse{}, nil
}
// SaveStageMetadata used by piped to persist the metadata
// of a specific stage of a deployment.
func (a *PipedAPI) SaveStageMetadata(ctx context.Context, req *pipedservice.SaveStageMetadataRequest) (*pipedservice.SaveStageMetadataResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
err = a.deploymentStore.PutDeploymentStageMetadata(ctx, req.DeploymentId, req.StageId, req.Metadata)
if err != nil {
switch errors.Unwrap(err) {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "deployment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to save deployment stage metadata",
zap.String("deployment-id", req.DeploymentId),
zap.String("stage-id", req.StageId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to save deployment stage metadata")
}
}
return &pipedservice.SaveStageMetadataResponse{}, nil
}
// ReportStageLogs is sent by piped to save the log of a pipeline stage.
func (a *PipedAPI) ReportStageLogs(ctx context.Context, req *pipedservice.ReportStageLogsRequest) (*pipedservice.ReportStageLogsResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
err = a.stageLogStore.AppendLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.Blocks)
if errors.Is(err, stagelogstore.ErrAlreadyCompleted) {
return nil, status.Error(codes.FailedPrecondition, "could not append the logs because the stage was already completed")
}
if err != nil {
a.logger.Error("failed to append logs", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to append logs")
}
return &pipedservice.ReportStageLogsResponse{}, nil
}
// ReportStageLogsFromLastCheckpoint is used to save the full logs from the most recently saved point.
func (a *PipedAPI) ReportStageLogsFromLastCheckpoint(ctx context.Context, req *pipedservice.ReportStageLogsFromLastCheckpointRequest) (*pipedservice.ReportStageLogsFromLastCheckpointResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
err = a.stageLogStore.AppendLogsFromLastCheckpoint(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.Blocks, req.Completed)
if errors.Is(err, stagelogstore.ErrAlreadyCompleted) {
return nil, status.Error(codes.FailedPrecondition, "could not append the logs because the stage was already completed")
}
if err != nil {
a.logger.Error("failed to append logs", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to append logs")
}
return &pipedservice.ReportStageLogsFromLastCheckpointResponse{}, nil
}
// ReportStageStatusChanged used by piped to update the status
// of a specific stage of a deployment.
func (a *PipedAPI) ReportStageStatusChanged(ctx context.Context, req *pipedservice.ReportStageStatusChangedRequest) (*pipedservice.ReportStageStatusChangedResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
updater := datastore.StageStatusChangedUpdater(req.StageId, req.Status, req.StatusReason, req.Requires, req.Visible, req.RetriedCount, req.CompletedAt)
err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.InvalidArgument, "deployment is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update stage status",
zap.String("deployment-id", req.DeploymentId),
zap.String("stage-id", req.StageId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update stage status")
}
}
return &pipedservice.ReportStageStatusChangedResponse{}, nil
}
// ListUnhandledCommands is periodically called by piped to obtain the commands
// that should be handled.
// Whenever a user makes an interaction from the WebUI (cancel/approve/retry/sync)
// a new command with a unique identifier will be generated and saved into the datastore.
// Piped uses this RPC to list all still-unhandled commands, handle them,
// then report the result back to the server.
// On the other side, the web will periodically check the command status and feed the result back to the user.
// In the future, we may need a solution to remove all old handled commands from the datastore to save space.
func (a *PipedAPI) ListUnhandledCommands(ctx context.Context, req *pipedservice.ListUnhandledCommandsRequest) (*pipedservice.ListUnhandledCommandsResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
cmds, err := a.commandStore.ListUnhandledCommands(ctx, pipedID)
if err != nil {
a.logger.Error("failed to fetch unhandled commands", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to list unhandled commands")
}
return &pipedservice.ListUnhandledCommandsResponse{
Commands: cmds,
}, nil
}
// ReportCommandHandled is called by piped to mark a specific command as handled.
// The request payload will contain the handle status as well as any additional result data.
// The handle result should be updated to both datastore and cache (for reading from web).
func (a *PipedAPI) ReportCommandHandled(ctx context.Context, req *pipedservice.ReportCommandHandledRequest) (*pipedservice.ReportCommandHandledResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
cmd, err := a.getCommand(ctx, req.CommandId)
if err != nil {
return nil, err
}
if pipedID != cmd.PipedId {
return nil, status.Error(codes.PermissionDenied, "the current piped does not own the requested command")
}
if len(req.Output) > 0 {
if err := a.commandOutputPutter.Put(ctx, req.CommandId, req.Output); err != nil {
a.logger.Error("failed to store output of command",
zap.String("command_id", req.CommandId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "Failed to store output of command")
}
}
err = a.commandStore.UpdateCommandHandled(ctx, req.CommandId, req.Status, req.Metadata, req.HandledAt)
if err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Error(codes.NotFound, "command is not found")
case datastore.ErrInvalidArgument:
return nil, status.Error(codes.InvalidArgument, "invalid value for update")
default:
a.logger.Error("failed to update command",
zap.String("command-id", req.CommandId),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "failed to update command")
}
}
return &pipedservice.ReportCommandHandledResponse{}, nil
}
func (a *PipedAPI) getCommand(ctx context.Context, commandID string) (*model.Command, error) {
	cmd, err := a.commandStore.GetCommand(ctx, commandID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "command is not found")
}
if err != nil {
return nil, status.Error(codes.Internal, "failed to get command")
}
return cmd, nil
}
// ReportApplicationLiveState is periodically sent to report the full state of an application.
// For a kubernetes application, this contains a full tree of its kubernetes resources.
// The tree data should be written into filestore immediately and then the state in cache should be refreshed too.
func (a *PipedAPI) ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest) (*pipedservice.ReportApplicationLiveStateResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.Snapshot.ApplicationId, pipedID); err != nil {
return nil, err
}
if err := a.applicationLiveStateStore.PutStateSnapshot(ctx, req.Snapshot); err != nil {
return nil, status.Error(codes.Internal, "failed to report application live state")
}
return &pipedservice.ReportApplicationLiveStateResponse{}, nil
}
// ReportApplicationLiveStateEvents is sent by piped to submit one or multiple events
// about the changes of application state.
// Control plane uses the received events to update the state of application-resource-tree.
// We want to start with a simple solution at this initial stage of development,
// so the API server just handles it as below:
// - loads the related application-resource-tree from filestore
// - checks and builds the new state for the application-resource-tree
// - updates the new state into filestore and cache (cache data is for reading while handling web requests)
// In the future, we may want to redesign the behavior of this RPC by using a pubsub/queue pattern.
// After receiving the events, all of them will be published into a queue immediately,
// and then another Handler service will pick them up in order to build the new state.
// That way we can control the traffic to the datastore in a better way.
func (a *PipedAPI) ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) {
a.applicationLiveStateStore.PatchKubernetesApplicationLiveState(ctx, req.KubernetesEvents)
// TODO: Patch Terraform application live state
// TODO: Patch Cloud Run application live state
// TODO: Patch Lambda application live state
return &pipedservice.ReportApplicationLiveStateEventsResponse{}, nil
}
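
// Illustrative sketch only (an assumption, not used anywhere in this file): a
// minimal in-process queue showing the pubsub/queue shape described in the
// comment above. A real redesign would publish to a durable queue and have a
// separate handler service consume the events in order.
type liveStateEventQueue struct {
	ch chan interface{}
}

// publish enqueues the received events for a downstream consumer.
func (q *liveStateEventQueue) publish(events ...interface{}) {
	for _, e := range events {
		q.ch <- e
	}
}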
// GetLatestEvent returns the latest event that meets the given conditions.
func (a *PipedAPI) GetLatestEvent(ctx context.Context, req *pipedservice.GetLatestEventRequest) (*pipedservice.GetLatestEventResponse, error) {
projectID, _, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
// Try to fetch the most recently registered event that has the given parameters.
opts := datastore.ListOptions{
Limit: 1,
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: projectID,
},
{
Field: "Name",
Operator: datastore.OperatorEqual,
Value: req.Name,
},
{
Field: "EventKey",
Operator: datastore.OperatorEqual,
Value: model.MakeEventKey(req.Name, req.Labels),
},
},
Orders: []datastore.Order{
{
Field: "CreatedAt",
Direction: datastore.Desc,
},
{
Field: "Id",
Direction: datastore.Asc,
},
},
}
events, err := a.eventStore.ListEvents(ctx, opts)
if err != nil {
a.logger.Error("failed to list events", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to list event")
}
if len(events) == 0 {
return nil, status.Error(codes.NotFound, "no events found")
}
return &pipedservice.GetLatestEventResponse{
Event: events[0],
}, nil
}
// ListEvents returns a list of Events inside the given range.
func (a *PipedAPI) ListEvents(ctx context.Context, req *pipedservice.ListEventsRequest) (*pipedservice.ListEventsResponse, error) {
projectID, _, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
// Build options based on the request.
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: projectID,
},
},
}
if req.From > 0 {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "CreatedAt",
Operator: datastore.OperatorGreaterThanOrEqual,
Value: req.From,
})
}
if req.To > 0 {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "CreatedAt",
Operator: datastore.OperatorLessThan,
Value: req.To,
})
}
switch req.Order {
case pipedservice.ListOrder_ASC:
opts.Orders = []datastore.Order{
{
Field: "CreatedAt",
Direction: datastore.Asc,
},
{
Field: "Id",
Direction: datastore.Asc,
},
}
case pipedservice.ListOrder_DESC:
opts.Orders = []datastore.Order{
{
Field: "CreatedAt",
Direction: datastore.Desc,
},
{
Field: "Id",
Direction: datastore.Asc,
},
}
}
events, err := a.eventStore.ListEvents(ctx, opts)
if err != nil {
a.logger.Error("failed to list events", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to list events")
}
return &pipedservice.ListEventsResponse{
Events: events,
}, nil
}
func (a *PipedAPI) ReportEventsHandled(ctx context.Context, req *pipedservice.ReportEventsHandledRequest) (*pipedservice.ReportEventsHandledResponse, error) {
_, _, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
for _, id := range req.EventIds {
if err := a.eventStore.MarkEventHandled(ctx, id); err != nil {
switch err {
case datastore.ErrNotFound:
return nil, status.Errorf(codes.NotFound, "event %q is not found", id)
default:
a.logger.Error("failed to mark event as handled",
zap.String("event-id", id),
zap.Error(err),
)
return nil, status.Errorf(codes.Internal, "failed to mark event %q as handled", id)
}
}
}
return &pipedservice.ReportEventsHandledResponse{}, nil
}
func (a *PipedAPI) GetLatestAnalysisResult(ctx context.Context, req *pipedservice.GetLatestAnalysisResultRequest) (*pipedservice.GetLatestAnalysisResultResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil {
return nil, err
}
result, err := a.analysisResultStore.GetLatestAnalysisResult(ctx, req.ApplicationId)
if errors.Is(err, filestore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "the most recent analysis result is not found")
}
if err != nil {
a.logger.Error("failed to get the most recent analysis result", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to get the most recent analysis result")
}
return &pipedservice.GetLatestAnalysisResultResponse{
AnalysisResult: result,
}, nil
}
func (a *PipedAPI) PutLatestAnalysisResult(ctx context.Context, req *pipedservice.PutLatestAnalysisResultRequest) (*pipedservice.PutLatestAnalysisResultResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil {
return nil, err
}
err = a.analysisResultStore.PutLatestAnalysisResult(ctx, req.ApplicationId, req.AnalysisResult)
if err != nil {
a.logger.Error("failed to put the most recent analysis result", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to put the most recent analysis result")
}
return &pipedservice.PutLatestAnalysisResultResponse{}, nil
}
func (a *PipedAPI) GetDesiredVersion(ctx context.Context, _ *pipedservice.GetDesiredVersionRequest) (*pipedservice.GetDesiredVersionResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger)
if err != nil {
return nil, err
}
return &pipedservice.GetDesiredVersionResponse{
Version: piped.DesiredVersion,
}, nil
}
func (a *PipedAPI) UpdateApplicationConfigurations(ctx context.Context, req *pipedservice.UpdateApplicationConfigurationsRequest) (*pipedservice.UpdateApplicationConfigurationsResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
// Scan all of them to guarantee in advance that there is no invalid request.
for _, appInfo := range req.Applications {
if err := a.validateAppBelongsToPiped(ctx, appInfo.Id, pipedID); err != nil {
return nil, err
}
}
for _, appInfo := range req.Applications {
updater := func(app *model.Application) error {
app.Name = appInfo.Name
app.Labels = appInfo.Labels
return nil
}
if err := a.applicationStore.UpdateApplication(ctx, appInfo.Id, updater); err != nil {
a.logger.Error("failed to update application", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to update application")
}
}
return &pipedservice.UpdateApplicationConfigurationsResponse{}, nil
}
func (a *PipedAPI) ReportUnregisteredApplicationConfigurations(ctx context.Context, req *pipedservice.ReportUnregisteredApplicationConfigurationsRequest) (*pipedservice.ReportUnregisteredApplicationConfigurationsResponse, error) {
projectID, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
// Cache an encoded slice of *model.ApplicationInfo.
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
if err := enc.Encode(req.Applications); err != nil {
a.logger.Error("failed to encode the unregistered apps", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to encode the unregistered apps")
}
key := makeUnregisteredAppsCacheKey(projectID)
c := rediscache.NewHashCache(a.redis, key)
if err := c.Put(pipedID, buf.Bytes()); err != nil {
return nil, status.Error(codes.Internal, "failed to put the unregistered apps to the cache")
}
return &pipedservice.ReportUnregisteredApplicationConfigurationsResponse{}, nil
}
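
// Illustrative counterpart (an assumption: a web-facing reader would decode
// the cached bytes like this, inverting the gob encoding above).
func decodeUnregisteredApps(b []byte) ([]*model.ApplicationInfo, error) {
	var apps []*model.ApplicationInfo
	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&apps); err != nil {
		return nil, err
	}
	return apps, nil
}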
// CreateDeploymentChain creates a new deployment chain object and all required commands to
// trigger deployment for applications in the chain.
func (a *PipedAPI) CreateDeploymentChain(ctx context.Context, req *pipedservice.CreateDeploymentChainRequest) (*pipedservice.CreateDeploymentChainResponse, error) {
firstDeployment := req.FirstDeployment
projectID, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateAppBelongsToPiped(ctx, firstDeployment.ApplicationId, pipedID); err != nil {
return nil, err
}
buildChainNodes := func(matcher *pipedservice.CreateDeploymentChainRequest_ApplicationMatcher) ([]*model.ChainNode, []*model.Application, error) {
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: datastore.OperatorEqual,
Value: projectID,
},
}
if matcher.Name != "" {
filters = append(filters, datastore.ListFilter{
Field: "Name",
Operator: datastore.OperatorEqual,
Value: matcher.Name,
})
}
// TODO: Support finding node apps by appKind and appLabels.
apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: filters,
})
if err != nil {
return nil, nil, err
}
nodes := make([]*model.ChainNode, 0, len(apps))
for _, app := range apps {
nodes = append(nodes, &model.ChainNode{
ApplicationRef: &model.ChainApplicationRef{
ApplicationId: app.Id,
ApplicationName: app.Name,
},
})
}
return nodes, apps, nil
}
chainBlocks := make([]*model.ChainBlock, 0, len(req.Matchers)+1)
// Add the first deployment, which was created by piped, as the first block of the chain.
chainBlocks = append(chainBlocks, &model.ChainBlock{
Nodes: []*model.ChainNode{
{
ApplicationRef: &model.ChainApplicationRef{
ApplicationId: firstDeployment.ApplicationId,
ApplicationName: firstDeployment.ApplicationName,
},
DeploymentRef: &model.ChainDeploymentRef{
DeploymentId: firstDeployment.Id,
Status: firstDeployment.Status,
StatusReason: firstDeployment.StatusReason,
},
},
},
StartedAt: time.Now().Unix(),
})
blockAppsMap := make(map[int][]*model.Application, len(req.Matchers))
for i, filter := range req.Matchers {
nodes, blockApps, err := buildChainNodes(filter)
if err != nil {
return nil, err
}
blockAppsMap[i+1] = blockApps
chainBlocks = append(chainBlocks, &model.ChainBlock{
Nodes: nodes,
StartedAt: time.Now().Unix(),
})
}
dc := model.DeploymentChain{
Id: uuid.New().String(),
ProjectId: projectID,
Blocks: chainBlocks,
}
// Create a new deployment chain instance to control the newly triggered deployment chain.
if err := a.deploymentChainStore.AddDeploymentChain(ctx, &dc); err != nil {
a.logger.Error("failed to create deployment chain", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to trigger new deployment chain")
}
firstDeployment.DeploymentChainId = dc.Id
// Trigger a new deployment for the first application by storing the first deployment in the datastore.
if err := a.deploymentStore.AddDeployment(ctx, firstDeployment); err != nil {
a.logger.Error("failed to create deployment", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to trigger new deployment for the first application in chain")
}
// Make a sync-application command for each application of the chain.
for blockIndex, apps := range blockAppsMap {
for _, app := range apps {
cmd := model.Command{
Id: uuid.New().String(),
PipedId: app.PipedId,
ApplicationId: app.Id,
ProjectId: app.ProjectId,
Commander: dc.Id,
Type: model.Command_CHAIN_SYNC_APPLICATION,
ChainSyncApplication: &model.Command_ChainSyncApplication{
DeploymentChainId: dc.Id,
BlockIndex: uint32(blockIndex),
ApplicationId: app.Id,
SyncStrategy: model.SyncStrategy_AUTO,
},
}
if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil {
a.logger.Error("failed to create command to trigger application in chain", zap.Error(err))
return nil, status.Error(codes.Internal, "failed to create command to trigger applications in chain")
}
}
}
return &pipedservice.CreateDeploymentChainResponse{}, nil
}
// InChainDeploymentPlannable checks the completion and status of the previous block in the deployment chain.
// An in-chain deployment is treated as plannable when:
// - It's the first deployment of its deployment chain.
// - All deployments of its previous block in chain are at DEPLOYMENT_SUCCESS state.
func (a *PipedAPI) InChainDeploymentPlannable(ctx context.Context, req *pipedservice.InChainDeploymentPlannableRequest) (*pipedservice.InChainDeploymentPlannableResponse, error) {
_, pipedID, _, err := rpcauth.ExtractPipedToken(ctx)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil {
return nil, err
}
dc, err := a.deploymentChainStore.GetDeploymentChain(ctx, req.DeploymentChainId)
if err != nil {
return nil, status.Error(codes.InvalidArgument, "unable to find the deployment chain which this deployment belongs to")
}
// Deployment of blocks[0] in the chain means it's the first deployment of the chain;
// hence it should be processed without any lock.
if req.DeploymentChainBlockIndex == 0 {
return &pipedservice.InChainDeploymentPlannableResponse{
Plannable: true,
}, nil
}
if req.DeploymentChainBlockIndex >= uint32(len(dc.Blocks)) {
return nil, status.Error(codes.InvalidArgument, "invalid deployment with chain block index provided")
}
prevBlockSuccessfullyCompleted, err := dc.IsSuccessfullyCompletedBlock(req.DeploymentChainBlockIndex - 1)
if err != nil {
return nil, status.Error(codes.InvalidArgument, "unable to process the previous block of this deployment in chain")
}
return &pipedservice.InChainDeploymentPlannableResponse{
Plannable: prevBlockSuccessfullyCompleted,
}, nil
}
// validateAppBelongsToPiped checks if the given application belongs to the given piped.
// It gives back an error unless the application belongs to the piped.
func (a *PipedAPI) validateAppBelongsToPiped(ctx context.Context, appID, pipedID string) error {
pid, err := a.appPipedCache.Get(appID)
if err == nil {
if pid != pipedID {
return status.Error(codes.PermissionDenied, "requested application doesn't belong to the piped")
}
return nil
}
app, err := a.applicationStore.GetApplication(ctx, appID)
if errors.Is(err, datastore.ErrNotFound) {
return status.Error(codes.NotFound, "the application is not found")
}
if err != nil {
a.logger.Error("failed to get application", zap.Error(err))
return status.Error(codes.Internal, "failed to get application")
}
a.appPipedCache.Put(appID, app.PipedId)
if app.PipedId != pipedID {
return status.Error(codes.PermissionDenied, "requested application doesn't belong to the piped")
}
return nil
}
// validateDeploymentBelongsToPiped checks if the given deployment belongs to the given piped.
// It gives back an error unless the deployment belongs to the piped.
func (a *PipedAPI) validateDeploymentBelongsToPiped(ctx context.Context, deploymentID, pipedID string) error {
pid, err := a.deploymentPipedCache.Get(deploymentID)
if err == nil {
if pid != pipedID {
return status.Error(codes.PermissionDenied, "requested deployment doesn't belong to the piped")
}
return nil
}
deployment, err := a.deploymentStore.GetDeployment(ctx, deploymentID)
if errors.Is(err, datastore.ErrNotFound) {
return status.Error(codes.NotFound, "the deployment is not found")
}
if err != nil {
a.logger.Error("failed to get deployment", zap.Error(err))
return status.Error(codes.Internal, "failed to get deployment")
}
a.deploymentPipedCache.Put(deploymentID, deployment.PipedId)
if deployment.PipedId != pipedID {
return status.Error(codes.PermissionDenied, "requested deployment doesn't belong to the piped")
}
return nil
}
// validateEnvBelongsToProject checks if the given environment belongs to the given project.
// It gives back an error unless the environment belongs to the project.
func (a *PipedAPI) validateEnvBelongsToProject(ctx context.Context, envID, projectID string) error {
pid, err := a.envProjectCache.Get(envID)
if err == nil {
if pid != projectID {
return status.Error(codes.PermissionDenied, "requested environment doesn't belong to the project")
}
return nil
}
env, err := a.environmentStore.GetEnvironment(ctx, envID)
if errors.Is(err, datastore.ErrNotFound) {
return status.Error(codes.NotFound, "the environment is not found")
}
if err != nil {
a.logger.Error("failed to get environment", zap.Error(err))
return status.Error(codes.Internal, "failed to get environment")
}
a.envProjectCache.Put(envID, env.ProjectId)
if env.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "requested environment doesn't belong to the project")
}
return nil
}
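
// Illustrative generalization (an assumption; the three validators above do
// not actually share this helper): the common cache-aside ownership check
// could be factored out like this.
func validateOwnership(c cache.Cache, id, ownerID, message string, fetchOwner func() (string, error)) error {
	if v, err := c.Get(id); err == nil {
		if v != ownerID {
			return status.Error(codes.PermissionDenied, message)
		}
		return nil
	}
	owner, err := fetchOwner()
	if err != nil {
		return err
	}
	c.Put(id, owner)
	if owner != ownerID {
		return status.Error(codes.PermissionDenied, message)
	}
	return nil
}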
| 1 | 22,992 | Isn't this PENDING? | pipe-cd-pipe | go |
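A minimal Go sketch of the reviewer's question (assumptions: the helper is hypothetical and the DEPLOYMENT_BLOCK_PENDING value is assumed to exist alongside the RUNNING value confirmed by the patch):

func initialBlockStatus(isFirstBlock bool) model.ChainBlockStatus {
	if isFirstBlock {
		// The first block is triggered together with the first deployment.
		return model.ChainBlockStatus_DEPLOYMENT_BLOCK_RUNNING
	}
	// Later blocks have not started yet when the chain is created.
	return model.ChainBlockStatus_DEPLOYMENT_BLOCK_PENDING
}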
@@ -30,16 +30,14 @@ function isLandmark(virtualNode) {
var explictRole = (node.getAttribute('role') || '').trim().toLowerCase();
if (explictRole) {
- if (explictRole === 'form') {
- return !!aria.labelVirtual(virtualNode);
- }
return landmarkRoles.includes(explictRole);
} else {
// Check if the node matches any of the CSS selectors of implicit landmarks
return implicitLandmarks.some(implicitSelector => {
let matches = axe.utils.matchesSelector(node, implicitSelector);
if (node.tagName.toLowerCase() === 'form') {
- return matches && !!aria.labelVirtual(virtualNode);
+ let title = node.getAttribute('title') || '';
+ return matches && (!!aria.labelVirtual(virtualNode) || !!axe.commons.text.sanitize(title) );
}
return matches;
}); | 1 | const { dom, aria } = axe.commons;
// Return the skiplink, if any
function getSkiplink(virtualNode) {
const firstLink = axe.utils.querySelectorAll(virtualNode, 'a[href]')[0];
if (
firstLink &&
axe.commons.dom.getElementByReference(firstLink.actualNode, 'href')
) {
return firstLink.actualNode;
}
}
const skipLink = getSkiplink(virtualNode);
const landmarkRoles = aria.getRolesByType('landmark');
// Create a list of nodeNames that have a landmark as an implicit role
const implicitLandmarks = landmarkRoles
.reduce((arr, role) => arr.concat(aria.implicitNodes(role)), [])
.filter(r => r !== null);
// Check if the current element is the skiplink
function isSkipLink(vNode) {
return skipLink && skipLink === vNode.actualNode;
}
// Check if the current element is a landmark
function isLandmark(virtualNode) {
var node = virtualNode.actualNode;
var explictRole = (node.getAttribute('role') || '').trim().toLowerCase();
if (explictRole) {
if (explictRole === 'form') {
return !!aria.labelVirtual(virtualNode);
}
return landmarkRoles.includes(explictRole);
} else {
// Check if the node matches any of the CSS selectors of implicit landmarks
return implicitLandmarks.some(implicitSelector => {
let matches = axe.utils.matchesSelector(node, implicitSelector);
if (node.tagName.toLowerCase() === 'form') {
return matches && !!aria.labelVirtual(virtualNode);
}
return matches;
});
}
}
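
// Illustrative helper (an assumption sketching the review suggestion, not
// part of the rule): short-circuit before sanitizing when the title
// attribute is absent or empty.
function hasNonEmptyTitle(node) {
	const title = node.getAttribute('title') || '';
	return !!title && !!axe.commons.text.sanitize(title);
}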
/**
* Find all visible elements not wrapped inside a landmark or skiplink
*/
function findRegionlessElms(virtualNode) {
const node = virtualNode.actualNode;
// End recursion if the element is a landmark, skiplink, or hidden content
if (
isLandmark(virtualNode) ||
isSkipLink(virtualNode) ||
!dom.isVisible(node, true)
) {
return [];
// Return the node if it is a content element
} else if (dom.hasContent(node, /* noRecursion: */ true)) {
return [node];
// Recursively look at all child elements
} else {
return virtualNode.children
.filter(({ actualNode }) => actualNode.nodeType === 1)
.map(findRegionlessElms)
.reduce((a, b) => a.concat(b), []); // flatten the results
}
}
var regionlessNodes = findRegionlessElms(virtualNode);
this.relatedNodes(regionlessNodes);
return regionlessNodes.length === 0;
| 1 | 12,960 | Minor detail: why sanitize the title if the title is empty (in some cases)? Worth adding an && check for that. | dequelabs-axe-core | js
@@ -28,9 +28,8 @@ package com.salesforce.androidsdk.smartsync.target;
import android.text.TextUtils;
-import com.salesforce.androidsdk.smartstore.store.QuerySpec;
-import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartsync.manager.SyncManager;
+import com.salesforce.androidsdk.smartsync.target.ParentChildrenSyncTargetHelper.RelationshipType;
import com.salesforce.androidsdk.smartsync.util.ChildrenInfo;
import com.salesforce.androidsdk.smartsync.util.Constants;
import com.salesforce.androidsdk.smartsync.util.ParentInfo; | 1 | /*
* Copyright (c) 2017-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartsync.target;
import android.text.TextUtils;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartsync.manager.SyncManager;
import com.salesforce.androidsdk.smartsync.util.ChildrenInfo;
import com.salesforce.androidsdk.smartsync.util.Constants;
import com.salesforce.androidsdk.smartsync.util.ParentInfo;
import com.salesforce.androidsdk.smartsync.util.SOQLBuilder;
import com.salesforce.androidsdk.util.JSONObjectHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Target for sync that downloads parent records along with their children records
*/
public class ParentChildrenSyncDownTarget extends SoqlSyncDownTarget {
private static final String TAG = "ParentChildrenSyncDownTarget";
public static final String PARENT = "parent";
public static final String CHILDREN = "children";
public static final String RELATIONSHIP_TYPE = "relationshipType";
public static final String PARENT_FIELDLIST = "parentFieldlist";
public static final String PARENT_SOQL_FILTER = "parentSoqlFilter";
public static final String CHILDREN_FIELDLIST = "childrenFieldlist";
private ParentInfo parentInfo;
private List<String> parentFieldlist;
private String parentSoqlFilter;
private ChildrenInfo childrenInfo;
private List<String> childrenFieldlist;
private RelationshipType relationshipType;
/**
* Enum for relationship types
*/
public enum RelationshipType {
MASTER_DETAIL,
LOOKUP;
}
/**
* Construct ParentChildrenSyncDownTarget from json
* @param target
* @throws JSONException
*/
public ParentChildrenSyncDownTarget(JSONObject target) throws JSONException {
this(
new ParentInfo(target.getJSONObject(PARENT)),
JSONObjectHelper.<String>toList(target.optJSONArray(PARENT_FIELDLIST)),
target.getString(PARENT_SOQL_FILTER),
new ChildrenInfo(target.getJSONObject(CHILDREN)),
JSONObjectHelper.<String>toList(target.optJSONArray(CHILDREN_FIELDLIST)),
RelationshipType.valueOf(target.getString(RELATIONSHIP_TYPE))
);
}
/**
* Construct ParentChildrenSyncDownTarget from parentType, childrenType etc
*/
public ParentChildrenSyncDownTarget(ParentInfo parentInfo, List<String> parentFieldlist, String parentSoqlFilter, ChildrenInfo childrenInfo, List<String> childrenFieldlist, RelationshipType relationshipType) {
super(parentInfo.idFieldName, parentInfo.modificationDateFieldName, null);
this.queryType = QueryType.parent_children;
this.parentInfo = parentInfo;
this.parentFieldlist = parentFieldlist;
this.parentSoqlFilter = parentSoqlFilter;
this.childrenInfo = childrenInfo;
this.childrenFieldlist = childrenFieldlist;
this.relationshipType = relationshipType;
}
/**
* Construct ParentChildrenSyncDownTarget from soql query - not allowed
*/
public ParentChildrenSyncDownTarget(String query) {
super(query);
throw new UnsupportedOperationException("Cannot construct ParentChildrenSyncDownTarget from SOQL query");
}
/**
* @return json representation of target
* @throws JSONException
*/
public JSONObject asJSON() throws JSONException {
JSONObject target = super.asJSON();
target.put(PARENT, parentInfo.asJSON());
target.put(PARENT_FIELDLIST, new JSONArray(parentFieldlist));
target.put(PARENT_SOQL_FILTER, parentSoqlFilter);
target.put(CHILDREN, childrenInfo.asJSON());
target.put(CHILDREN_FIELDLIST, new JSONArray(childrenFieldlist));
target.put(RELATIONSHIP_TYPE, relationshipType.name());
return target;
}
@Override
protected String getSoqlForRemoteIds() {
// This is for clean re-sync ghosts
//
// This is the soql to identify parents
List<String> fields = new ArrayList<>();
fields.add(getIdFieldName());
SOQLBuilder builder = SOQLBuilder.getInstanceWithFields(fields);
builder.from(parentInfo.sobjectType);
builder.where(parentSoqlFilter);
return builder.build();
}
protected String getSoqlForRemoteChildrenIds() {
// This is used for cleaning ghosts during re-sync
//
// This is the soql to identify children
// We are doing
// select Id, (select Id from children) from Parents where soqlParentFilter
// It could be better to do
// select Id from child where qualified-soqlParentFilter (e.g. if filter is Name = 'A' then we would use Parent.Name = 'A')
// But "qualifying" parentSoqlFilter without parsing it could prove tricky
// Nested query
List<String> nestedFields = new ArrayList<>();
nestedFields.add(childrenInfo.idFieldName);
SOQLBuilder builderNested = SOQLBuilder.getInstanceWithFields(nestedFields);
builderNested.from(childrenInfo.sobjectTypePlural);
// Parent query
List<String> fields = new ArrayList<>();
fields.add(getIdFieldName());
fields.add("(" + builderNested.build() + ")");
SOQLBuilder builder = SOQLBuilder.getInstanceWithFields(fields);
builder.from(parentInfo.sobjectType);
builder.where(parentSoqlFilter);
return builder.build();
}
@Override
public int cleanGhosts(SyncManager syncManager, String soupName) throws JSONException, IOException {
// Taking care of ghost parents
int localIdsSize = super.cleanGhosts(syncManager, soupName);
// Taking care of ghost children
// NB: ParentChildrenSyncDownTarget's getNonDirtyRecordIdsSql does a join between parent and children soups
// We only want to look at the children soup, so using SoqlSyncDownTarget's getNonDirtyRecordIdsSql
final Set<String> localChildrenIds = getIdsWithQuery(syncManager, super.getNonDirtyRecordIdsSql(childrenInfo.soupName, childrenInfo.idFieldName));
final Set<String> remoteChildrenIds = getChildrenRemoteIdsWithSoql(syncManager, getSoqlForRemoteChildrenIds());
if (remoteChildrenIds != null) {
localChildrenIds.removeAll(remoteChildrenIds);
}
if (localChildrenIds.size() > 0) {
deleteRecordsFromLocalStore(syncManager, childrenInfo.soupName, localChildrenIds, childrenInfo.idFieldName);
}
return localIdsSize;
}
protected Set<String> getChildrenRemoteIdsWithSoql(SyncManager syncManager, String soqlForChildrenRemoteIds) throws IOException, JSONException {
final Set<String> remoteChildrenIds = new HashSet<String>();
// Makes network request and parses the response.
JSONArray records = startFetch(syncManager, soqlForChildrenRemoteIds);
remoteChildrenIds.addAll(parseChildrenIdsFromResponse(records));
while (records != null) {
// Fetch next records, if any.
records = continueFetch(syncManager);
remoteChildrenIds.addAll(parseChildrenIdsFromResponse(records));
}
return remoteChildrenIds;
}
protected Set<String> parseChildrenIdsFromResponse(JSONArray records) {
final Set<String> remoteChildrenIds = new HashSet<String>();
if (records != null) {
for (int i = 0; i < records.length(); i++) {
final JSONObject record = records.optJSONObject(i);
if (record != null) {
JSONArray childrenRecords = record.optJSONArray(childrenInfo.sobjectTypePlural);
remoteChildrenIds.addAll(parseIdsFromResponse(childrenRecords));
}
}
}
return remoteChildrenIds;
}
@Override
public String getQuery(long maxTimeStamp) {
StringBuilder childrenWhere = new StringBuilder();
StringBuilder parentWhere = new StringBuilder();
if (maxTimeStamp > 0) {
// This is for re-sync
//
// Ideally we should target parent-children 'groups' where the parent changed or a child changed
//
// But that is not possible with SOQL:
// select fields, (select childrenFields from children where lastModifiedDate > xxx)
// from parent
// where lastModifiedDate > xxx
// or Id in (select parent-id from children where lastModifiedDate > xxx)
// Gives the following error: semi join sub-selects are not allowed with the 'OR' operator
//
// Also if we do:
// select fields, (select childrenFields from children where lastModifiedDate > xxx)
// from parent
// where Id in (select parent-id from children where lastModifiedDate > xxx or parent.lastModifiedDate > xxx)
// Then we miss parents without children
//
// So we target parent-children 'groups' where the parent changed
// And we only download the changed children
childrenWhere.append(buildModificationDateFilter(childrenInfo.modificationDateFieldName, maxTimeStamp));
parentWhere.append(buildModificationDateFilter(getModificationDateFieldName(), maxTimeStamp))
.append(TextUtils.isEmpty(parentSoqlFilter) ? "" : " and ");
}
parentWhere.append(parentSoqlFilter);
// Nested query
List<String> nestedFields = new ArrayList<>(childrenFieldlist);
if (!nestedFields.contains(childrenInfo.idFieldName)) nestedFields.add(childrenInfo.idFieldName);
if (!nestedFields.contains(childrenInfo.modificationDateFieldName)) nestedFields.add(childrenInfo.modificationDateFieldName);
SOQLBuilder builderNested = SOQLBuilder.getInstanceWithFields(nestedFields);
builderNested.from(childrenInfo.sobjectTypePlural);
builderNested.where(childrenWhere.toString());
// Parent query
List<String> fields = new ArrayList<>(parentFieldlist);
if (!fields.contains(getIdFieldName())) fields.add(getIdFieldName());
if (!fields.contains(getModificationDateFieldName())) fields.add(getModificationDateFieldName());
fields.add("(" + builderNested.build() + ")");
SOQLBuilder builder = SOQLBuilder.getInstanceWithFields(fields);
builder.from(parentInfo.sobjectType);
builder.where(parentWhere.toString());
return builder.build();
}
private StringBuilder buildModificationDateFilter(String modificationDateFieldName, long maxTimeStamp) {
StringBuilder filter = new StringBuilder();
filter.append(modificationDateFieldName)
.append(" > ")
.append(Constants.TIMESTAMP_FORMAT.format(new Date(maxTimeStamp)));
return filter;
}
@Override
protected JSONArray getRecordsFromResponseJson(JSONObject responseJson) throws JSONException {
JSONArray records = responseJson.getJSONArray(Constants.RECORDS);
for (int i=0; i<records.length(); i++) {
JSONObject record = records.getJSONObject(i);
JSONArray childrenRecords = (record.has(childrenInfo.sobjectTypePlural) && !record.isNull(childrenInfo.sobjectTypePlural)
? record.getJSONObject(childrenInfo.sobjectTypePlural).getJSONArray(Constants.RECORDS)
: new JSONArray());
// Cleaning up record
record.put(childrenInfo.sobjectTypePlural, childrenRecords);
// XXX what if not all children were fetched
}
return records;
}
@Override
public long getLatestModificationTimeStamp(JSONArray records) throws JSONException {
// NB: method is called during sync down so for this target records contain parent and children
// Compute max time stamp of parents
long maxTimeStamp = super.getLatestModificationTimeStamp(records);
// Fold in the max time stamp of the children
for (int i=0; i<records.length(); i++) {
JSONObject record = records.getJSONObject(i);
JSONArray children = record.getJSONArray(childrenInfo.sobjectTypePlural);
maxTimeStamp = Math.max(maxTimeStamp, getLatestModificationTimeStamp(children, childrenInfo.modificationDateFieldName));
}
return maxTimeStamp;
}
@Override
protected String getDirtyRecordIdsSql(String soupName, String idField) {
return String.format(
"SELECT DISTINCT {%s:%s} FROM {%s},{%s} WHERE {%s:%s} = {%s:%s} AND ({%s:%s} = 'true' OR {%s:%s} = 'true')",
soupName, idField,
childrenInfo.soupName, soupName,
childrenInfo.soupName, childrenInfo.parentLocalIdFieldName,
soupName, SmartStore.SOUP_ENTRY_ID,
soupName, LOCAL,
childrenInfo.soupName, LOCAL);
}
@Override
protected String getNonDirtyRecordIdsSql(String soupName, String idField) {
return String.format(
"SELECT {%s:%s} FROM {%s} WHERE {%s:%s} NOT IN (%s)",
soupName, idField,
soupName,
soupName, SmartStore.SOUP_ENTRY_ID,
getDirtyRecordIdsSql(soupName, SmartStore.SOUP_ENTRY_ID)
);
}
@Override
public void saveRecordsToLocalStore(SyncManager syncManager, String soupName, JSONArray records) throws JSONException {
// NB: method is called during sync down so for this target records contain parent and children
SmartStore smartStore = syncManager.getSmartStore();
synchronized(smartStore.getDatabase()) {
try {
smartStore.beginTransaction();
for (int i=0; i<records.length(); i++) {
JSONObject record = records.getJSONObject(i);
JSONObject parent = new JSONObject(record.toString());
// Separating parent from children
JSONArray children = (JSONArray) parent.remove(childrenInfo.sobjectTypePlural);
// Saving parent
cleanAndSaveInLocalStore(syncManager, soupName, parent, false);
// Put server id / local id of parent in children
for (int j = 0; j < children.length(); j++) {
JSONObject child = children.getJSONObject(j);
child.put(childrenInfo.parentLocalIdFieldName, parent.get(SmartStore.SOUP_ENTRY_ID));
child.put(childrenInfo.parentIdFieldName, parent.get(getIdFieldName()));
// Saving child
cleanAndSaveInLocalStore(syncManager, childrenInfo.soupName, child, false);
}
}
smartStore.setTransactionSuccessful();
}
finally {
smartStore.endTransaction();
}
}
}
@Override
public void deleteFromLocalStore(SyncManager syncManager, String soupName, JSONObject record) throws JSONException {
if (relationshipType == RelationshipType.MASTER_DETAIL) {
deleteChildrenFromLocalStore(syncManager, soupName, new String[]{record.getString(SmartStore.SOUP_ENTRY_ID)}, SmartStore.SOUP_ENTRY_ID);
}
super.deleteFromLocalStore(syncManager, soupName, record);
}
@Override
public void deleteRecordsFromLocalStore(SyncManager syncManager, String soupName, Set<String> ids, String idField) {
if (relationshipType == RelationshipType.MASTER_DETAIL) {
deleteChildrenFromLocalStore(syncManager, soupName, ids.toArray(new String[0]), idField);
}
super.deleteRecordsFromLocalStore(syncManager, soupName, ids, idField);
}
protected void deleteChildrenFromLocalStore(SyncManager syncManager, String soupName, String[] ids, String idField) {
String smartSql = String.format(
"SELECT {%s:%s} FROM {%s},{%s} WHERE {%s:%s} = {%s:%s} AND {%s:%s} IN (%s)",
childrenInfo.soupName, SmartStore.SOUP_ENTRY_ID,
childrenInfo.soupName, soupName,
childrenInfo.soupName, childrenInfo.parentLocalIdFieldName,
soupName, SmartStore.SOUP_ENTRY_ID,
soupName, idField,
"'" + TextUtils.join("', '", ids) + "'");
syncManager.getSmartStore().deleteByQuery(childrenInfo.soupName, QuerySpec.buildSmartQuerySpec(smartSql, Integer.MAX_VALUE));
}
}
| 1 | 15,915 | Code shared by ParentChildrenSyncDownTarget and ParentChildrenSyncUpTarget moved to ParentChildrenSyncTargetHelper | forcedotcom-SalesforceMobileSDK-Android | java |
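The comments in getSoqlForRemoteChildrenIds above spell out the query shape this target relies on: select the parent Id plus a nested child-Id subquery, filtered by the parent SOQL filter. A minimal sketch of that construction, written in Go for consistency with the other examples below (the function and parameter names are hypothetical; only the query shape comes from the Java source):

package main

import "fmt"

// buildRemoteChildrenIdsSoql mirrors the shape described above:
//   SELECT Id, (SELECT Id FROM Children) FROM Parent WHERE parentFilter
func buildRemoteChildrenIdsSoql(parentType, parentIdField, childrenPlural, childIdField, parentFilter string) string {
	nested := fmt.Sprintf("SELECT %s FROM %s", childIdField, childrenPlural)
	soql := fmt.Sprintf("SELECT %s, (%s) FROM %s", parentIdField, nested, parentType)
	if parentFilter != "" {
		soql += " WHERE " + parentFilter
	}
	return soql
}

func main() {
	fmt.Println(buildRemoteChildrenIdsSoql("Account", "Id", "Contacts", "Id", "Name = 'A'"))
	// SELECT Id, (SELECT Id FROM Contacts) FROM Account WHERE Name = 'A'
}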
@@ -75,12 +75,12 @@ class Local(Provisioning):
def startup(self):
self.start_time = time.time()
- self.available_slots = self.settings.get("capacity", None)
- if not self.available_slots:
- if self.settings.get("sequential", False):
- self.available_slots = 1
- else:
- self.available_slots = sys.maxsize # no limit
+ if self.settings.get("sequential", False):
+ self.available_slots = 1
+ else:
+ self.available_slots = self.settings.get("capacity", None)
+ if not self.available_slots:
+ self.available_slots = sys.maxsize # no limit
for executor in self.executors:
start_at = executor.execution.get('start-at', 0) | 1 | """
Implementations for `Provisioning` classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import sys
import time
import traceback
from bzt import ToolError
from bzt.engine import Provisioning, SelfDiagnosable
from bzt.six import reraise
from bzt.utils import dehumanize_time
class Local(Provisioning):
"""
Local provisioning means we start all the tools locally
"""
def __init__(self):
super(Local, self).__init__()
self.extend_configs = True
self.start_time = None
self.available_slots = None
self.finished_modules = []
self.started_modules = []
def _get_start_shift(self, shift):
if not shift:
return 0
time_formats = ['%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%H:%M:%S',
'%H:%M']
for time_format in time_formats:
try:
date = datetime.datetime.strptime(shift, time_format)
except ValueError:
continue
except TypeError:
self.log.warning('Start time must be string type ("%s"), ignored "%s"', time_formats[0], shift)
break
today = datetime.date.today()
if today > date.date():
date = datetime.datetime(today.year, today.month, today.day, date.hour, date.minute, date.second)
return time.mktime(date.timetuple()) - self.start_time
else:
self.log.warning('Unrecognized time format: %s ("%s" required), ignored', shift, time_formats[0])
return 0
def prepare(self):
super(Local, self).prepare()
for executor in self.executors:
self.log.debug("Preparing executor: %s", executor)
executor.prepare()
def startup(self):
self.start_time = time.time()
self.available_slots = self.settings.get("capacity", None)
if not self.available_slots:
if self.settings.get("sequential", False):
self.available_slots = 1
else:
self.available_slots = sys.maxsize # no limit
for executor in self.executors:
start_at = executor.execution.get('start-at', 0)
start_shift = self._get_start_shift(start_at)
delay = dehumanize_time(executor.execution.get('delay', 0))
executor.delay = delay + start_shift
msg = "Delay setup for %s: %s(start-at) + %s(delay) = %s"
self.log.debug(msg, executor, start_shift, delay, executor.delay)
def _start_modules(self):
if self.available_slots:
non_started_executors = [e for e in self.executors if e not in self.started_modules]
for executor in non_started_executors:
self.engine.logging_level_up()
if time.time() >= self.start_time + executor.delay:
executor.startup()
self.started_modules.append(executor)
self.available_slots -= 1
msg = "Starting execution: %s, rest of available slots: %s"
self.log.debug(msg, executor, self.available_slots)
if not self.available_slots:
break
self.engine.logging_level_down()
def check(self):
"""
Check executors for completion. Return True if all of them have finished.
"""
finished = True
self._start_modules()
for executor in self.executors:
if executor in self.finished_modules:
continue
if executor not in self.started_modules:
finished = False
continue
if executor.check():
self.finished_modules.append(executor)
self.available_slots += 1
self.log.debug("%s finished", executor)
else:
finished = False
return finished
def shutdown(self):
"""
Call shutdown on executors
"""
exc_info = exc_value = None
for executor in self.started_modules:
self.log.debug("Shutdown %s", executor)
try:
executor.shutdown()
except BaseException as exc:
msg = "Exception in shutdown of %s: %s %s"
self.log.debug(msg, executor.__class__.__name__, exc, traceback.format_exc())
if not exc_info:
exc_info = sys.exc_info()
if not exc_value:
exc_value = exc
if exc_info:
reraise(exc_info, exc_value)
def post_process(self):
"""
Post-process executors
"""
exc_info = exc_value = None
for executor in self.executors:
self.log.debug("Post-process %s", executor)
try:
executor.post_process()
if executor in self.started_modules and not executor.has_results():
msg = "Empty results, most likely %s (%s) failed. " \
"Actual reason for this can be found in logs under %s"
message = msg % (executor.label, executor.__class__.__name__, self.engine.artifacts_dir)
diagnostics = None
if isinstance(executor, SelfDiagnosable):
diagnostics = executor.get_error_diagnostics()
raise ToolError(message, diagnostics)
except BaseException as exc:
msg = "Exception in post_process of %s: %s %s"
self.log.debug(msg, executor.__class__.__name__, exc, traceback.format_exc())
if not exc_info:
exc_info = sys.exc_info()
if not exc_value:
exc_value = exc
if exc_info:
reraise(exc_info, exc_value)
| 1 | 15,503 | The bug is not about the priority, but about using two config items, CAPACITY and SEQUENTIAL, to control one thing: throughput. If 'sequential' is given 'False' as the default in the config file and I then set 'capacity' to 10 in my test.yml, the explicit 'capacity' should take priority over the default 'sequential'. So I suggest we use only one setting, 'capacity': '1' means sequential, more than 1 is the capacity, and None means sys.maxsize. | Blazemeter-taurus | py |
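A sketch of the single-setting resolution the comment argues for, in Go rather than the module's Python and with hypothetical names; the precedence (sequential first, then explicit capacity, then no limit) is the one the patch above establishes:

package main

import (
	"fmt"
	"math"
)

// resolveSlots collapses the two settings into one slot count:
// sequential wins, then an explicit capacity, then no limit.
func resolveSlots(sequential bool, capacity int) int {
	if sequential {
		return 1
	}
	if capacity > 0 {
		return capacity
	}
	return math.MaxInt // no limit, the analogue of sys.maxsize
}

func main() {
	fmt.Println(resolveSlots(true, 10))  // 1
	fmt.Println(resolveSlots(false, 10)) // 10
	fmt.Println(resolveSlots(false, 0))  // platform max int
}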
@@ -9,10 +9,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags"
mockplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/mock"
)
func TestStartLogCapture(t *testing.T) {
+ tf.IntegrationTest(t)
+
assert := assert.New(t)
require := require.New(t)
ctx := context.Background() | 1 | package fast
import (
"context"
"io/ioutil"
"testing"
iptb "github.com/ipfs/iptb/testbed"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
mockplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/mock"
)
func TestStartLogCapture(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
ctx := context.Background()
dir := "mockdir"
ns := iptb.NodeSpec{
Type: mockplugin.PluginName,
Dir: dir,
Attrs: nil,
}
c, err := ns.Load()
assert.NoError(err)
fc, ok := c.(IPTBCoreExt)
require.True(ok)
mfc := NewFilecoinProcess(ctx, fc, EnvironmentOpts{})
err = mfc.setupStderrCapturing()
require.NoError(err)
t.Run("test capture logs", func(t *testing.T) {
capture, err := mfc.StartLogCapture()
require.NoError(err)
_, err = mfc.RunCmdWithStdin(ctx, nil, "add-to-daemonstderr", "hello")
require.NoError(err)
err = mfc.lp.Pull()
require.NoError(err)
capture.Stop()
bb, err := ioutil.ReadAll(capture)
require.NoError(err)
require.Equal("hello\n", string(bb))
})
err = mfc.teardownStderrCapturing()
require.NoError(err)
}
| 1 | 18,525 | I don't know if I'd call any of these FAST tests integration tests. They are unit tests for FAST. They use a mock plugin which doesn't actually start any external processes, etc. | filecoin-project-venus | go |
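If the goal is simply to keep slower suites out of fast unit runs, Go's built-in short mode is one conventional alternative to a project-specific tf.IntegrationTest marker; a sketch (the test name here is illustrative, not the project's):

package fast

import "testing"

func TestStartLogCaptureGated(t *testing.T) {
	// Honor `go test -short` so quick unit runs can skip this suite.
	if testing.Short() {
		t.Skip("skipping log-capture test in -short mode")
	}
	// ... the existing test body would follow here ...
}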
@@ -27,10 +27,12 @@ const (
invalidVersion version = "invalid.version"
)
+// CurrentVersion returns the version of the calling instance
func CurrentVersion() version {
return version(mver.GetVersion())
}
+// Version returns the given version string as a typed version, if recognized
func Version(version string) version {
switch version {
case "0.7.0": | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
mver "github.com/openebs/maya/pkg/version"
)
type version string
const (
version070 version = "0.7.0"
invalidVersion version = "invalid.version"
)
func CurrentVersion() version {
return version(mver.GetVersion())
}
func Version(version string) version {
switch version {
case "0.7.0":
return version070
default:
return invalidVersion
}
return invalidVersion
}
| 1 | 9,661 | Can you provide a corresponding UT for this? | openebs-maya | go |
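A minimal table-driven unit test for Version, assuming it lives in the same v1alpha1 package (CurrentVersion depends on what mver.GetVersion reports at build time, so it is left out):

package v1alpha1

import "testing"

func TestVersion(t *testing.T) {
	tests := []struct {
		give string
		want version
	}{
		{"0.7.0", version070},
		{"0.6.0", invalidVersion},
		{"", invalidVersion},
	}
	for _, tt := range tests {
		if got := Version(tt.give); got != tt.want {
			t.Errorf("Version(%q) = %q, want %q", tt.give, got, tt.want)
		}
	}
}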
@@ -29,6 +29,8 @@ import (
"testing"
"time"
+ "golang.org/x/net/context"
+
"github.com/yarpc/yarpc-go/encoding/raw"
"github.com/yarpc/yarpc-go/transport"
"github.com/yarpc/yarpc-go/transport/transporttest" | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/yarpc/yarpc-go/encoding/raw"
"github.com/yarpc/yarpc-go/transport"
"github.com/yarpc/yarpc-go/transport/transporttest"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestHandlerSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
headers := make(http.Header)
headers.Set(CallerHeader, "moe")
headers.Set(EncodingHeader, "raw")
headers.Set(TTLMSHeader, "1000")
headers.Set(ProcedureHeader, "nyuck")
headers.Set(ServiceHeader, "curly")
headers.Set(BaggageHeaderPrefix+"Foo", "bar")
headers.Set(BaggageHeaderPrefix+"BAR", "baz")
rpcHandler := transporttest.NewMockHandler(mockCtrl)
httpHandler := handler{Handler: rpcHandler}
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t,
transporttest.ContextTTL(time.Second),
transporttest.ContextBaggage{
"foo": "bar",
"bar": "baz",
},
),
transport.Options{},
transporttest.NewRequestMatcher(
t, &transport.Request{
Caller: "moe",
Service: "curly",
Encoding: raw.Encoding,
Procedure: "nyuck",
Body: bytes.NewReader([]byte("Nyuck Nyuck")),
},
),
gomock.Any(),
).Return(nil)
req := &http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte("Nyuck Nyuck"))),
}
rw := httptest.NewRecorder()
httpHandler.ServeHTTP(rw, req)
code := rw.Code
assert.Equal(t, code, 200, "expected 200 code")
assert.Equal(t, rw.Body.String(), "")
}
func TestHandlerHeaders(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
giveHeaders http.Header
wantTTL time.Duration
wantHeaders map[string]string
wantBaggage map[string]string
}{
{
giveHeaders: http.Header{
TTLMSHeader: {"1000"},
"Rpc-Header-Foo": {"bar"},
"Context-Foo": {"Baz"},
},
wantTTL: time.Second,
wantHeaders: map[string]string{
"foo": "bar",
},
wantBaggage: map[string]string{
"foo": "Baz",
},
},
{
giveHeaders: http.Header{
TTLMSHeader: {"100"},
"Rpc-Foo": {"ignored"},
"ContextFoo": {"ignored"},
"Context-Rpc-Service": {"hello"},
},
wantTTL: 100 * time.Millisecond,
wantHeaders: map[string]string{},
wantBaggage: map[string]string{"rpc-service": "hello"},
},
}
for _, tt := range tests {
rpcHandler := transporttest.NewMockHandler(mockCtrl)
httpHandler := handler{Handler: rpcHandler}
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t,
transporttest.ContextTTL(tt.wantTTL),
transporttest.ContextBaggage(tt.wantBaggage),
),
transport.Options{},
transporttest.NewRequestMatcher(t,
&transport.Request{
Caller: "caller",
Service: "service",
Encoding: raw.Encoding,
Procedure: "hello",
Headers: transport.HeadersFromMap(tt.wantHeaders),
Body: bytes.NewReader([]byte("world")),
}),
gomock.Any(),
).Return(nil)
headers := http.Header{}
for k, vs := range tt.giveHeaders {
for _, v := range vs {
headers.Add(k, v)
}
}
headers.Set(CallerHeader, "caller")
headers.Set(ServiceHeader, "service")
headers.Set(EncodingHeader, "raw")
headers.Set(ProcedureHeader, "hello")
req := &http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte("world"))),
}
rw := httptest.NewRecorder()
httpHandler.ServeHTTP(rw, req)
assert.Equal(t, 200, rw.Code, "expected 200 status code")
}
}
func TestHandlerFailures(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
baseHeaders := make(http.Header)
baseHeaders.Set(CallerHeader, "somecaller")
baseHeaders.Set(EncodingHeader, "raw")
baseHeaders.Set(TTLMSHeader, "1000")
baseHeaders.Set(ProcedureHeader, "hello")
baseHeaders.Set(ServiceHeader, "fake")
headersWithBadTTL := headerCopyWithout(baseHeaders, TTLMSHeader)
headersWithBadTTL.Set(TTLMSHeader, "not a number")
tests := []struct {
req *http.Request
msg string
}{
{&http.Request{Method: "GET"}, "404 page not found\n"},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, CallerHeader),
},
"BadRequest: missing caller name\n",
},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, ServiceHeader),
},
"BadRequest: missing service name\n",
},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, ProcedureHeader),
},
"BadRequest: missing procedure\n",
},
{
&http.Request{
Method: "POST",
Header: headerCopyWithout(baseHeaders, TTLMSHeader),
},
"BadRequest: missing TTL\n",
},
{
&http.Request{
Method: "POST",
},
"BadRequest: missing service name, procedure, caller name, TTL, and encoding\n",
},
{
&http.Request{
Method: "POST",
Header: headersWithBadTTL,
},
`BadRequest: invalid TTL "not a number" for procedure "hello" of service "fake": must be positive integer` + "\n",
},
}
for _, tt := range tests {
req := tt.req
if req.Body == nil {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
}
h := handler{Handler: transporttest.NewMockHandler(mockCtrl)}
rw := httptest.NewRecorder()
h.ServeHTTP(rw, tt.req)
code := rw.Code
assert.True(t, code >= 400 && code < 500, "expected 400 level code")
assert.Equal(t, rw.Body.String(), tt.msg)
}
}
func TestHandlerInternalFailure(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
headers := make(http.Header)
headers.Set(CallerHeader, "somecaller")
headers.Set(EncodingHeader, "raw")
headers.Set(TTLMSHeader, "1000")
headers.Set(ProcedureHeader, "hello")
headers.Set(ServiceHeader, "fake")
request := http.Request{
Method: "POST",
Header: headers,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
rpcHandler := transporttest.NewMockHandler(mockCtrl)
rpcHandler.EXPECT().Handle(
transporttest.NewContextMatcher(t, transporttest.ContextTTL(time.Second)),
transport.Options{},
transporttest.NewRequestMatcher(
t, &transport.Request{
Caller: "somecaller",
Service: "fake",
Encoding: raw.Encoding,
Procedure: "hello",
Body: bytes.NewReader([]byte{}),
},
),
gomock.Any(),
).Return(fmt.Errorf("great sadness"))
httpHandler := handler{Handler: rpcHandler}
httpResponse := httptest.NewRecorder()
httpHandler.ServeHTTP(httpResponse, &request)
code := httpResponse.Code
assert.True(t, code >= 500 && code < 600, "expected 500 level response")
assert.Equal(t,
`UnexpectedError: error for procedure "hello" of service "fake": great sadness`+"\n",
httpResponse.Body.String())
}
func headerCopyWithout(headers http.Header, names ...string) http.Header {
newHeaders := make(http.Header)
for k, vs := range headers {
for _, v := range vs {
newHeaders.Add(k, v)
}
}
for _, k := range names {
newHeaders.Del(k)
}
return newHeaders
}
func TestResponseWriter(t *testing.T) {
recorder := httptest.NewRecorder()
writer := newResponseWriter(recorder)
headers := transport.HeadersFromMap(map[string]string{
"foo": "bar",
"shard-key": "123",
})
writer.AddHeaders(headers)
_, err := writer.Write([]byte("hello"))
require.NoError(t, err)
assert.Equal(t, "bar", recorder.Header().Get("rpc-header-foo"))
assert.Equal(t, "123", recorder.Header().Get("rpc-header-shard-key"))
assert.Equal(t, "hello", recorder.Body.String())
}
| 1 | 10,525 | nit: this should be below the yarpc group | yarpc-yarpc-go | go |
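The nit is about Go import grouping: golang.org/x/net/context is a third-party package and belongs with the other external imports below the yarpc group, rather than in a group of its own. The regrouped declaration (shown alone, trimmed to the imports this file uses):

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/yarpc/yarpc-go/encoding/raw"
	"github.com/yarpc/yarpc-go/transport"
	"github.com/yarpc/yarpc-go/transport/transporttest"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"
)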
@@ -44,6 +44,10 @@ func New(name, tip string, labelNames []string, defaultLabels []string) (*TimerF
labelNames,
)
err := prometheus.Register(vect)
+ switch err.(type) {
+ case prometheus.AlreadyRegisteredError:
+ err = nil
+ }
return &TimerFactory{
labelNames: labelNames, | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package prometheustimer
import (
"errors"
"github.com/facebookgo/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/iotexproject/iotex-core/pkg/log"
)
type (
// TimerFactory defines a timer factory to generate timer
TimerFactory struct {
labelNames []string
defaultLabels []string
vect *prometheus.GaugeVec
clk clock.Clock
}
// Timer defines a timer to measure performance
Timer struct {
factory *TimerFactory
labels []string
startTime int64
}
)
// New returns a new Timer
func New(name, tip string, labelNames []string, defaultLabels []string) (*TimerFactory, error) {
if len(labelNames) != len(defaultLabels) {
return nil, errors.New("label names do not match default labels")
}
vect := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: name,
Help: tip,
},
labelNames,
)
err := prometheus.Register(vect)
return &TimerFactory{
labelNames: labelNames,
defaultLabels: defaultLabels,
vect: vect,
clk: clock.New(),
}, err
}
// NewTimer returns a timer with start time as now
func (factory *TimerFactory) NewTimer(labels ...string) *Timer {
if factory == nil {
return &Timer{}
}
if len(labels) > len(factory.labelNames) {
log.L().Error("Two many timer labels")
return &Timer{}
}
return &Timer{
factory: factory,
labels: labels,
startTime: factory.now(),
}
}
// End ends the timer
func (timer *Timer) End() {
f := timer.factory
if f == nil {
return
}
f.log(float64(f.now()-timer.startTime), timer.labels...)
}
func (factory *TimerFactory) log(value float64, labels ...string) {
factory.vect.WithLabelValues(
append(labels, factory.defaultLabels[len(labels):]...)...,
).Set(value)
}
func (factory *TimerFactory) now() int64 {
return factory.clk.Now().UnixNano()
}
| 1 | 14,488 | singleCaseSwitch: should rewrite switch statement to if statement (from `gocritic`) | iotexproject-iotex-core | go |
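The gocritic rewrite is the plain if-with-type-assertion form; a small sketch against the real prometheus client API (the helper name is hypothetical):

package prometheustimer

import "github.com/prometheus/client_golang/prometheus"

// registerIgnoringDuplicates treats double registration as a no-op,
// using an if statement instead of a single-case type switch.
func registerIgnoringDuplicates(c prometheus.Collector) error {
	err := prometheus.Register(c)
	if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
		return nil
	}
	return err
}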
@@ -442,7 +442,17 @@ configRetry:
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
- log.WithError(err).Fatal("Failed to connect to Typha")
+ retry := 0
+ for err != nil && retry < 10 {
+ // Set Ready and Live to false
+ healthAggregator.Report(healthName, &health.HealthReport{Live: false, Ready: false})
+ err = typhaConnection.Start(context.Background())
+ log.WithError(err).Warn("Retrying to start Typha")
+ retry++
+ }
+ if err != nil {
+ log.WithError(err).Fatal("Failed to connect to Typha")
+ }
}
go func() {
typhaConnection.Finished.Wait() | 1 | // Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemon
import (
"context"
"errors"
"fmt"
"math/rand"
"net/http"
"os"
"os/exec"
"os/signal"
"runtime"
"runtime/debug"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/felix/buildinfo"
"github.com/projectcalico/felix/calc"
"github.com/projectcalico/felix/config"
_ "github.com/projectcalico/felix/config"
dp "github.com/projectcalico/felix/dataplane"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/policysync"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/statusrep"
"github.com/projectcalico/felix/usagerep"
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/model"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/felixsyncer"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/updateprocessors"
"github.com/projectcalico/libcalico-go/lib/backend/watchersyncer"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/set"
"github.com/projectcalico/pod2daemon/binder"
"github.com/projectcalico/typha/pkg/syncclient"
)
const usage = `Felix, the Calico per-host daemon.
Usage:
calico-felix [options]
Options:
-c --config-file=<filename> Config file to load [default: /etc/calico/felix.cfg].
--version Print the version and exit.
`
const (
// Our default value for GOGC if it is not set. This is the percentage that heap usage must
// grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the
// heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for
// lower occupancy.
defaultGCPercent = 20
// String sent on the failure report channel to indicate we're shutting down for config
// change.
reasonConfigChanged = "config changed"
// Process return code used to report a config change. This is the same as the code used
// by SIGHUP, which means that the wrapper script also restarts Felix on a SIGHUP.
configChangedRC = 129
)
// Run is the entry point to run a Felix instance.
//
// Its main role is to sequence Felix's startup by:
//
// Initialising early logging config (log format and early debug settings).
//
// Parsing command line parameters.
//
// Loading datastore configuration from the environment or config file.
//
// Loading more configuration from the datastore (this is retried until success).
//
// Starting the configured internal (golang) or external dataplane driver.
//
// Starting the background processing goroutines, which load and keep in sync with the
// state from the datastore, the "calculation graph".
//
// Starting the usage reporting and prometheus metrics endpoint threads (if configured).
//
// Then, it defers to monitorAndManageShutdown(), which blocks until one of the components
// fails, then attempts a graceful shutdown. At that point, all the processing is in
// background goroutines.
//
// To avoid having to maintain rarely-used code paths, Felix handles updates to its
// main config parameters by exiting and allowing itself to be restarted by the init
// daemon.
func Run(configFile string) {
// Go's RNG is not seeded by default. Do that now.
rand.Seed(time.Now().UTC().UnixNano())
// Special-case handling for environment variable-configured logging:
// Initialise early so we can trace out config parsing.
logutils.ConfigureEarlyLogging()
ctx := context.Background()
if os.Getenv("GOGC") == "" {
// Tune the GC to trade off a little extra CPU usage for significantly lower
// occupancy at high scale. This is worthwhile because Felix runs per-host so
// any occupancy improvement is multiplied by the number of hosts.
log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent)
debug.SetGCPercent(defaultGCPercent)
}
buildInfoLogCxt := log.WithFields(log.Fields{
"version": buildinfo.GitVersion,
"buildDate": buildinfo.BuildDate,
"gitCommit": buildinfo.GitRevision,
"GOMAXPROCS": runtime.GOMAXPROCS(0),
})
buildInfoLogCxt.Info("Felix starting up")
// Health monitoring, for liveness and readiness endpoints. The following loop can take a
// while before the datastore reports itself as ready - for example when there is data that
// needs to be migrated from a previous version - and we still want to Felix to report
// itself as live (but not ready) while we are waiting for that. So we create the
// aggregator upfront and will start serving health status over HTTP as soon as we see _any_
// config that indicates that.
healthAggregator := health.NewHealthAggregator()
const healthName = "felix-startup"
// Register this function as a reporter of liveness and readiness, with no timeout.
healthAggregator.RegisterReporter(healthName, &health.HealthReport{Live: true, Ready: true}, 0)
// Load the configuration from all the different sources including the
// datastore and merge. Keep retrying on failure. We'll sit in this
// loop until the datastore is ready.
log.Info("Loading configuration...")
var backendClient bapi.Client
var configParams *config.Config
var typhaAddr string
var numClientsCreated int
configRetry:
for {
if numClientsCreated > 60 {
// If we're in a restart loop, periodically exit (so we can be restarted) since
// - it may solve the problem if there's something wrong with our process
// - it prevents us from leaking connections to the datastore.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// Make an initial report that says we're live but not yet ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
// Load locally-defined config, including the datastore connection
// parameters. First the environment variables.
configParams = config.New()
envConfig := config.LoadConfigFromEnvironment(os.Environ())
// Then, the config file.
log.Infof("Loading config file: %v", configFile)
fileConfig, err := config.LoadConfigFile(configFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to load configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Parse and merge the local config.
configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration environment variable")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(fileConfig, config.ConfigFile)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Each time round this loop, check that we're serving health reports if we should
// be, or cancel any existing server if we should not be serving any more.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// We should now have enough config to connect to the datastore
// so we can load the remainder of the config.
datastoreConfig := configParams.DatastoreConfig()
// Can't dump the whole config because it may have sensitive information...
log.WithField("datastore", datastoreConfig.Spec.DatastoreType).Info("Connecting to datastore")
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to create datastore client")
time.Sleep(1 * time.Second)
continue configRetry
}
log.Info("Created datastore client")
numClientsCreated++
for {
globalConfig, hostConfig, err := loadConfigFromDatastore(
ctx, backendClient, configParams.FelixHostname)
if err == ErrNotReady {
log.Warn("Waiting for datastore to be initialized (or migrated)")
time.Sleep(1 * time.Second)
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
continue
} else if err != nil {
log.WithError(err).Error("Failed to get config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(globalConfig, config.DatastoreGlobal)
configParams.UpdateFrom(hostConfig, config.DatastorePerHost)
break
}
configParams.Validate()
if configParams.Err != nil {
log.WithError(configParams.Err).Error(
"Failed to parse/validate configuration from datastore.")
time.Sleep(1 * time.Second)
continue configRetry
}
// We now have some config flags that affect how we configure the syncer.
// After loading the config from the datastore, reconnect, possibly with new
// config. We don't need to re-load the configuration _again_ because the
// calculation graph will spot if the config has changed since we were initialised.
datastoreConfig = configParams.DatastoreConfig()
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to (re)connect to datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
numClientsCreated++
// If we're configured to discover Typha, do that now so we can retry if we fail.
typhaAddr, err = discoverTyphaAddr(configParams)
if err != nil {
log.WithError(err).Error("Typha discovery enabled but discovery failed.")
time.Sleep(1 * time.Second)
continue configRetry
}
break configRetry
}
if numClientsCreated > 2 {
// We don't have a way to close datastore connection so, if we reconnected after
// a failure to load config, restart felix to avoid leaking connections.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// We're now both live and ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
// Enable or disable the health HTTP server according to coalesced config.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// If we get here, we've loaded the configuration successfully.
// Update log levels before we do anything else.
logutils.ConfigureLogging(configParams)
// Since we may have enabled more logging, log with the build context
// again.
buildInfoLogCxt.WithField("config", configParams).Info(
"Successfully loaded configuration.")
// Start up the dataplane driver. This may be the internal go-based driver or an external
// one.
var dpDriver dp.DataplaneDriver
var dpDriverCmd *exec.Cmd
failureReportChan := make(chan string)
configChangedRestartCallback := func() { failureReportChan <- reasonConfigChanged }
dpDriver, dpDriverCmd = dp.StartDataplaneDriver(configParams, healthAggregator, configChangedRestartCallback)
// Initialise the glue logic that connects the calculation graph to/from the dataplane driver.
log.Info("Connect to the dataplane driver.")
var connToUsageRepUpdChan chan map[string]string
if configParams.UsageReportingEnabled {
// Make a channel for the connector to use to send updates to the usage reporter.
// (Otherwise, we pass in a nil channel, which disables such updates.)
connToUsageRepUpdChan = make(chan map[string]string, 1)
}
dpConnector := newConnector(configParams, connToUsageRepUpdChan, backendClient, dpDriver, failureReportChan)
// If enabled, create a server for the policy sync API. This allows clients to connect to
// Felix over a socket and receive policy updates.
var policySyncServer *policysync.Server
var policySyncProcessor *policysync.Processor
var policySyncAPIBinder binder.Binder
calcGraphClientChannels := []chan<- interface{}{dpConnector.ToDataplane}
if configParams.PolicySyncPathPrefix != "" {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Creating the policy sync server.")
toPolicySync := make(chan interface{})
policySyncUIDAllocator := policysync.NewUIDAllocator()
policySyncProcessor = policysync.NewProcessor(toPolicySync)
policySyncServer = policysync.NewServer(
policySyncProcessor.JoinUpdates,
policySyncUIDAllocator.NextUID,
)
policySyncAPIBinder = binder.NewBinder(configParams.PolicySyncPathPrefix)
policySyncServer.RegisterGrpc(policySyncAPIBinder.Server())
calcGraphClientChannels = append(calcGraphClientChannels, toPolicySync)
}
// Now create the calculation graph, which receives updates from the
// datastore and outputs dataplane updates for the dataplane driver.
//
// The Syncer has its own thread and we use an extra thread for the
// Validator, just to pipeline that part of the calculation then the
// main calculation graph runs in a single thread for simplicity.
// The output of the calculation graph arrives at the dataplane
// connection via channel.
//
// Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane
// KVPair KVPair protobufs
// Get a Syncer from the datastore, or a connection to our remote sync daemon, Typha,
// which will feed the calculation graph with updates, bringing Felix into sync.
var syncer Startable
var typhaConnection *syncclient.SyncerClient
syncerToValidator := calc.NewSyncerCallbacksDecoupler()
if typhaAddr != "" {
// Use a remote Syncer, via the Typha server.
log.WithField("addr", typhaAddr).Info("Connecting to Typha.")
typhaConnection = syncclient.New(
typhaAddr,
buildinfo.GitVersion,
configParams.FelixHostname,
fmt.Sprintf("Revision: %s; Build date: %s",
buildinfo.GitRevision, buildinfo.BuildDate),
syncerToValidator,
&syncclient.Options{
ReadTimeout: configParams.TyphaReadTimeout,
WriteTimeout: configParams.TyphaWriteTimeout,
KeyFile: configParams.TyphaKeyFile,
CertFile: configParams.TyphaCertFile,
CAFile: configParams.TyphaCAFile,
ServerCN: configParams.TyphaCN,
ServerURISAN: configParams.TyphaURISAN,
},
)
} else {
// Use the syncer locally.
syncer = felixsyncer.New(backendClient, syncerToValidator)
}
log.WithField("syncer", syncer).Info("Created Syncer")
// Create the ipsets/active policy calculation graph, which will
// do the dynamic calculation of ipset memberships and active policies
// etc.
asyncCalcGraph := calc.NewAsyncCalcGraph(
configParams,
calcGraphClientChannels,
healthAggregator,
)
if configParams.UsageReportingEnabled {
// Usage reporting enabled, add stats collector to graph. When it detects an update
// to the stats, it makes a callback, which we use to send an update on a channel.
// We use a buffered channel here to avoid blocking the calculation graph.
statsChanIn := make(chan calc.StatsUpdate, 1)
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
statsChanIn <- stats
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
// Rather than sending the updates directly to the usage reporting thread, we
// decouple with an extra goroutine. This prevents blocking the calculation graph
// goroutine if the usage reporting goroutine is blocked on IO, for example.
// Using a buffered channel wouldn't work here because the usage reporting
// goroutine can block for a long time on IO so we could build up a long queue.
statsChanOut := make(chan calc.StatsUpdate)
go func() {
var statsChanOutOrNil chan calc.StatsUpdate
var stats calc.StatsUpdate
for {
select {
case stats = <-statsChanIn:
// Got a stats update, activate the output channel.
log.WithField("stats", stats).Debug("Buffer: stats update received")
statsChanOutOrNil = statsChanOut
case statsChanOutOrNil <- stats:
// Passed on the update, deactivate the output channel until
// the next update.
log.WithField("stats", stats).Debug("Buffer: stats update sent")
statsChanOutOrNil = nil
}
}
}()
usageRep := usagerep.New(
configParams.UsageReportingInitialDelaySecs,
configParams.UsageReportingIntervalSecs,
statsChanOut,
connToUsageRepUpdChan,
)
go usageRep.PeriodicallyReportUsage(context.Background())
} else {
// Usage reporting disabled, but we still want a stats collector for the
// felix_cluster_* metrics. Register a no-op function as the callback.
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
}
// Create the validator, which sits between the syncer and the
// calculation graph.
validator := calc.NewValidationFilter(asyncCalcGraph)
// Start the background processing threads.
if syncer != nil {
log.Infof("Starting the datastore Syncer")
syncer.Start()
} else {
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
log.WithError(err).Fatal("Failed to connect to Typha")
}
go func() {
typhaConnection.Finished.Wait()
failureReportChan <- "Connection to Typha failed"
}()
}
go syncerToValidator.SendTo(validator)
asyncCalcGraph.Start()
log.Infof("Started the processing graph")
var stopSignalChans []chan<- bool
if configParams.EndpointReportingEnabled {
delay := configParams.EndpointReportingDelaySecs
log.WithField("delay", delay).Info(
"Endpoint status reporting enabled, starting status reporter")
dpConnector.statusReporter = statusrep.NewEndpointStatusReporter(
configParams.FelixHostname,
configParams.OpenstackRegion,
dpConnector.StatusUpdatesFromDataplane,
dpConnector.InSync,
dpConnector.datastore,
delay,
delay*180,
)
dpConnector.statusReporter.Start()
}
// Start communicating with the dataplane driver.
dpConnector.Start()
if policySyncProcessor != nil {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Starting the policy sync server.")
policySyncProcessor.Start()
sc := make(chan bool)
stopSignalChans = append(stopSignalChans, sc)
go policySyncAPIBinder.SearchAndBind(sc)
}
// Send the opening message to the dataplane driver, giving it its
// config.
dpConnector.ToDataplane <- &proto.ConfigUpdate{
Config: configParams.RawValues(),
}
if configParams.PrometheusMetricsEnabled {
log.Info("Prometheus metrics enabled. Starting server.")
gaugeHost := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "felix_host",
Help: "Configured Felix hostname (as a label), typically used in grouping/aggregating stats; the label defaults to the hostname of the host but can be overridden by configuration. The value of the gauge is always set to 1.",
ConstLabels: prometheus.Labels{"host": configParams.FelixHostname},
})
gaugeHost.Set(1)
prometheus.MustRegister(gaugeHost)
go servePrometheusMetrics(configParams)
}
// Register signal handlers to dump memory/CPU profiles.
logutils.RegisterProfilingSignalHandlers(configParams)
// Now monitor the worker process and our worker threads and shut
// down the process gracefully if they fail.
monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans)
}
func servePrometheusMetrics(configParams *config.Config) {
for {
log.WithField("port", configParams.PrometheusMetricsPort).Info("Starting prometheus metrics endpoint")
if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled {
log.Info("Including Golang & Process metrics")
} else {
if !configParams.PrometheusGoMetricsEnabled {
log.Info("Discarding Golang metrics")
prometheus.Unregister(prometheus.NewGoCollector())
}
if !configParams.PrometheusProcessMetricsEnabled {
log.Info("Discarding process metrics")
prometheus.Unregister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
}
}
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(fmt.Sprintf(":%v", configParams.PrometheusMetricsPort), nil)
log.WithError(err).Error(
"Prometheus metrics endpoint failed, trying to restart it...")
time.Sleep(1 * time.Second)
}
}
func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- bool) {
// Ask the runtime to tell us if we get a term/int signal.
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
signal.Notify(signalChan, syscall.SIGINT)
signal.Notify(signalChan, syscall.SIGHUP)
// Start a background thread to tell us when the dataplane driver stops.
// If the driver stops unexpectedly, we'll terminate this process.
// If this process needs to stop, we'll kill the driver and then wait
// for the message from the background thread.
driverStoppedC := make(chan bool)
go func() {
if driverCmd == nil {
log.Info("No driver process to monitor")
return
}
err := driverCmd.Wait()
log.WithError(err).Warn("Driver process stopped")
driverStoppedC <- true
}()
// Wait for one of the channels to give us a reason to shut down.
driverAlreadyStopped := driverCmd == nil
receivedFatalSignal := false
var reason string
select {
case <-driverStoppedC:
reason = "Driver stopped"
driverAlreadyStopped = true
case sig := <-signalChan:
if sig == syscall.SIGHUP {
log.Warning("Received a SIGHUP, treating as a request to reload config")
reason = reasonConfigChanged
} else {
reason = fmt.Sprintf("Received OS signal %v", sig)
receivedFatalSignal = true
}
case reason = <-failureReportChan:
}
logCxt := log.WithField("reason", reason)
logCxt.Warn("Felix is shutting down")
// Notify other components to stop.
for _, c := range stopSignalChans {
select {
case c <- true:
default:
}
}
if !driverAlreadyStopped {
// Driver may still be running, just in case the driver is
// unresponsive, start a thread to kill this process if we
// don't manage to kill the driver.
logCxt.Info("Driver still running, trying to shut it down...")
giveUpOnSigTerm := make(chan bool)
go func() {
time.Sleep(4 * time.Second)
giveUpOnSigTerm <- true
time.Sleep(1 * time.Second)
log.Fatal("Failed to wait for driver to exit, giving up.")
}()
// Signal to the driver to exit.
driverCmd.Process.Signal(syscall.SIGTERM)
select {
case <-driverStoppedC:
logCxt.Info("Driver shut down after SIGTERM")
case <-giveUpOnSigTerm:
logCxt.Error("Driver did not respond to SIGTERM, sending SIGKILL")
driverCmd.Process.Kill()
<-driverStoppedC
logCxt.Info("Driver shut down after SIGKILL")
}
}
if !receivedFatalSignal {
// We're exiting due to a failure or a config change, wait
// a couple of seconds to ensure that we don't go into a tight
// restart loop (which would make the init daemon in calico/node give
// up trying to restart us).
logCxt.Info("Sleeping to avoid tight restart loop.")
go func() {
time.Sleep(2 * time.Second)
if reason == reasonConfigChanged {
exitWithCustomRC(configChangedRC, "Exiting for config change")
return
}
logCxt.Fatal("Exiting.")
}()
for {
sig := <-signalChan
if sig == syscall.SIGHUP {
logCxt.Warning("Ignoring SIGHUP because we're already shutting down")
continue
}
logCxt.WithField("signal", sig).Fatal(
"Signal received while shutting down, exiting immediately")
}
}
logCxt.Fatal("Exiting immediately")
}
func exitWithCustomRC(rc int, message string) {
// Since log writing is done a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{
"rc": rc,
lclogutils.FieldForceFlush: true,
}).Info(message)
os.Exit(rc)
}
var (
ErrNotReady = errors.New("datastore is not ready or has not been initialised")
)
func loadConfigFromDatastore(
ctx context.Context, client bapi.Client, hostname string,
) (globalConfig, hostConfig map[string]string, err error) {
// The configuration is split over 3 different resource types and 4 different resource
// instances in the v3 data model:
// - ClusterInformation (global): name "default"
// - FelixConfiguration (global): name "default"
// - FelixConfiguration (per-host): name "node.<hostname>"
// - Node (per-host): name: <hostname>
// Get the global values and host specific values separately. We re-use the updateprocessor
// logic to convert the single v3 resource to a set of v1 key/values.
hostConfig = make(map[string]string)
globalConfig = make(map[string]string)
var ready bool
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindClusterInformation, "default",
updateprocessors.NewClusterInfoUpdateProcessor(),
&ready,
)
if err != nil {
return
}
if !ready {
// The ClusterInformation struct should contain the ready flag, if it is not set, abort.
err = ErrNotReady
return
}
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindFelixConfiguration, "default",
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindFelixConfiguration, "node."+hostname,
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindNode, hostname,
updateprocessors.NewFelixNodeUpdateProcessor(),
&ready,
)
if err != nil {
return
}
return
}
// getAndMergeConfig gets the v3 resource configuration extracts the separate config values
// (where each configuration value is stored in a field of the v3 resource Spec) and merges into
// the supplied map, as required by our v1-style configuration loader.
func getAndMergeConfig(
ctx context.Context, client bapi.Client, config map[string]string,
kind string, name string,
configConverter watchersyncer.SyncerUpdateProcessor,
ready *bool,
) error {
logCxt := log.WithFields(log.Fields{"kind": kind, "name": name})
cfg, err := client.Get(ctx, model.ResourceKey{
Kind: kind,
Name: name,
Namespace: "",
}, "")
if err != nil {
switch err.(type) {
case cerrors.ErrorResourceDoesNotExist:
logCxt.Info("No config of this type")
return nil
default:
logCxt.WithError(err).Info("Failed to load config from datastore")
return err
}
}
// Re-use the update processor logic implemented for the Syncer. We give it a v3 config
// object in a KVPair and it uses the annotations defined on it to split it into v1-style
// KV pairs. Log any errors - but don't fail completely to avoid cyclic restarts.
v1kvs, err := configConverter.Process(cfg)
if err != nil {
logCxt.WithError(err).Error("Failed to convert configuration")
}
// Loop through the converted values and update our config map with values from either the
// Global or Host configs.
for _, v1KV := range v1kvs {
if _, ok := v1KV.Key.(model.ReadyFlagKey); ok {
logCxt.WithField("ready", v1KV.Value).Info("Loaded ready flag")
if v1KV.Value == true {
*ready = true
}
} else if v1KV.Value != nil {
switch k := v1KV.Key.(type) {
case model.GlobalConfigKey:
config[k.Name] = v1KV.Value.(string)
case model.HostConfigKey:
config[k.Name] = v1KV.Value.(string)
default:
logCxt.WithField("KV", v1KV).Debug("Skipping config - not required for initial loading")
}
}
}
return nil
}
type DataplaneConnector struct {
config *config.Config
configUpdChan chan<- map[string]string
ToDataplane chan interface{}
StatusUpdatesFromDataplane chan interface{}
InSync chan bool
failureReportChan chan<- string
dataplane dp.DataplaneDriver
datastore bapi.Client
statusReporter *statusrep.EndpointStatusReporter
datastoreInSync bool
firstStatusReportSent bool
}
type Startable interface {
Start()
}
func newConnector(configParams *config.Config,
configUpdChan chan<- map[string]string,
datastore bapi.Client,
dataplane dp.DataplaneDriver,
failureReportChan chan<- string,
) *DataplaneConnector {
felixConn := &DataplaneConnector{
config: configParams,
configUpdChan: configUpdChan,
datastore: datastore,
ToDataplane: make(chan interface{}),
StatusUpdatesFromDataplane: make(chan interface{}),
InSync: make(chan bool, 1),
failureReportChan: failureReportChan,
dataplane: dataplane,
}
return felixConn
}
func (fc *DataplaneConnector) readMessagesFromDataplane() {
defer func() {
fc.shutDownProcess("Failed to read messages from dataplane")
}()
log.Info("Reading from dataplane driver pipe...")
ctx := context.Background()
for {
payload, err := fc.dataplane.RecvMessage()
if err != nil {
log.WithError(err).Error("Failed to read from front-end socket")
fc.shutDownProcess("Failed to read from front-end socket")
}
log.WithField("payload", payload).Debug("New message from dataplane")
switch msg := payload.(type) {
case *proto.ProcessStatusUpdate:
fc.handleProcessStatusUpdate(ctx, msg)
case *proto.WorkloadEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WorkloadEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
default:
log.WithField("msg", msg).Warning("Unknown message from dataplane")
}
log.Debug("Finished handling message from front-end")
}
}
func (fc *DataplaneConnector) handleProcessStatusUpdate(ctx context.Context, msg *proto.ProcessStatusUpdate) {
log.Debugf("Status update from dataplane driver: %v", *msg)
statusReport := model.StatusReport{
Timestamp: msg.IsoTimestamp,
UptimeSeconds: msg.Uptime,
FirstUpdate: !fc.firstStatusReportSent,
}
kv := model.KVPair{
Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
TTL: fc.config.ReportingTTLSecs,
}
applyCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
_, err := fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
if _, ok := err.(cerrors.ErrorOperationNotSupported); ok {
log.Debug("Datastore doesn't support status reports.")
return // and it won't support the last status key either.
} else {
log.Warningf("Failed to write status to datastore: %v", err)
}
} else {
fc.firstStatusReportSent = true
}
kv = model.KVPair{
Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
}
applyCtx, cancel = context.WithTimeout(ctx, 2*time.Second)
_, err = fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
}
}
var handledConfigChanges = set.From("CalicoVersion", "ClusterGUID", "ClusterType")
func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() {
defer func() {
fc.shutDownProcess("Failed to send messages to dataplane")
}()
var config map[string]string
for {
msg := <-fc.ToDataplane
switch msg := msg.(type) {
case *proto.InSync:
log.Info("Datastore now in sync.")
if !fc.datastoreInSync {
log.Info("Datastore in sync for first time, sending message to status reporter.")
fc.datastoreInSync = true
fc.InSync <- true
}
case *proto.ConfigUpdate:
if config != nil {
log.WithFields(log.Fields{
"old": config,
"new": msg.Config,
}).Info("Config updated, checking whether we need to restart")
restartNeeded := false
for kNew, vNew := range msg.Config {
logCxt := log.WithFields(log.Fields{"key": kNew, "new": vNew})
if vOld, prs := config[kNew]; !prs {
logCxt = logCxt.WithField("updateType", "add")
} else if vNew != vOld {
logCxt = logCxt.WithFields(log.Fields{"old": vOld, "updateType": "update"})
} else {
continue
}
if handledConfigChanges.Contains(kNew) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
for kOld, vOld := range config {
logCxt := log.WithFields(log.Fields{"key": kOld, "old": vOld, "updateType": "delete"})
if _, prs := msg.Config[kOld]; prs {
// Key was present in the message so we've handled it above.
continue
}
if handledConfigChanges.Contains(kOld) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
if restartNeeded {
fc.shutDownProcess("config changed")
}
}
// Take a copy of the config to compare against next time.
config = make(map[string]string)
for k, v := range msg.Config {
config[k] = v
}
if fc.configUpdChan != nil {
// Send the config over to the usage reporter.
fc.configUpdChan <- config
}
case *calc.DatastoreNotReady:
log.Warn("Datastore became unready, need to restart.")
fc.shutDownProcess("datastore became unready")
}
if err := fc.dataplane.SendMessage(msg); err != nil {
fc.shutDownProcess("Failed to write to dataplane driver")
}
}
}
func (fc *DataplaneConnector) shutDownProcess(reason string) {
// Send a failure report to the managed shutdown thread then give it
// a few seconds to do the shutdown.
fc.failureReportChan <- reason
time.Sleep(5 * time.Second)
// The graceful shutdown failed, terminate the process.
log.Panic("Managed shutdown failed. Panicking.")
}
func (fc *DataplaneConnector) Start() {
// Start a background thread to write to the dataplane driver.
go fc.sendMessagesToDataplaneDriver()
// Start background thread to read messages from dataplane driver.
go fc.readMessagesFromDataplane()
}
var ErrServiceNotReady = errors.New("Kubernetes service missing IP or port.")
func discoverTyphaAddr(configParams *config.Config) (string, error) {
if configParams.TyphaAddr != "" {
// Explicit address; trumps other sources of config.
return configParams.TyphaAddr, nil
}
if configParams.TyphaK8sServiceName == "" {
// No explicit address and no service name; not using Typha.
return "", nil
}
// If we get here, we need to look up the Typha service using the k8s API.
// TODO Typha: support Typha lookup without using rest.InClusterConfig().
k8sconf, err := rest.InClusterConfig()
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes config.")
return "", err
}
clientset, err := kubernetes.NewForConfig(k8sconf)
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes client set.")
return "", err
}
svcClient := clientset.CoreV1().Services(configParams.TyphaK8sNamespace)
svc, err := svcClient.Get(configParams.TyphaK8sServiceName, v1.GetOptions{})
if err != nil {
log.WithError(err).Error("Unable to get Typha service from Kubernetes.")
return "", err
}
host := svc.Spec.ClusterIP
log.WithField("clusterIP", host).Info("Found Typha ClusterIP.")
if host == "" {
log.WithError(err).Error("Typha service had no ClusterIP.")
return "", ErrServiceNotReady
}
for _, p := range svc.Spec.Ports {
if p.Name == "calico-typha" {
log.WithField("port", p).Info("Found Typha service port.")
typhaAddr := fmt.Sprintf("%s:%v", host, p.Port)
return typhaAddr, nil
}
}
log.Error("Didn't find Typha service port.")
return "", ErrServiceNotReady
}
| 1 | 16830 | Please log once here at Error level "Failed to connect to Typha, will retry..." | projectcalico-felix | c
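A minimal sketch of the requested behaviour, assuming a retry wrapper around discoverTyphaAddr (the wrapper name and retry interval are hypothetical, not felix's actual code):
// discoverTyphaAddrWithRetry is illustrative only: it logs the failure once
// at Error level, then keeps retrying quietly until discovery succeeds.
func discoverTyphaAddrWithRetry(configParams *config.Config) string {
	loggedFailure := false
	for {
		addr, err := discoverTyphaAddr(configParams)
		if err == nil {
			return addr
		}
		if !loggedFailure {
			log.WithError(err).Error("Failed to connect to Typha, will retry...")
			loggedFailure = true
		}
		time.Sleep(1 * time.Second) // hypothetical retry interval
	}
}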
@@ -31,7 +31,7 @@ func TestICMPPortUnreachable(t *testing.T) {
_, ipv4, _, _, pktBytes, err := testPacketUDPDefault()
Expect(err).NotTo(HaveOccurred())
- runBpfUnitTest(t, "icmp_port_unreachable.c", func(bpfrun bpfProgRunFn) {
+ runBpfUnitTest(t, "icmp_port_unreachable.c", false, func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.Retval).To(Equal(0)) | 1 | // Copyright (c) 2019-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ut_test
import (
"fmt"
"testing"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
. "github.com/onsi/gomega"
"github.com/projectcalico/felix/bpf/nat"
)
func TestICMPPortUnreachable(t *testing.T) {
RegisterTestingT(t)
_, ipv4, _, _, pktBytes, err := testPacketUDPDefault()
Expect(err).NotTo(HaveOccurred())
runBpfUnitTest(t, "icmp_port_unreachable.c", func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.Retval).To(Equal(0))
Expect(res.dataOut).To(HaveLen(134)) // eth + ip + 64 + udp + ip + icmp
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
checkICMPPortUnreachable(pktR, ipv4)
})
}
func TestNATNoBackendFromHEP(t *testing.T) {
RegisterTestingT(t)
iphdr := *ipv4Default
_, ipv4, l4, _, pktBytes, err := testPacket(nil, &iphdr, nil, nil)
Expect(err).NotTo(HaveOccurred())
udp := l4.(*layers.UDP)
// Test with count as 1 but no backend. This results in a NAT backend lookup failure.
natkey := nat.NewNATKey(ipv4.DstIP, uint16(udp.DstPort), uint8(ipv4.Protocol)).AsBytes()
err = natMap.Update(
natkey,
nat.NewNATValue(0, 1, 0, 0).AsBytes(),
)
Expect(err).NotTo(HaveOccurred())
defer func() {
err := natMap.Delete(natkey)
Expect(err).NotTo(HaveOccurred())
}()
runBpfTest(t, "calico_from_host_ep", nil, func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.RetvalStr()).To(Equal("TC_ACT_UNSPEC"), "expected program to return TC_ACT_UNSPEC")
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
checkICMPPortUnreachable(pktR, ipv4)
})
// Test with count as 0. This results in a no backend after frontend lookup as count is 0.
err = natMap.Update(
natkey,
nat.NewNATValue(0, 0, 0, 0).AsBytes(),
)
Expect(err).NotTo(HaveOccurred())
runBpfTest(t, "calico_from_host_ep", nil, func(bpfrun bpfProgRunFn) {
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
Expect(res.RetvalStr()).To(Equal("TC_ACT_UNSPEC"), "expected program to return TC_ACT_UNSPEC")
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
checkICMPPortUnreachable(pktR, ipv4)
})
}
func checkICMPPortUnreachable(pktR gopacket.Packet, ipv4 *layers.IPv4) {
ipv4L := pktR.Layer(layers.LayerTypeIPv4)
Expect(ipv4L).NotTo(BeNil())
ipv4R := ipv4L.(*layers.IPv4)
Expect(ipv4R.Protocol).To(Equal(layers.IPProtocolICMPv4))
Expect(ipv4R.SrcIP.String()).To(Equal(intfIP.String()))
Expect(ipv4R.DstIP).To(Equal(ipv4.SrcIP))
icmpL := pktR.Layer(layers.LayerTypeICMPv4)
Expect(icmpL).NotTo(BeNil())
icmpR := icmpL.(*layers.ICMPv4)
Expect(icmpR.TypeCode).To(Equal(
layers.CreateICMPv4TypeCode(
layers.ICMPv4TypeDestinationUnreachable,
layers.ICMPv4CodePort,
)))
}
| 1 | 19322 | Do we need the forXDP parameter in runBpfUnitTest? If not, I think it's better to revert in order to save a few changes. | projectcalico-felix | c
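If the parameter does stay, one way to avoid touching every existing call site is a variadic option; a self-contained sketch of that pattern (the helper below is illustrative, not felix's actual runBpfUnitTest):
package main

import "fmt"

// runUnitTest demonstrates the variadic-option pattern: old two-argument
// call sites compile unchanged, while new callers can opt into forXDP.
func runUnitTest(name string, fn func(), opts ...bool) {
	forXDP := false
	if len(opts) > 0 {
		forXDP = opts[0]
	}
	fmt.Printf("running %s (forXDP=%v)\n", name, forXDP)
	fn()
}

func main() {
	runUnitTest("icmp_port_unreachable.c", func() {})       // unchanged call site
	runUnitTest("icmp_port_unreachable.c", func() {}, true) // XDP variant
}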
@@ -6,11 +6,14 @@ from pyramid import httpexceptions
class NameGenerator(generators.Generator):
+ regexp = r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'
+
def __call__(self):
ascii_letters = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
- alphabet = ascii_letters + string.digits + '-'
- letters = [random.choice(alphabet) for x in range(8)]
+ alphabet = ascii_letters + string.digits + '-_'
+ letters = [random.choice(ascii_letters + string.digits)]
+ letters += [random.choice(alphabet) for x in range(7)]
return ''.join(letters)
| 1 | import random
import string
from cliquet.storage import generators, exceptions
from pyramid import httpexceptions
class NameGenerator(generators.Generator):
def __call__(self):
ascii_letters = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
alphabet = ascii_letters + string.digits + '-'
letters = [random.choice(alphabet) for x in range(8)]
return ''.join(letters)
def object_exists_or_404(request, collection_id, object_id, parent_id=''):
storage = request.registry.storage
try:
storage.get(collection_id=collection_id,
parent_id=parent_id,
object_id=object_id)
except exceptions.RecordNotFoundError:
raise httpexceptions.HTTPNotFound()
| 1 | 7770 | So we fix it only for kinto and not for all cliquet resources? | Kinto-kinto | py
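For context, a standalone sketch of what the patched generator guarantees — the first character is alphanumeric, so every generated id matches the new regexp (illustrative only, not cliquet's shared generator, so it does not by itself answer the question above):
import random
import re
import string

NAME_REGEXP = r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'  # regexp from the patch above

def generate_name():
    # First character: letter or digit only; the remaining seven may also
    # include '-' and '_', mirroring the patched NameGenerator.
    first = random.choice(string.ascii_letters + string.digits)
    rest = (random.choice(string.ascii_letters + string.digits + '-_')
            for _ in range(7))
    return first + ''.join(rest)

if __name__ == '__main__':
    for _ in range(1000):
        assert re.match(NAME_REGEXP, generate_name())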
@@ -11,9 +11,10 @@ import (
"fmt"
"math/big"
- "github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/pkg/errors"
+ "github.com/iotexproject/iotex-core/pkg/util/byteutil"
+
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package subchain
import (
"context"
"fmt"
"math/big"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/explorer/idl/explorer"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
)
// Protocol defines the protocol to handle multi-chain actions on sub-chain
type Protocol struct {
chainID uint32
mainChainAPI explorer.Explorer
sf factory.Factory
}
// NewProtocol constructs a sub-chain protocol on sub-chain
func NewProtocol(chain blockchain.Blockchain, mainChainAPI explorer.Explorer) *Protocol {
return &Protocol{
chainID: chain.ChainID(),
mainChainAPI: mainChainAPI,
sf: chain.GetFactory(),
}
}
// Handle handles how to mutate the state db given the multi-chain action on sub-chain
func (p *Protocol) Handle(_ context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) {
switch act := act.(type) {
case *action.SettleDeposit:
if err := p.validateDeposit(act, sm); err != nil {
return nil, errors.Wrapf(err, "error when handling deposit settlement action")
}
if err := p.mutateDeposit(act, sm); err != nil {
return nil, errors.Wrapf(err, "error when handling deposit settlement action")
}
}
return nil, nil
}
// Validate validates the multi-chain action on sub-chain
func (p *Protocol) Validate(_ context.Context, act action.Action) error {
switch act := act.(type) {
case *action.SettleDeposit:
if err := p.validateDeposit(act, nil); err != nil {
return errors.Wrapf(err, "error when validating deposit settlement action")
}
}
return nil
}
func (p *Protocol) validateDeposit(deposit *action.SettleDeposit, sm protocol.StateManager) error {
// Validate main-chain state
// TODO: this cast may not be safe if index is greater than 2^63
depositsOnMainChain, err := p.mainChainAPI.GetDeposits(int64(p.chainID), int64(deposit.Index()), 1)
if err != nil {
return err
}
if len(depositsOnMainChain) != 1 {
return fmt.Errorf("%d deposits found instead of 1", len(depositsOnMainChain))
}
depositOnMainChain := depositsOnMainChain[0]
if depositOnMainChain.Confirmed {
return fmt.Errorf("deposit %d is already confirmed", deposit.Index())
}
// Validate sub-chain state
var depositIndex DepositIndex
addr := depositAddress(deposit.Index())
if sm == nil {
err = p.sf.State(addr, &depositIndex)
} else {
err = sm.State(addr, &depositIndex)
}
switch errors.Cause(err) {
case nil:
return fmt.Errorf("deposit %d is already settled", deposit.Index())
case state.ErrStateNotExist:
return nil
default:
return errors.Wrapf(err, "error when loading state of %x", addr)
}
}
func (p *Protocol) mutateDeposit(deposit *action.SettleDeposit, sm protocol.StateManager) error {
// Update the deposit index
depositAddr := depositAddress(deposit.Index())
var depositIndex DepositIndex
if err := sm.PutState(depositAddr, &depositIndex); err != nil {
return err
}
// Update the action owner
owner, err := account.LoadOrCreateAccount(sm, deposit.Sender(), big.NewInt(0))
if err != nil {
return err
}
account.SetNonce(deposit, owner)
if err := account.StoreAccount(sm, deposit.Sender(), owner); err != nil {
return err
}
// Update the deposit recipient
recipient, err := account.LoadOrCreateAccount(sm, deposit.Recipient(), big.NewInt(0))
if err != nil {
return err
}
if err := recipient.AddBalance(deposit.Amount()); err != nil {
return err
}
return account.StoreAccount(sm, deposit.Recipient(), recipient)
}
func depositAddress(index uint64) hash.PKHash {
return byteutil.BytesTo20B(hash.Hash160b([]byte(fmt.Sprintf("depositToSubChain.%d", index))))
}
func srcAddressPKHash(srcAddr string) (hash.PKHash, error) {
addr, err := address.IotxAddressToAddress(srcAddr)
if err != nil {
return hash.ZeroPKHash, errors.Wrapf(err, "cannot get the public key hash of address %s", srcAddr)
}
return byteutil.BytesTo20B(addr.Payload()), nil
}
| 1 | 13833 | No empty line between the import groups. | iotexproject-iotex-core | go
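For reference, the grouping the comment appears to ask for, using the imports from the file above with the byteutil package folded into the single iotex-core block (whether the project enforces exactly this convention is an assumption):
import (
	"context"
	"fmt"
	"math/big"

	"github.com/pkg/errors"

	"github.com/iotexproject/iotex-core/action"
	"github.com/iotexproject/iotex-core/action/protocol"
	"github.com/iotexproject/iotex-core/action/protocol/account"
	"github.com/iotexproject/iotex-core/address"
	"github.com/iotexproject/iotex-core/blockchain"
	"github.com/iotexproject/iotex-core/explorer/idl/explorer"
	"github.com/iotexproject/iotex-core/pkg/hash"
	"github.com/iotexproject/iotex-core/pkg/util/byteutil"
	"github.com/iotexproject/iotex-core/state"
	"github.com/iotexproject/iotex-core/state/factory"
)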
@@ -141,6 +141,9 @@ func main() {
fatal(err)
}
}
+ if err := reviseRootDir(context); err != nil {
+ return err
+ }
return logs.ConfigureLogging(createLogConfig(context))
}
| 1 | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/opencontainers/runc/libcontainer/logs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// version will be populated by the Makefile, read from
// VERSION file of the source code.
var version = ""
// gitCommit will be the hash that the binary was built from
// and will be populated by the Makefile
var gitCommit = ""
const (
specConfig = "config.json"
usage = `Open Container Initiative runtime
runc is a command line client for running applications packaged according to
the Open Container Initiative (OCI) format and is a compliant implementation of the
Open Container Initiative specification.
runc integrates well with existing process supervisors to provide a production
container runtime environment for applications. It can be used with your
existing process monitoring tools and the container will be spawned as a
direct child of the process supervisor.
Containers are configured using bundles. A bundle for a container is a directory
that includes a specification file named "` + specConfig + `" and a root filesystem.
The root filesystem contains the contents of the container.
To start a new instance of a container:
# runc run [ -b bundle ] <container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host. Providing the bundle directory using "-b" is optional. The default
value for "bundle" is the current directory.`
)
func main() {
app := cli.NewApp()
app.Name = "runc"
app.Usage = usage
var v []string
if version != "" {
v = append(v, version)
}
if gitCommit != "" {
v = append(v, fmt.Sprintf("commit: %s", gitCommit))
}
v = append(v, fmt.Sprintf("spec: %s", specs.Version))
app.Version = strings.Join(v, "\n")
xdgRuntimeDir := ""
root := "/run/runc"
if shouldHonorXDGRuntimeDir() {
if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
root = runtimeDir + "/runc"
xdgRuntimeDir = root
}
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output for logging",
},
cli.StringFlag{
Name: "log",
Value: "",
Usage: "set the log file path where internal debug information is written",
},
cli.StringFlag{
Name: "log-format",
Value: "text",
Usage: "set the format used by logs ('text' (default), or 'json')",
},
cli.StringFlag{
Name: "root",
Value: root,
Usage: "root directory for storage of container state (this should be located in tmpfs)",
},
cli.StringFlag{
Name: "criu",
Value: "criu",
Usage: "path to the criu binary used for checkpoint and restore",
},
cli.BoolFlag{
Name: "systemd-cgroup",
Usage: "enable systemd cgroup support, expects cgroupsPath to be of form \"slice:prefix:name\" for e.g. \"system.slice:runc:434234\"",
},
cli.StringFlag{
Name: "rootless",
Value: "auto",
Usage: "ignore cgroup permission errors ('true', 'false', or 'auto')",
},
}
app.Commands = []cli.Command{
checkpointCommand,
createCommand,
deleteCommand,
eventsCommand,
execCommand,
initCommand,
killCommand,
listCommand,
pauseCommand,
psCommand,
restoreCommand,
resumeCommand,
runCommand,
specCommand,
startCommand,
stateCommand,
updateCommand,
}
app.Before = func(context *cli.Context) error {
if !context.IsSet("root") && xdgRuntimeDir != "" {
// According to the XDG specification, we need to set anything in
// XDG_RUNTIME_DIR to have a sticky bit if we don't want it to get
// auto-pruned.
if err := os.MkdirAll(root, 0700); err != nil {
fmt.Fprintln(os.Stderr, "the path in $XDG_RUNTIME_DIR must be writable by the user")
fatal(err)
}
if err := os.Chmod(root, 0700|os.ModeSticky); err != nil {
fmt.Fprintln(os.Stderr, "you should check permission of the path in $XDG_RUNTIME_DIR")
fatal(err)
}
}
return logs.ConfigureLogging(createLogConfig(context))
}
// If the command returns an error, cli takes upon itself to print
// the error on cli.ErrWriter and exit.
// Use our own writer here to ensure the log gets sent to the right location.
cli.ErrWriter = &FatalWriter{cli.ErrWriter}
if err := app.Run(os.Args); err != nil {
fatal(err)
}
}
type FatalWriter struct {
cliErrWriter io.Writer
}
func (f *FatalWriter) Write(p []byte) (n int, err error) {
logrus.Error(string(p))
if !logrusToStderr() {
return f.cliErrWriter.Write(p)
}
return len(p), nil
}
func createLogConfig(context *cli.Context) logs.Config {
logFilePath := context.GlobalString("log")
logPipeFd := ""
if logFilePath == "" {
logPipeFd = "2"
}
config := logs.Config{
LogPipeFd: logPipeFd,
LogLevel: logrus.InfoLevel,
LogFilePath: logFilePath,
LogFormat: context.GlobalString("log-format"),
}
if context.GlobalBool("debug") {
config.LogLevel = logrus.DebugLevel
}
return config
}
| 1 | 21520 | We have `ResolveRootfs` in `libcontainer/utils` so maybe use that one here? | opencontainers-runc | go
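A sketch of what that might look like, assuming utils.ResolveRootfs keeps its historical signature ResolveRootfs(string) (string, error) and that urfave/cli's GlobalSet is acceptable here (the body is illustrative, not the PR's actual reviseRootDir):
import "github.com/opencontainers/runc/libcontainer/utils"

// Illustrative only: resolve the --root path (absolute path plus symlinks)
// via the existing libcontainer helper instead of hand-rolling the logic.
func reviseRootDir(context *cli.Context) error {
	root, err := utils.ResolveRootfs(context.GlobalString("root"))
	if err != nil {
		return err
	}
	return context.GlobalSet("root", root)
}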
@@ -630,6 +630,19 @@ namespace AutoRest.Swagger.Tests
messages.AssertOnlyValidationMessage(typeof(SummaryAndDescriptionMustNotBeSame), 1);
}
+ [Fact]
+ public void LongRunningHasExtensionValidation()
+ {
+ var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "long-running-operation-extension.json"));
+ messages.AssertOnlyValidationMessage(typeof(LongRunningOperationsWithLongRunningExtension), 1);
+ }
+
+ [Fact]
+ public void LongRunningHasExtensionTrueValidation()
+ {
+ var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "long-running-operation-extension-2.json"));
+ messages.AssertOnlyValidationMessage(typeof(LongRunningOperationsWithLongRunningExtension), 1);
+ }
}
#region Positive tests | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.IO;
using System.Linq;
using Xunit;
using System.Collections.Generic;
using AutoRest.Swagger.Validation.Core;
using AutoRest.Core.Logging;
using AutoRest.Core;
using AutoRest.Swagger.Model;
using AutoRest.Swagger.Validation;
using static AutoRest.Core.Utilities.DependencyInjection;
namespace AutoRest.Swagger.Tests
{
internal static class AssertExtensions
{
internal static void AssertOnlyValidationWarning(this IEnumerable<ValidationMessage> messages, Type validationType)
{
AssertOnlyValidationMessage(messages.Where(m => m.Severity == Category.Warning), validationType);
}
internal static void AssertOnlyValidationWarning(this IEnumerable<ValidationMessage> messages, Type validationType, int count)
{
AssertOnlyValidationMessage(messages.Where(m => m.Severity == Category.Warning), validationType, count);
}
internal static void AssertOnlyValidationMessage(this IEnumerable<ValidationMessage> messages, Type validationType)
{
// checks that the collection has one item, and that it is the correct message type.
AssertOnlyValidationMessage(messages, validationType, 1);
}
internal static void AssertOnlyValidationMessage(this IEnumerable<ValidationMessage> messages, Type validationType, int count)
{
// checks that the collection has the right number of items and each is the correct type.
Assert.Equal(count, messages.Count(message => message.Rule.GetType() == validationType));
}
}
[Collection("Validation Tests")]
public partial class SwaggerModelerValidationTests
{
private IEnumerable<ValidationMessage> ValidateSwagger(string input, ServiceDefinitionDocumentType serviceDefDocType = ServiceDefinitionDocumentType.ARM,
ServiceDefinitionDocumentState mergeState = ServiceDefinitionDocumentState.Composed)
{
// Most rules are to be applied for ARM documents
// Also, most rules need to be run over the composite document (i.e. AFTER merge state)
// hence the defaults
using (NewContext)
{
var validator = new RecursiveObjectValidator(PropertyNameResolver.JsonName);
var serviceDefinition = SwaggerParser.Parse(input, File.ReadAllText(input));
var metaData = new ServiceDefinitionMetadata
{
ServiceDefinitionDocumentType = serviceDefDocType,
MergeState = mergeState
};
return validator.GetValidationExceptions(new Uri(input, UriKind.RelativeOrAbsolute), serviceDefinition, metaData).OfType<ValidationMessage>();
}
}
[Fact]
public void MissingDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "definition-missing-description.json"));
messages.AssertOnlyValidationMessage(typeof(DescriptionMissing), 2);
}
[Fact]
public void AvoidMsdnReferencesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "definition-contains-msdn-reference.json"));
messages.AssertOnlyValidationMessage(typeof(AvoidMsdnReferences), 4);
}
[Fact]
public void BooleanPropertiesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "boolean-properties.json"));
messages.AssertOnlyValidationMessage(typeof(EnumInsteadOfBoolean), 4);
}
[Fact]
public void DefaultValueInEnumValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "default-value-not-in-enum.json"));
messages.AssertOnlyValidationMessage(typeof(DefaultMustBeInEnum));
}
[Fact]
public void EmptyClientNameValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "empty-client-name-extension.json"));
messages.AssertOnlyValidationMessage(typeof(NonEmptyClientName));
}
[Fact]
public void UniqueResourcePathsValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "network-interfaces-api.json"));
messages.AssertOnlyValidationMessage(typeof(UniqueResourcePaths));
}
[Fact]
public void AnonymousSchemasDiscouragedValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "anonymous-response-type.json"));
messages.AssertOnlyValidationMessage(typeof(AvoidAnonymousTypes));
}
[Fact]
public void AnonymousParameterSchemaValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "anonymous-parameter-type.json"));
messages.AssertOnlyValidationMessage(typeof(AnonymousBodyParameter));
}
[Fact]
public void OperationParametersValidation()
{
// ignore ParameterNotDefinedInGlobalParameters validation rule since it overlaps with this
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "operations-invalid-parameters.json"))
.Where(msg => msg.Rule.GetType().Name != "ParameterNotDefinedInGlobalParameters");
messages.AssertOnlyValidationMessage(typeof(SubscriptionIdParameterInOperations), 1);
}
[Fact]
public void ServiceDefinitionParametersValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "service-def-invalid-parameters.json"));
messages.AssertOnlyValidationMessage(typeof(ParameterNotDefinedInGlobalParameters), 2);
}
[Fact]
public void OperationGroupSingleUnderscoreValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "operation-group-underscores.json"));
messages.AssertOnlyValidationMessage(typeof(OneUnderscoreInOperationId));
}
[Fact]
public void NonAppJsonTypeOperationForConsumes()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "non-app-json-operation-consumes.json"));
messages.AssertOnlyValidationWarning(typeof(NonApplicationJsonType));
}
[Fact]
public void ProvidersPathValidate()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "providers-path.json"));
messages.AssertOnlyValidationWarning(typeof(ParameterizeProperties), 2);
}
[Fact]
public void NonAppJsonTypeOperationForProduces()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "non-app-json-operation-produces.json"));
messages.AssertOnlyValidationWarning(typeof(NonApplicationJsonType));
}
[Fact]
public void NonAppJsonTypeServiceDefinitionForProduces()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "non-app-json-service-def-produces.json"));
messages.AssertOnlyValidationWarning(typeof(NonApplicationJsonType));
}
[Fact]
public void NonAppJsonTypeServiceDefinitionForConsumes()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "non-app-json-service-def-consumes.json"));
messages.AssertOnlyValidationWarning(typeof(NonApplicationJsonType));
}
[Fact]
public void NonHttpsServiceDefinitionForScheme()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "non-https-service-def-scheme.json"),
ServiceDefinitionDocumentType.ARM,
ServiceDefinitionDocumentState.Individual);
messages.AssertOnlyValidationWarning(typeof(HttpsSupportedScheme));
}
[Fact]
public void NonHttpsOperationsForScheme()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "non-https-operations-scheme.json"),
ServiceDefinitionDocumentType.ARM,
ServiceDefinitionDocumentState.Individual);
messages.AssertOnlyValidationWarning(typeof(HttpsSupportedScheme));
}
[Fact]
public void XmsPathNotInPathsValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "xms-path-not-in-paths.json"));
messages.AssertOnlyValidationMessage(typeof(XmsPathsMustOverloadPaths));
}
[Fact]
public void InvalidFormatValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "invalid-format.json"));
messages.AssertOnlyValidationMessage(typeof(ValidFormats));
}
[Fact]
public void ListOperationsNamingValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "list-operations-naming.json"));
messages.AssertOnlyValidationMessage(typeof(ListInOperationName), 2);
}
[Fact]
public void ArmResourcePropertiesBagValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "arm-resource-properties-bag.json"));
messages.AssertOnlyValidationMessage(typeof(ArmResourcePropertiesBag), 1);
}
[Fact]
public void CollectionObjectsPropertiesNamingValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "collection-objects-naming.json"));
messages.AssertOnlyValidationMessage(typeof(CollectionObjectPropertiesNaming), 2);
}
[Fact]
public void BodyTopLevelPropertiesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "body-top-level-properties.json"));
messages.AssertOnlyValidationMessage(typeof(BodyTopLevelProperties), 1);
}
[Fact]
public void PropertyNameCasingValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "property-names-casing.json"));
messages.AssertOnlyValidationMessage(typeof(BodyPropertiesNamesCamelCase), 1);
messages.AssertOnlyValidationMessage(typeof(DefinitionsPropertiesNamesCamelCase), 2);
}
[Fact]
public void NestedPropertiesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "nested-properties.json"));
messages.AssertOnlyValidationMessage(typeof(AvoidNestedProperties));
}
[Fact]
public void OperationDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "operation-missing-description.json"));
messages.AssertOnlyValidationMessage(typeof(OperationDescriptionRequired));
}
[Fact]
public void ParameterDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "parameter-missing-description.json"));
messages.AssertOnlyValidationMessage(typeof(ParameterDescriptionRequired), 2);
}
[Fact]
public void PageableNextLinkNotModeledValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "pageable-nextlink-not-modeled.json"));
messages.AssertOnlyValidationMessage(typeof(NextLinkPropertyMustExist));
}
[Fact]
public void Pageable200ResponseNotModeledValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "pageable-no-200-response.json"));
Assert.True(messages.Any(m => m.Rule.GetType() == typeof(PageableRequires200Response)));
}
[Fact]
public void OperationNameValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "operation-name-not-valid.json"));
messages.AssertOnlyValidationMessage(typeof(GetInOperationName), 1);
messages.AssertOnlyValidationMessage(typeof(PutInOperationName), 1);
messages.AssertOnlyValidationMessage(typeof(DeleteInOperationName), 1);
}
[Fact]
public void LongRunningResponseForPutValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource","Swagger", "Validation", "long-running-invalid-response-put.json"));
messages.AssertOnlyValidationMessage(typeof(LongRunningResponseStatusCode));
}
[Fact]
public void LongRunningResponseForPostValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "long-running-invalid-response-post.json"));
messages.AssertOnlyValidationMessage(typeof(LongRunningResponseStatusCode));
}
[Fact]
public void LongRunningResponseForDeleteValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "long-running-invalid-response-delete.json"));
messages.AssertOnlyValidationMessage(typeof(LongRunningResponseStatusCode));
}
[Fact]
public void MutabilityNotModeledWithReadOnlyValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "mutability-invalid-values-for-readonly.json"));
messages.AssertOnlyValidationMessage(typeof(MutabilityWithReadOnly), 2);
}
[Fact]
public void VersionFormatValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-version-validation.json"));
messages.AssertOnlyValidationMessage(typeof(APIVersionPattern), 1);
}
[Fact]
public void GuidUsageValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-guid-validation.json"));
messages.AssertOnlyValidationMessage(typeof(GuidUsage), 1);
}
[Fact]
public void DeleteRequestBodyValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-delete-request-body-validation.json"));
messages.AssertOnlyValidationMessage(typeof(DeleteMustNotHaveRequestBody), 1);
}
[Fact]
public void ResourceExtensionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-ext-msresource-validation.json"));
messages.AssertOnlyValidationMessage(typeof(ResourceHasXMsResourceEnabled), 1);
}
[Fact]
public void MsClientNameExtensionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-ext-msclientname-validation.json"));
messages.AssertOnlyValidationMessage(typeof(XmsClientNameProperty), 1);
messages.AssertOnlyValidationMessage(typeof(XmsClientNameParameter), 1);
}
[Fact]
public void OperationsApiValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-operations-api-validation.json"));
messages.AssertOnlyValidationMessage(typeof(OperationsAPIImplementation), 1);
}
[Fact]
public void ResourceModelValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-ext-resource-validation.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredPropertiesMissingInResourceModel), 1);
}
[Fact]
public void SkuModelValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-skumodel-validation.json"));
messages.AssertOnlyValidationMessage(typeof(InvalidSkuModel), 1);
}
[Fact]
public void TrackedResourceGetOperationValidation2()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource","Swagger", "Validation", "swagger-tracked-resource-1-validation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceGetOperation), 1);
}
[Fact]
public void TrackedResourceListByResourceGroupValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource","Swagger", "Validation", "swagger-tracked-resource-2-validation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceListByResourceGroup), 1);
}
[Fact]
public void TrackedResourcePatchOperationValidationValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "tracked-resource-patch-operation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourcePatchOperation), 1);
}
[Fact]
public void TrackedResourceGetOperationValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "tracked-resource-get-operation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceGetOperation), 1);
}
[Fact]
public void TrackedResourceListBySubscriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource","Swagger", "Validation", "swagger-tracked-resource-3-validation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceListBySubscription), 1);
}
[Fact]
public void TrackedResourceListByImmediateParentValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-list-by-immediate-parent.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceListByImmediateParent), 1);
}
[Fact]
public void TrackedResourceListByImmediateParentWithOperationValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-list-by-immediate-parent-2.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceListByImmediateParent), 1);
}
[Fact]
public void PutGetPatchResponseValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-putgetpatch-response-validation.json"));
messages.AssertOnlyValidationMessage(typeof(PutGetPatchResponseSchema), 1);
}
[Fact]
public void SecurityDefinitionStructurePresenceValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-1.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureEmptyValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-2.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureMultipleEntriesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-3.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureIncorrectKeyValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-4.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureMissingDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-5.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureEmptyDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-6.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureIncorrectDefValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-7.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureMissingScopesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-8.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureEmptyScopesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-9.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureMultipleScopesValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-10.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void SecurityDefinitionStructureMissingScopesDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "security-definitions-validations-11.json"));
messages.AssertOnlyValidationMessage(typeof(SecurityDefinitionsStructure), 1);
}
[Fact]
public void RequiredReadOnlyPropertiesValidationInDefinitions()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "required-readonly-properties.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredReadOnlyProperties), 1);
}
[Fact]
public void RequiredReadOnlyPropertiesValidationInResponses()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "required-readonly-properties-2.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredReadOnlyProperties), 1);
}
[Fact]
public void RequiredReadOnlyPropertiesValidationInParameters()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "required-readonly-properties-3.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredReadOnlyProperties), 1);
}
[Fact]
public void RequiredReadOnlyPropertiesValidationInNestedSchema()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "required-readonly-properties-4.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredReadOnlyProperties), 1);
}
[Fact]
public void RequiredReadOnlyPropertiesValidationInItems()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "required-readonly-properties-5.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredReadOnlyProperties), 1);
}
[Fact]
public void DefaultValuedInPropertiesInPatchRequestValidation()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "default-valued-properties-in-patch-request.json"));
messages.AssertOnlyValidationMessage(typeof(PatchBodyParametersSchema), 1);
}
[Fact]
public void RequiredPropertiesInPatchRequestValidation()
{
// This test validates if a definition has required properties which are marked as readonly true
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "req-properties-in-patch-request.json"));
messages.AssertOnlyValidationMessage(typeof(PatchBodyParametersSchema), 1);
}
[Fact]
public void XmsEnumExtensionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "x-ms-enum-absent.json"));
messages.AssertOnlyValidationMessage(typeof(XmsEnumValidation), 1);
}
[Fact]
public void XmsExamplesProvidedValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "xms-examples-absent.json"));
messages.AssertOnlyValidationMessage(typeof(XmsExamplesRequired), 2);
}
[Fact]
public void PutResponseResourceValidationTest()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "put-response-resource-validation.json"));
messages.AssertOnlyValidationMessage(typeof(XmsResourceInPutResponse), 1);
}
[Fact]
public void LROStatusCodesValidationTest()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "lro-status-codes-validation.json"));
messages.AssertOnlyValidationMessage(typeof(LROStatusCodesReturnTypeSchema), 2);
}
[Fact]
public void EmptyParameterNameValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "empty-parameter-name.json"));
messages.AssertOnlyValidationMessage(typeof(NamePropertyDefinitionInParameter), 2);
}
[Fact]
public void OperationIdNounConflictingModelNameValidationTest()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "operationid-noun-conflicting-model.json"));
messages.AssertOnlyValidationMessage(typeof(OperationIdNounConflictingModelNames), 1);
}
[Fact]
public void PutRequestResponseBodySchemaValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "put-request-response-validation.json"));
messages.AssertOnlyValidationMessage(typeof(PutRequestResponseScheme), 1);
}
[Fact]
public void XMSPageableListByRGAndSubscriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "xms-pageable-validation.json"));
messages.AssertOnlyValidationMessage(typeof(XmsPageableListByRGAndSubscriptions), 1);
}
[Fact]
public void SummaryDescriptionValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "swagger-summary-description.json"));
messages.AssertOnlyValidationMessage(typeof(SummaryAndDescriptionMustNotBeSame), 1);
}
}
#region Positive tests
public partial class SwaggerModelerValidationTests
{
/// <summary>
/// Verifies that a clean Swagger file does not result in any validation errors
/// </summary>
[Fact]
public void CleanFileValidation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "clean-complex-spec.json"));
Assert.Empty(messages.Where(m => m.Severity >= Category.Warning));
}
/// <summary>
///
/// </summary>
[Fact]
public void ValidCollectionObjectsPropertiesName()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "collection-objects-naming-valid.json"));
Assert.Empty(messages.Where(m => m.Severity >= Category.Warning));
}
/// <summary>
/// Verifies that a clean Swagger file does not result in any validation errors
/// </summary>
[Fact]
public void RequiredPropertyDefinedAllOf()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "required-property-defined-allof.json"));
Assert.Empty(messages.Where(m => m.Severity >= Category.Warning));
}
/// <summary>
/// Verifies that a clean Swagger file does not result in any validation errors
/// </summary>
[Fact]
public void PageableNextLinkDefinedAllOf()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "pageable-nextlink-defined-allof.json"));
Assert.Empty(messages.Where(m => m.Severity >= Category.Warning));
}
/// <summary>
/// Verifies that a x-ms-long-running extension response modeled correctly
/// </summary>
[Fact]
public void LongRunningResponseDefined()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource","Swagger", "Validation", "positive", "long-running-valid-response.json"));
messages.AssertOnlyValidationMessage(typeof(LongRunningResponseStatusCode), 0);
}
/// <summary>
/// Verifies that tracked resource has a patch operation
/// </summary>
[Fact]
public void ValidTrackedResourcePatchOperation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "tracked-resource-patch-valid-operation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourcePatchOperation), 0);
}
/// <summary>
/// Verifies that tracked resource has a get operation
/// </summary>
[Fact]
public void ValidTrackedResourceGetOperation()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "tracked-resource-get-valid-operation.json"));
messages.AssertOnlyValidationMessage(typeof(TrackedResourceGetOperation), 0);
}
/// <summary>
/// Verifies that listing operations (any operation that returns an array or is of xmspageable type)
/// are correctly named
/// </summary>
[Fact]
public void ListOperationsCorrectlyNamed()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "list-operations-valid-naming.json"));
messages.AssertOnlyValidationMessage(typeof(ListInOperationName), 0);
}
/// <summary>
/// Verifies that a providers path is of proper format
/// </summary>
[Fact]
public void ProvidersPathValidJson()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "providers-path-valid.json"));
messages.AssertOnlyValidationWarning(typeof(ParameterizeProperties), 0);
}
/// <summary>
/// Verifies that property names follow camelCase style
/// </summary>
[Fact]
public void ValidPropertyNameCasing()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "property-names-casing-valid.json"));
messages.AssertOnlyValidationMessage(typeof(BodyPropertiesNamesCamelCase), 0);
messages.AssertOnlyValidationMessage(typeof(DefinitionsPropertiesNamesCamelCase), 0);
}
[Fact]
public void ValidServiceDefinitionParameters()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "service-def-valid-parameters.json"));
messages.AssertOnlyValidationMessage(typeof(ParameterNotDefinedInGlobalParameters), 0);
}
[Fact]
public void ValidOperationParameters()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "operations-valid-parameters.json"));
messages.AssertOnlyValidationMessage(typeof(SubscriptionIdParameterInOperations), 0);
}
[Fact]
public void ValidArmResourcePropertiesBag()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "arm-resource-properties-valid.json"));
messages.AssertOnlyValidationMessage(typeof(ArmResourcePropertiesBag), 0);
}
/// <summary>
/// Verifies resource models are correctly identified
/// </summary>
[Fact]
public void ValidResourceModels()
{
var filePath = Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "valid-resource-model-definitions.json");
var fileText = System.IO.File.ReadAllText(filePath);
var servDef = SwaggerParser.Parse(filePath, fileText);
Uri uriPath = null;
Uri.TryCreate(filePath, UriKind.RelativeOrAbsolute, out uriPath);
var context = new RuleContext(servDef, uriPath);
Assert.Equal(4, context.ResourceModels.Count());
Assert.Equal(1, context.TrackedResourceModels.Count());
Assert.Equal(3, context.ProxyResourceModels.Count());
}
/// <summary>
/// Verifies that sku object
/// </summary>
[Fact]
public void ValidSkuObjectStructure()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "swagger-skumodel-validation-valid.json"));
messages.AssertOnlyValidationMessage(typeof(InvalidSkuModel), 0);
}
/// <summary>
/// Verifies resource model readonly properties
/// </summary>
[Fact]
public void ValidResourceModelReadOnlyProperties()
{
var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "valid-resource-model-readonly-props.json"));
messages.AssertOnlyValidationMessage(typeof(RequiredPropertiesMissingInResourceModel), 0);
}
}
#endregion
}
| 1 | 25004 | Just curious, do we have a positive test covering this somewhere? | Azure-autorest | java
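A hypothetical positive counterpart in the style of the positive tests above (the JSON fixture name is invented for illustration and is not in the repo):
/// <summary>
/// Verifies that a correctly declared x-ms-long-running-operation raises no validation message
/// </summary>
[Fact]
public void ValidLongRunningOperationExtension()
{
    var messages = ValidateSwagger(Path.Combine(Core.Utilities.Extensions.CodeBaseDirectory, "Resource", "Swagger", "Validation", "positive", "long-running-operation-extension-valid.json"));
    messages.AssertOnlyValidationMessage(typeof(LongRunningOperationsWithLongRunningExtension), 0);
}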
@@ -101,7 +101,7 @@ static void move_resize(struct roots_view *view, double x, double y,
constrained_height);
if (serial > 0) {
roots_surface->pending_move_resize_configure_serial = serial;
- } else {
+ } else if(roots_surface->pending_move_resize_configure_serial == 0) {
view->x = x;
view->y = y;
} | 1 | #include <assert.h>
#include <stdlib.h>
#include <stdbool.h>
#include <wayland-server.h>
#include <wlr/types/wlr_box.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/types/wlr_xdg_shell_v6.h>
#include <wlr/util/log.h>
#include "rootston/desktop.h"
#include "rootston/server.h"
#include "rootston/input.h"
static void get_size(const struct roots_view *view, struct wlr_box *box) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->geometry->width > 0 && surface->geometry->height > 0) {
box->width = surface->geometry->width;
box->height = surface->geometry->height;
} else {
box->width = view->wlr_surface->current->width;
box->height = view->wlr_surface->current->height;
}
}
static void activate(struct roots_view *view, bool active) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role == WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
wlr_xdg_toplevel_v6_set_activated(surface, active);
}
}
static void apply_size_constraints(struct wlr_xdg_surface_v6 *surface,
uint32_t width, uint32_t height, uint32_t *dest_width,
uint32_t *dest_height) {
*dest_width = width;
*dest_height = height;
struct wlr_xdg_toplevel_v6_state *state = &surface->toplevel_state->current;
if (width < state->min_width) {
*dest_width = state->min_width;
} else if (state->max_width > 0 &&
width > state->max_width) {
*dest_width = state->max_width;
}
if (height < state->min_height) {
*dest_height = state->min_height;
} else if (state->max_height > 0 &&
height > state->max_height) {
*dest_height = state->max_height;
}
}
static void resize(struct roots_view *view, uint32_t width, uint32_t height) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role != WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
return;
}
uint32_t constrained_width, constrained_height;
apply_size_constraints(surface, width, height, &constrained_width,
&constrained_height);
wlr_xdg_toplevel_v6_set_size(surface, constrained_width,
constrained_height);
}
static void move_resize(struct roots_view *view, double x, double y,
uint32_t width, uint32_t height) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct roots_xdg_surface_v6 *roots_surface = view->roots_xdg_surface_v6;
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role != WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
return;
}
bool update_x = x != view->x;
bool update_y = y != view->y;
uint32_t constrained_width, constrained_height;
apply_size_constraints(surface, width, height, &constrained_width,
&constrained_height);
if (update_x) {
x = x + width - constrained_width;
}
if (update_y) {
y = y + height - constrained_height;
}
view->pending_move_resize.update_x = update_x;
view->pending_move_resize.update_y = update_y;
view->pending_move_resize.x = x;
view->pending_move_resize.y = y;
view->pending_move_resize.width = constrained_width;
view->pending_move_resize.height = constrained_height;
uint32_t serial = wlr_xdg_toplevel_v6_set_size(surface, constrained_width,
constrained_height);
if (serial > 0) {
roots_surface->pending_move_resize_configure_serial = serial;
} else {
view->x = x;
view->y = y;
}
}
static void maximize(struct roots_view *view, bool maximized) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role != WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
return;
}
wlr_xdg_toplevel_v6_set_maximized(surface, maximized);
}
static void set_fullscreen(struct roots_view *view, bool fullscreen) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role != WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
return;
}
wlr_xdg_toplevel_v6_set_fullscreen(surface, fullscreen);
}
static void close(struct roots_view *view) {
assert(view->type == ROOTS_XDG_SHELL_V6_VIEW);
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role == WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
wlr_xdg_toplevel_v6_send_close(surface);
}
}
static void handle_request_move(struct wl_listener *listener, void *data) {
struct roots_xdg_surface_v6 *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_move);
struct roots_view *view = roots_xdg_surface->view;
struct roots_input *input = view->desktop->server->input;
struct wlr_xdg_toplevel_v6_move_event *e = data;
struct roots_seat *seat = input_seat_from_wlr_seat(input, e->seat->seat);
// TODO verify event serial
if (!seat || seat->cursor->mode != ROOTS_CURSOR_PASSTHROUGH) {
return;
}
roots_seat_begin_move(seat, view);
}
static void handle_request_resize(struct wl_listener *listener, void *data) {
struct roots_xdg_surface_v6 *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_resize);
struct roots_view *view = roots_xdg_surface->view;
struct roots_input *input = view->desktop->server->input;
struct wlr_xdg_toplevel_v6_resize_event *e = data;
// TODO verify event serial
struct roots_seat *seat = input_seat_from_wlr_seat(input, e->seat->seat);
assert(seat);
if (!seat || seat->cursor->mode != ROOTS_CURSOR_PASSTHROUGH) {
return;
}
roots_seat_begin_resize(seat, view, e->edges);
}
static void handle_request_maximize(struct wl_listener *listener, void *data) {
struct roots_xdg_surface_v6 *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_maximize);
struct roots_view *view = roots_xdg_surface->view;
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
if (surface->role != WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
return;
}
view_maximize(view, surface->toplevel_state->next.maximized);
}
static void handle_request_fullscreen(struct wl_listener *listener,
void *data) {
struct roots_xdg_surface_v6 *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, request_fullscreen);
struct roots_view *view = roots_xdg_surface->view;
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
struct wlr_xdg_toplevel_v6_set_fullscreen_event *e = data;
if (surface->role != WLR_XDG_SURFACE_V6_ROLE_TOPLEVEL) {
return;
}
view_set_fullscreen(view, e->fullscreen, e->output);
}
static void handle_commit(struct wl_listener *listener, void *data) {
struct roots_xdg_surface_v6 *roots_surface =
wl_container_of(listener, roots_surface, commit);
struct roots_view *view = roots_surface->view;
struct wlr_xdg_surface_v6 *surface = view->xdg_surface_v6;
uint32_t pending_serial =
roots_surface->pending_move_resize_configure_serial;
if (pending_serial > 0 && pending_serial >= surface->configure_serial) {
struct wlr_box size;
get_size(view, &size);
if (view->pending_move_resize.update_x) {
view->x = view->pending_move_resize.x +
view->pending_move_resize.width - size.width;
}
if (view->pending_move_resize.update_y) {
view->y = view->pending_move_resize.y +
view->pending_move_resize.height - size.height;
}
if (pending_serial == surface->configure_serial) {
roots_surface->pending_move_resize_configure_serial = 0;
}
}
}
static void handle_destroy(struct wl_listener *listener, void *data) {
struct roots_xdg_surface_v6 *roots_xdg_surface =
wl_container_of(listener, roots_xdg_surface, destroy);
wl_list_remove(&roots_xdg_surface->commit.link);
wl_list_remove(&roots_xdg_surface->destroy.link);
wl_list_remove(&roots_xdg_surface->request_move.link);
wl_list_remove(&roots_xdg_surface->request_resize.link);
wl_list_remove(&roots_xdg_surface->request_maximize.link);
wl_list_remove(&roots_xdg_surface->request_fullscreen.link);
wl_list_remove(&roots_xdg_surface->view->link);
view_destroy(roots_xdg_surface->view);
free(roots_xdg_surface);
}
void handle_xdg_shell_v6_surface(struct wl_listener *listener, void *data) {
struct wlr_xdg_surface_v6 *surface = data;
assert(surface->role != WLR_XDG_SURFACE_V6_ROLE_NONE);
if (surface->role == WLR_XDG_SURFACE_V6_ROLE_POPUP) {
wlr_log(L_DEBUG, "new xdg popup");
return;
}
struct roots_desktop *desktop =
wl_container_of(listener, desktop, xdg_shell_v6_surface);
wlr_log(L_DEBUG, "new xdg toplevel: title=%s, app_id=%s",
surface->title, surface->app_id);
wlr_xdg_surface_v6_ping(surface);
struct roots_xdg_surface_v6 *roots_surface =
calloc(1, sizeof(struct roots_xdg_surface_v6));
if (!roots_surface) {
return;
}
roots_surface->commit.notify = handle_commit;
wl_signal_add(&surface->surface->events.commit, &roots_surface->commit);
roots_surface->destroy.notify = handle_destroy;
wl_signal_add(&surface->events.destroy, &roots_surface->destroy);
roots_surface->request_move.notify = handle_request_move;
wl_signal_add(&surface->events.request_move, &roots_surface->request_move);
roots_surface->request_resize.notify = handle_request_resize;
wl_signal_add(&surface->events.request_resize,
&roots_surface->request_resize);
roots_surface->request_maximize.notify = handle_request_maximize;
wl_signal_add(&surface->events.request_maximize,
&roots_surface->request_maximize);
roots_surface->request_fullscreen.notify = handle_request_fullscreen;
wl_signal_add(&surface->events.request_fullscreen,
&roots_surface->request_fullscreen);
struct roots_view *view = calloc(1, sizeof(struct roots_view));
if (!view) {
free(roots_surface);
return;
}
view->type = ROOTS_XDG_SHELL_V6_VIEW;
view->xdg_surface_v6 = surface;
view->roots_xdg_surface_v6 = roots_surface;
view->wlr_surface = surface->surface;
view->get_size = get_size;
view->activate = activate;
view->resize = resize;
view->move_resize = move_resize;
view->maximize = maximize;
view->set_fullscreen = set_fullscreen;
view->close = close;
roots_surface->view = view;
view_init(view, desktop);
wl_list_insert(&desktop->views, &view->link);
view_setup(view);
}
| 1 | 9,785 | Style error, put a space between `if` and `(` | swaywm-wlroots | c |
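The fix the reviewer asks for is purely whitespace: `} else if (roots_surface->pending_move_resize_configure_serial == 0) {`. The new guard also matters semantically: the view position is now snapped immediately only when no earlier move-resize configure is still in flight; otherwise handle_commit applies the pending coordinates once the matching serial is committed.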
@@ -239,6 +239,13 @@ type Config struct {
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
+
+ // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+ // request endpoint hosts with modeled information.
+ //
+ // Disabling this feature is useful when you want to use local endpoints
+ // for testing that do not support the modeled host prefix pattern.
+ DisableEndpointHostPrefix *bool
}
// NewConfig returns a new Config pointer that can be chained with builder | 1 | package aws
import (
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
)
// UseServiceDefaultRetries instructs the config to use the service's own
// default number of retries. This will be the default action if
// Config.MaxRetries is nil also.
const UseServiceDefaultRetries = -1
// RequestRetryer is an alias for a type that implements the request.Retryer
// interface.
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
// }))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{
// Region: aws.String("us-west-2"),
// })
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
// retrieve credentials.
CredentialsChainVerboseErrors *bool
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
// to use based on region.
EndpointResolver endpoints.Resolver
// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
// ShouldRetry regardless of whether or not if request.Retryable is set.
// This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
// is not set, then ShouldRetry will only be called if request.Retryable is nil.
// Proper handling of the request.Retryable field is important when setting this field.
EnforceShouldRetryCheck *bool
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
// Regions and Endpoints.
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service
// specific configuration.
MaxRetries *int
// Retryer guides how HTTP requests should be retried in case of
// recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
// the client.DefaultRetryer will be used.
//
// When both Retryer and MaxRetries are non-nil, the former is used and
// the latter ignored.
//
// To set the Retryer field in a type-safe manner and with chaining, use
// the request.WithRetryer helper function:
//
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
//
Retryer RequestRetryer
// Disables semantic parameter validation, which validates input for
// missing required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// Note: This configuration option is specific to the Amazon S3 service.
//
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// for Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
// header to PUT requests over 2MB of content. 100-Continue instructs the
// HTTP client not to send the body until the service responds with a
// `continue` status. This is useful to prevent sending the request body
// until after the request is authenticated, and validated.
//
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
//
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
// Set this to `true` to enable S3 Accelerate feature. For all operations
// compatible with S3 Accelerate will use the accelerate endpoint for
// requests. Requests not compatible will fall back to normal S3 requests.
//
// The bucket must be enable for accelerate to be used with S3 client with
// accelerate enabled. If the bucket is not enabled for accelerate an error
// will be returned. The bucket name must be DNS compatible to also work
// with accelerate.
S3UseAccelerate *bool
// S3DisableContentMD5Validation config option is temporarily disabled,
// For S3 GetObject API calls, #1837.
//
// Set this to `true` to disable the S3 service client from automatically
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
// will also disable the SDK from performing object ContentMD5 validation
// on GetObject API calls.
S3DisableContentMD5Validation *bool
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This options is only
// meaningful if you're not already using a custom HTTP client with the
// SDK. Enabled by default.
//
// Must be set and provided to the session.NewSession() in order to disable
// the EC2Metadata overriding the timeout for default credentials chain.
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataDiableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
EC2MetadataDisableTimeoutOverride *bool
// Instructs the endpoint to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requests. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session, even
// services which don't support dual stack endpoints.
//
// If the Endpoint config value is also provided the UseDualStack flag
// will be ignored.
//
// Only supported when used with a session, for example:
//
// sess := session.Must(session.NewSession())
//
// svc := s3.New(sess, &aws.Config{
// UseDualStack: aws.Bool(true),
// })
UseDualStack *bool
// SleepDelay is an override for the func the SDK will call when sleeping
// during the lifecycle of a request. Specifically this will be used for
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
//
// SleepDelay will prevent any Context from being used for canceling retry
// delay of an API operation. It is recommended to not use SleepDelay at all
// and specify a Retryer instead.
SleepDelay func(time.Duration)
// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
// Will default to false. This would only be used for empty directory names in s3 requests.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// EnableEndpointDiscovery: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
// ))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, aws.NewConfig().
// WithRegion("us-west-2"),
// )
func NewConfig() *Config {
return &Config{}
}
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
// a Config pointer.
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
c.CredentialsChainVerboseErrors = &verboseErrs
return c
}
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
c.Credentials = creds
return c
}
// WithEndpoint sets a config Endpoint value returning a Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
c.Endpoint = &endpoint
return c
}
// WithEndpointResolver sets a config EndpointResolver value returning a
// Config pointer for chaining.
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
c.EndpointResolver = resolver
return c
}
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
c.Region = ®ion
return c
}
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
// for chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
c.DisableSSL = &disable
return c
}
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
c.HTTPClient = client
return c
}
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
// for chaining.
func (c *Config) WithMaxRetries(max int) *Config {
c.MaxRetries = &max
return c
}
// WithDisableParamValidation sets a config DisableParamValidation value
// returning a Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
c.DisableParamValidation = &disable
return c
}
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
// returning a Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
c.DisableComputeChecksums = &disable
return c
}
// WithLogLevel sets a config LogLevel value returning a Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
c.LogLevel = &level
return c
}
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
c.Logger = logger
return c
}
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
c.S3ForcePathStyle = &force
return c
}
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
// a Config pointer for chaining.
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
c.S3Disable100Continue = &disable
return c
}
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
// pointer for chaining.
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c
}
// WithS3DisableContentMD5Validation sets a config
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
c.S3DisableContentMD5Validation = &enable
return c
}
// WithUseDualStack sets a config UseDualStack value returning a Config
// pointer for chaining.
func (c *Config) WithUseDualStack(enable bool) *Config {
c.UseDualStack = &enable
return c
}
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
c.EC2MetadataDisableTimeoutOverride = &enable
return c
}
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
c.SleepDelay = fn
return c
}
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
c.EnableEndpointDiscovery = &t
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
mergeInConfig(c, other)
}
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
}
if other.CredentialsChainVerboseErrors != nil {
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
}
if other.Credentials != nil {
dst.Credentials = other.Credentials
}
if other.Endpoint != nil {
dst.Endpoint = other.Endpoint
}
if other.EndpointResolver != nil {
dst.EndpointResolver = other.EndpointResolver
}
if other.Region != nil {
dst.Region = other.Region
}
if other.DisableSSL != nil {
dst.DisableSSL = other.DisableSSL
}
if other.HTTPClient != nil {
dst.HTTPClient = other.HTTPClient
}
if other.LogLevel != nil {
dst.LogLevel = other.LogLevel
}
if other.Logger != nil {
dst.Logger = other.Logger
}
if other.MaxRetries != nil {
dst.MaxRetries = other.MaxRetries
}
if other.Retryer != nil {
dst.Retryer = other.Retryer
}
if other.DisableParamValidation != nil {
dst.DisableParamValidation = other.DisableParamValidation
}
if other.DisableComputeChecksums != nil {
dst.DisableComputeChecksums = other.DisableComputeChecksums
}
if other.S3ForcePathStyle != nil {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.S3Disable100Continue != nil {
dst.S3Disable100Continue = other.S3Disable100Continue
}
if other.S3UseAccelerate != nil {
dst.S3UseAccelerate = other.S3UseAccelerate
}
if other.S3DisableContentMD5Validation != nil {
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
}
if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
if other.EC2MetadataDisableTimeoutOverride != nil {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
if other.DisableRestProtocolURICleaning != nil {
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
}
if other.EnforceShouldRetryCheck != nil {
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
}
if other.EnableEndpointDiscovery != nil {
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c *Config) Copy(cfgs ...*Config) *Config {
dst := &Config{}
dst.MergeIn(c)
for _, cfg := range cfgs {
dst.MergeIn(cfg)
}
return dst
}
| 1 | 9,555 | It's a bit odd to have a "disable<x>" boolean, though it looks like you already do that for a few other flags. | aws-aws-sdk-go | go |
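A usage sketch for the new field, assuming it lands exactly as declared in this patch; note that, like every other flag in this file, it would presumably also need a case in mergeInConfig to survive Config.MergeIn and Copy.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Point the SDK at a local test endpoint and opt out of modeled host
	// prefixes, which such endpoints typically cannot resolve.
	cfg := aws.NewConfig().
		WithEndpoint("http://localhost:8000").
		WithRegion("us-west-2")
	cfg.DisableEndpointHostPrefix = aws.Bool(true) // field added by this patch

	fmt.Println(aws.BoolValue(cfg.DisableEndpointHostPrefix)) // true
}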
@@ -47,6 +47,7 @@ const (
// Timelimits for docker operations enforced above docker
const (
pullImageTimeout = 2 * time.Hour
+ loadImageTimeout = 2 * time.Hour
createContainerTimeout = 3 * time.Minute
startContainerTimeout = 1*time.Minute + 30*time.Second
stopContainerTimeout = 1 * time.Minute | 1 | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"archive/tar"
"bufio"
"io"
"os"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/ecr"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerauth"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/engine/dockeriface"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/cihub/seelog"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
)
const (
dockerStopTimeoutSeconds = 30
dockerDefaultTag = "latest"
)
// Timelimits for docker operations enforced above docker
const (
pullImageTimeout = 2 * time.Hour
createContainerTimeout = 3 * time.Minute
startContainerTimeout = 1*time.Minute + 30*time.Second
stopContainerTimeout = 1 * time.Minute
removeContainerTimeout = 5 * time.Minute
inspectContainerTimeout = 30 * time.Second
listContainersTimeout = 10 * time.Minute
// dockerPullBeginTimeout is the timeout from when a 'pull' is called to when
// we expect to see output on the pull progress stream. This is to work
// around a docker bug which sometimes results in pulls not progressing.
dockerPullBeginTimeout = 5 * time.Minute
)
// Interface to make testing it easier
type DockerClient interface {
// SupportedVersions returns a slice of the supported docker versions (or at least supposedly supported).
SupportedVersions() []dockerclient.DockerVersion
// WithVersion returns a new DockerClient for which all operations will use the given remote api version.
// A default version will be used for a client not produced via this method.
WithVersion(dockerclient.DockerVersion) DockerClient
ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error)
PullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata
CreateContainer(*docker.Config, *docker.HostConfig, string) DockerContainerMetadata
StartContainer(string) DockerContainerMetadata
StopContainer(string) DockerContainerMetadata
DescribeContainer(string) (api.ContainerStatus, DockerContainerMetadata)
RemoveContainer(string) error
GetContainerName(string) (string, error)
InspectContainer(string) (*docker.Container, error)
ListContainers(bool) ListContainersResponse
Version() (string, error)
}
// DockerGoClient wraps the underlying go-dockerclient library.
// It exists primarily for the following three purposes:
// 1) Provide an abstraction over inputs and outputs,
// a) Inputs: Trims them down to what we actually need (largely unchanged tbh)
// b) Outputs: Unifies error handling and the common 'start->inspect'
// pattern by having a consistent error output. This error output
// contains error data with a given Name that aims to be presentable as a
// 'reason' in state changes. It also filters out the information about a
// container that is of interest, such as network bindings, while
// ignoring the rest.
// 2) Timeouts: It adds timeouts everywhere, mostly as a reaction to
// pull-related issues in the Docker daemon.
// 3) Versioning: It abstracts over multiple client versions to allow juggling
// appropriately there.
// Implements DockerClient
type dockerGoClient struct {
clientFactory dockerclient.Factory
version dockerclient.DockerVersion
auth dockerauth.DockerAuthProvider
ecrClientFactory ecr.ECRFactory
}
func (dg *dockerGoClient) WithVersion(version dockerclient.DockerVersion) DockerClient {
return &dockerGoClient{
clientFactory: dg.clientFactory,
version: version,
auth: dg.auth,
}
}
// pullLock is a temporary workaround for a devicemapper issue. See: https://github.com/docker/docker/issues/9718
var pullLock sync.Mutex
// scratchCreateLock guards against multiple 'scratch' image creations at once
var scratchCreateLock sync.Mutex
type DockerImageResponse struct {
Images []docker.APIImages
}
// NewDockerGoClient creates a new DockerGoClient
func NewDockerGoClient(clientFactory dockerclient.Factory, authType string, authData *config.SensitiveRawMessage, acceptInsecureCert bool) (DockerClient, error) {
endpoint := utils.DefaultIfBlank(os.Getenv(DOCKER_ENDPOINT_ENV_VARIABLE), DOCKER_DEFAULT_ENDPOINT)
if clientFactory == nil {
clientFactory = dockerclient.NewFactory(endpoint)
}
client, err := clientFactory.GetDefaultClient()
if err != nil {
log.Error("Unable to connect to docker daemon. Ensure docker is running", "endpoint", endpoint, "err", err)
return nil, err
}
// Even if we have a dockerclient, the daemon might not be running. Ping it
// to ensure it's up.
err = client.Ping()
if err != nil {
log.Error("Unable to ping docker daemon. Ensure docker is running", "endpoint", endpoint, "err", err)
return nil, err
}
return &dockerGoClient{
clientFactory: clientFactory,
auth: dockerauth.NewDockerAuthProvider(authType, authData.Contents()),
ecrClientFactory: ecr.NewECRFactory(acceptInsecureCert),
}, nil
}
func (dg *dockerGoClient) dockerClient() (dockeriface.Client, error) {
if dg.version == "" {
return dg.clientFactory.GetDefaultClient()
}
return dg.clientFactory.GetClient(dg.version)
}
func (dg *dockerGoClient) PullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata {
timeout := ttime.After(pullImageTimeout)
// Workaround for devicemapper bug. See:
// https://github.com/docker/docker/issues/9718
pullLock.Lock()
defer pullLock.Unlock()
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.pullImage(image, authData) }()
select {
case resp := <-response:
return resp
case <-timeout:
return DockerContainerMetadata{Error: &DockerTimeoutError{pullImageTimeout, "pulled"}}
}
}
func (dg *dockerGoClient) pullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata {
log.Debug("Pulling image", "image", image)
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
// Special case; this image is not one that should be pulled, but rather
// should be created locally if necessary
if image == emptyvolume.Image+":"+emptyvolume.Tag {
err := dg.createScratchImageIfNotExists()
if err != nil {
return DockerContainerMetadata{Error: &api.DefaultNamedError{Name: "CreateEmptyVolumeError", Err: "Could not create empty volume " + err.Error()}}
}
return DockerContainerMetadata{}
}
authConfig, err := dg.getAuthdata(image, authData)
if err != nil {
return DockerContainerMetadata{Error: err}
}
pullDebugOut, pullWriter := io.Pipe()
defer pullWriter.Close()
repository, tag := parsers.ParseRepositoryTag(image)
if tag == "" {
repository = repository + ":" + dockerDefaultTag
} else {
repository = image
}
opts := docker.PullImageOptions{
Repository: repository,
OutputStream: pullWriter,
}
timeout := ttime.After(dockerPullBeginTimeout)
// pullBegan is a channel indicating that we have seen at least one line of data on the 'OutputStream' above.
// It is here to guard against a bug wherein docker never writes anything to that stream and hangs in pulling forever.
pullBegan := make(chan bool, 1)
// pullBeganOnce ensures we only indicate it began once (since our channel will only be read 0 or 1 times)
pullBeganOnce := sync.Once{}
go func() {
reader := bufio.NewReader(pullDebugOut)
var line string
var err error
for err == nil {
line, err = reader.ReadString('\n')
if err != nil {
break
}
pullBeganOnce.Do(func() {
pullBegan <- true
})
log.Debug("Pulling image", "image", image, "status", line)
if strings.Contains(line, "already being pulled by another client. Waiting.") {
// This can mean the daemon is 'hung' in pulling status for this image, but we can't be sure.
log.Error("Image 'pull' status marked as already being pulled", "image", image, "status", line)
}
}
if err != nil && err != io.EOF {
log.Warn("Error reading pull image status", "image", image, "err", err)
}
}()
pullFinished := make(chan error, 1)
go func() {
pullFinished <- client.PullImage(opts, authConfig)
log.Debug("Pulling image complete", "image", image)
}()
select {
case <-pullBegan:
break
case err := <-pullFinished:
if err != nil {
return DockerContainerMetadata{Error: CannotXContainerError{"Pull", err.Error()}}
}
return DockerContainerMetadata{}
case <-timeout:
return DockerContainerMetadata{Error: &DockerTimeoutError{dockerPullBeginTimeout, "pullBegin"}}
}
log.Debug("Pull began for image", "image", image)
defer log.Debug("Pull completed for image", "image", image)
err = <-pullFinished
if err != nil {
return DockerContainerMetadata{Error: CannotXContainerError{"Pull", err.Error()}}
}
return DockerContainerMetadata{}
}
func (dg *dockerGoClient) createScratchImageIfNotExists() error {
client, err := dg.dockerClient()
if err != nil {
return err
}
scratchCreateLock.Lock()
defer scratchCreateLock.Unlock()
_, err = client.InspectImage(emptyvolume.Image + ":" + emptyvolume.Tag)
if err == nil {
// Already exists; assume that it's okay to use it
return nil
}
reader, writer := io.Pipe()
emptytarball := tar.NewWriter(writer)
go func() {
emptytarball.Close()
writer.Close()
}()
// Create it from an empty tarball
err = client.ImportImage(docker.ImportImageOptions{
Repository: emptyvolume.Image,
Tag: emptyvolume.Tag,
Source: "-",
InputStream: reader,
})
return err
}
func (dg *dockerGoClient) getAuthdata(image string, authData *api.RegistryAuthenticationData) (docker.AuthConfiguration, error) {
if authData == nil || authData.Type != "ecr" {
return dg.auth.GetAuthconfig(image)
}
provider := dockerauth.NewECRAuthProvider(authData.ECRAuthData, dg.ecrClientFactory)
authConfig, err := provider.GetAuthconfig(image)
if err != nil {
return authConfig, CannotXContainerError{"PullECR", err.Error()}
}
return authConfig, nil
}
func (dg *dockerGoClient) CreateContainer(config *docker.Config, hostConfig *docker.HostConfig, name string) DockerContainerMetadata {
timeout := ttime.After(createContainerTimeout)
ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.createContainer(ctx, config, hostConfig, name) }()
select {
case resp := <-response:
return resp
case <-timeout:
cancelFunc()
return DockerContainerMetadata{Error: &DockerTimeoutError{createContainerTimeout, "created"}}
}
}
func (dg *dockerGoClient) createContainer(ctx context.Context, config *docker.Config, hostConfig *docker.HostConfig, name string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
containerOptions := docker.CreateContainerOptions{Config: config, HostConfig: hostConfig, Name: name}
dockerContainer, err := client.CreateContainer(containerOptions)
select {
case <-ctx.Done():
// Parent function already timed out; no need to get container metadata
return DockerContainerMetadata{}
default:
}
if err != nil {
return DockerContainerMetadata{Error: CannotXContainerError{"Create", err.Error()}}
}
return dg.containerMetadata(dockerContainer.ID)
}
func (dg *dockerGoClient) StartContainer(id string) DockerContainerMetadata {
timeout := ttime.After(startContainerTimeout)
ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.startContainer(ctx, id) }()
select {
case resp := <-response:
return resp
case <-timeout:
cancelFunc()
return DockerContainerMetadata{Error: &DockerTimeoutError{startContainerTimeout, "started"}}
}
}
func (dg *dockerGoClient) startContainer(ctx context.Context, id string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
err = client.StartContainer(id, nil)
select {
case <-ctx.Done():
// Parent function already timed out; no need to get container metadata
return DockerContainerMetadata{}
default:
}
metadata := dg.containerMetadata(id)
if err != nil {
metadata.Error = CannotXContainerError{"Start", err.Error()}
}
return metadata
}
func dockerStateToState(state docker.State) api.ContainerStatus {
if state.Running {
return api.ContainerRunning
}
return api.ContainerStopped
}
func (dg *dockerGoClient) DescribeContainer(dockerId string) (api.ContainerStatus, DockerContainerMetadata) {
dockerContainer, err := dg.InspectContainer(dockerId)
if err != nil {
return api.ContainerStatusNone, DockerContainerMetadata{Error: CannotXContainerError{"Describe", err.Error()}}
}
return dockerStateToState(dockerContainer.State), metadataFromContainer(dockerContainer)
}
func (dg *dockerGoClient) InspectContainer(dockerId string) (*docker.Container, error) {
timeout := ttime.After(inspectContainerTimeout)
type inspectResponse struct {
container *docker.Container
err error
}
response := make(chan inspectResponse, 1)
go func() {
container, err := dg.inspectContainer(dockerId)
response <- inspectResponse{container, err}
}()
select {
case resp := <-response:
return resp.container, resp.err
case <-timeout:
return nil, &DockerTimeoutError{inspectContainerTimeout, "inspecting"}
}
}
func (dg *dockerGoClient) inspectContainer(dockerId string) (*docker.Container, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
return client.InspectContainer(dockerId)
}
func (dg *dockerGoClient) StopContainer(dockerId string) DockerContainerMetadata {
timeout := ttime.After(stopContainerTimeout)
ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.stopContainer(ctx, dockerId) }()
select {
case resp := <-response:
return resp
case <-timeout:
cancelFunc()
return DockerContainerMetadata{Error: &DockerTimeoutError{stopContainerTimeout, "stopped"}}
}
}
func (dg *dockerGoClient) stopContainer(ctx context.Context, dockerId string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
err = client.StopContainer(dockerId, dockerStopTimeoutSeconds)
select {
case <-ctx.Done():
// parent function has already timed out and returned; we're writing to a
// buffered channel that will never be read
return DockerContainerMetadata{}
default:
}
metadata := dg.containerMetadata(dockerId)
if err != nil {
log.Debug("Error stopping container", "err", err, "id", dockerId)
if metadata.Error == nil {
metadata.Error = CannotXContainerError{"Stop", err.Error()}
}
}
return metadata
}
func (dg *dockerGoClient) RemoveContainer(dockerId string) error {
timeout := ttime.After(removeContainerTimeout)
response := make(chan error, 1)
go func() { response <- dg.removeContainer(dockerId) }()
select {
case resp := <-response:
return resp
case <-timeout:
return &DockerTimeoutError{removeContainerTimeout, "removing"}
}
}
func (dg *dockerGoClient) removeContainer(dockerId string) error {
client, err := dg.dockerClient()
if err != nil {
return err
}
return client.RemoveContainer(docker.RemoveContainerOptions{ID: dockerId, RemoveVolumes: true, Force: false})
}
func (dg *dockerGoClient) GetContainerName(id string) (string, error) {
container, err := dg.InspectContainer(id)
if err != nil {
return "", err
}
return container.Name, nil
}
func (dg *dockerGoClient) containerMetadata(id string) DockerContainerMetadata {
dockerContainer, err := dg.InspectContainer(id)
if err != nil {
return DockerContainerMetadata{DockerId: id, Error: CannotXContainerError{"Inspect", err.Error()}}
}
return metadataFromContainer(dockerContainer)
}
func metadataFromContainer(dockerContainer *docker.Container) DockerContainerMetadata {
var bindings []api.PortBinding
var err api.NamedError
if dockerContainer.NetworkSettings != nil {
// Convert port bindings into the format our container expects
bindings, err = api.PortBindingFromDockerPortBinding(dockerContainer.NetworkSettings.Ports)
if err != nil {
log.Crit("Docker had network bindings we couldn't understand", "err", err)
return DockerContainerMetadata{Error: api.NamedError(err)}
}
}
metadata := DockerContainerMetadata{
DockerId: dockerContainer.ID,
PortBindings: bindings,
Volumes: dockerContainer.Volumes,
}
if !dockerContainer.State.Running && !dockerContainer.State.FinishedAt.IsZero() {
// Only record an exitcode if it has exited
metadata.ExitCode = &dockerContainer.State.ExitCode
}
if dockerContainer.State.Error != "" {
metadata.Error = NewDockerStateError(dockerContainer.State.Error)
}
if dockerContainer.State.OOMKilled {
metadata.Error = OutOfMemoryError{}
}
return metadata
}
// Listen to the docker event stream for container changes and pass them up
func (dg *dockerGoClient) ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
events := make(chan *docker.APIEvents)
err = client.AddEventListener(events)
if err != nil {
log.Error("Unable to add a docker event listener", "err", err)
return nil, err
}
go func() {
<-ctx.Done()
client.RemoveEventListener(events)
}()
changedContainers := make(chan DockerContainerChangeEvent)
go func() {
for event := range events {
containerId := event.ID
if containerId == "" {
continue
}
log.Debug("Got event from docker daemon", "event", event)
var status api.ContainerStatus
switch event.Status {
case "create":
status = api.ContainerCreated
case "start":
status = api.ContainerRunning
case "stop":
fallthrough
case "die":
status = api.ContainerStopped
case "kill":
fallthrough
case "rename":
// TODO, ensure this wasn't one of our containers. This isn't critical
// because we typically have the docker id stored too and a wrong name
// won't be fatal once we do
continue
case "restart":
case "resize":
case "destroy":
case "unpause":
// These result in us falling through to inspect the container, some
// out of caution, some because it's a form of state change
case "oom":
seelog.Infof("process within container %v died due to OOM", event.ID)
// "oom" means some process in the container got OOM'd, but doesn't always
// mean the container dies (non-init processes). If the container also
// dies, you see a "die" status as well; we'll update suitably there
fallthrough
case "pause":
// non image events that aren't of interest currently
fallthrough
case "exec_create":
fallthrough
case "exec_start":
fallthrough
case "top":
fallthrough
case "attach":
fallthrough
// image events
case "export":
fallthrough
case "pull":
fallthrough
case "push":
fallthrough
case "tag":
fallthrough
case "untag":
fallthrough
case "import":
fallthrough
case "delete":
// No interest in image events
continue
default:
if strings.HasPrefix(event.Status, "exec_create:") || strings.HasPrefix(event.Status, "exec_start:") {
continue
}
// Because docker emits new events even when you use an old event api
// version, it's not that big a deal
seelog.Debugf("Unknown status event from docker: %s", event.Status)
}
metadata := dg.containerMetadata(containerId)
changedContainers <- DockerContainerChangeEvent{
Status: status,
DockerContainerMetadata: metadata,
}
}
}()
return changedContainers, nil
}
// ListContainers returns a slice of container IDs.
func (dg *dockerGoClient) ListContainers(all bool) ListContainersResponse {
timeout := ttime.After(listContainersTimeout)
response := make(chan ListContainersResponse, 1)
go func() { response <- dg.listContainers(all) }()
select {
case resp := <-response:
return resp
case <-timeout:
return ListContainersResponse{Error: &DockerTimeoutError{listContainersTimeout, "listing"}}
}
}
func (dg *dockerGoClient) listContainers(all bool) ListContainersResponse {
client, err := dg.dockerClient()
if err != nil {
return ListContainersResponse{Error: err}
}
containers, err := client.ListContainers(docker.ListContainersOptions{All: all})
if err != nil {
return ListContainersResponse{Error: err}
}
// We get an empty slice if there are no containers to be listed.
// Extract container IDs from this list.
containerIDs := make([]string, len(containers))
for i, container := range containers {
containerIDs[i] = container.ID
}
return ListContainersResponse{DockerIds: containerIDs, Error: nil}
}
func (dg *dockerGoClient) SupportedVersions() []dockerclient.DockerVersion {
return dg.clientFactory.FindAvailableVersions()
}
func (dg *dockerGoClient) Version() (string, error) {
client, err := dg.dockerClient()
if err != nil {
return "", err
}
info, err := client.Version()
if err != nil {
return "", err
}
return "DockerVersion: " + info.Get("Version"), nil
}
| 1 | 13,892 | Why did you pick two hours for this? | aws-amazon-ecs-agent | go |
@@ -110,8 +110,8 @@ func getNewPassword(gopts GlobalOptions) (string, error) {
newopts.password = ""
return ReadPasswordTwice(newopts,
- "enter password for new key: ",
- "enter password again: ")
+ "enter password (new): ",
+ "enter password (again): ")
}
func addKey(gopts GlobalOptions, repo *repository.Repository) error { | 1 | package main
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/table"
"github.com/spf13/cobra"
)
var cmdKey = &cobra.Command{
Use: "key [list|add|remove|passwd] [ID]",
Short: "Manage keys (passwords)",
Long: `
The "key" command manages keys (passwords) for accessing the repository.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runKey(globalOptions, args)
},
}
var newPasswordFile string
func init() {
cmdRoot.AddCommand(cmdKey)
flags := cmdKey.Flags()
flags.StringVarP(&newPasswordFile, "new-password-file", "", "", "the file from which to load a new password")
}
func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error {
type keyInfo struct {
Current bool `json:"current"`
ID string `json:"id"`
UserName string `json:"userName"`
HostName string `json:"hostName"`
Created string `json:"created"`
}
var keys []keyInfo
err := s.List(ctx, restic.KeyFile, func(id restic.ID, size int64) error {
k, err := repository.LoadKey(ctx, s, id.String())
if err != nil {
Warnf("LoadKey() failed: %v\n", err)
return nil
}
key := keyInfo{
Current: id.String() == s.KeyName(),
ID: id.Str(),
UserName: k.Username,
HostName: k.Hostname,
Created: k.Created.Local().Format(TimeFormat),
}
keys = append(keys, key)
return nil
})
if err != nil {
return err
}
if gopts.JSON {
return json.NewEncoder(globalOptions.stdout).Encode(keys)
}
tab := table.New()
tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}")
tab.AddColumn("User", "{{ .UserName }}")
tab.AddColumn("Host", "{{ .HostName }}")
tab.AddColumn("Created", "{{ .Created }}")
for _, key := range keys {
tab.AddRow(key)
}
return tab.Write(globalOptions.stdout)
}
// testKeyNewPassword is used to set a new password during integration testing.
var testKeyNewPassword string
func getNewPassword(gopts GlobalOptions) (string, error) {
if testKeyNewPassword != "" {
return testKeyNewPassword, nil
}
if newPasswordFile != "" {
return loadPasswordFromFile(newPasswordFile)
}
// Since we already have an open repository, temporarily remove the password
// to prompt the user for a new one.
newopts := gopts
newopts.password = ""
return ReadPasswordTwice(newopts,
"enter password for new key: ",
"enter password again: ")
}
func addKey(gopts GlobalOptions, repo *repository.Repository) error {
pw, err := getNewPassword(gopts)
if err != nil {
return err
}
id, err := repository.AddKey(gopts.ctx, repo, pw, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
Verbosef("saved new key as %s\n", id)
return nil
}
func deleteKey(ctx context.Context, repo *repository.Repository, name string) error {
if name == repo.KeyName() {
return errors.Fatal("refusing to remove key currently used to access repository")
}
h := restic.Handle{Type: restic.KeyFile, Name: name}
err := repo.Backend().Remove(ctx, h)
if err != nil {
return err
}
Verbosef("removed key %v\n", name)
return nil
}
func changePassword(gopts GlobalOptions, repo *repository.Repository) error {
pw, err := getNewPassword(gopts)
if err != nil {
return err
}
id, err := repository.AddKey(gopts.ctx, repo, pw, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
h := restic.Handle{Type: restic.KeyFile, Name: repo.KeyName()}
err = repo.Backend().Remove(gopts.ctx, h)
if err != nil {
return err
}
Verbosef("saved new key as %s\n", id)
return nil
}
func runKey(gopts GlobalOptions, args []string) error {
if len(args) < 1 || (args[0] == "remove" && len(args) != 2) || (args[0] != "remove" && len(args) != 1) {
return errors.Fatal("wrong number of arguments")
}
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
switch args[0] {
case "list":
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
return listKeys(ctx, repo, gopts)
case "add":
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
return addKey(gopts, repo)
case "remove":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1])
if err != nil {
return err
}
return deleteKey(gopts.ctx, repo, id)
case "passwd":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
return changePassword(gopts, repo)
}
return nil
}
func loadPasswordFromFile(pwdFile string) (string, error) {
s, err := ioutil.ReadFile(pwdFile)
if os.IsNotExist(err) {
return "", errors.Fatalf("%s does not exist", pwdFile)
}
return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
 | 1 | 13,603 | What about "enter new password"? That would be a bit more consistent with the `ReadPasswordTwice` usage in cmd_init. | restic-restic | go
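For comparison, a simplified, self-contained stand-in for the double-prompt flow; unlike restic's real ReadPasswordTwice it echoes input instead of disabling terminal echo, so it only sketches the control flow:

package main

import (
	"bufio"
	"errors"
	"fmt"
	"os"
	"strings"
)

// readPasswordTwice prompts twice and insists both answers match, the same
// contract the command above relies on when setting a new key password.
func readPasswordTwice(r *bufio.Reader, prompt1, prompt2 string) (string, error) {
	fmt.Print(prompt1)
	p1, err := r.ReadString('\n')
	if err != nil {
		return "", err
	}
	fmt.Print(prompt2)
	p2, err := r.ReadString('\n')
	if err != nil {
		return "", err
	}
	p1, p2 = strings.TrimSpace(p1), strings.TrimSpace(p2)
	if p1 != p2 {
		return "", errors.New("passwords do not match")
	}
	return p1, nil
}

func main() {
	pw, err := readPasswordTwice(bufio.NewReader(os.Stdin),
		"enter new password: ",
		"enter password again: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("accepted a %d-character password\n", len(pw))
}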
@@ -20,7 +20,7 @@ if not versionInfo.updateVersionType:
raise RuntimeError("No update version type, update checking not supported")
import addonAPIVersion
# Avoid an E402 'module level import not at top of file' warning, because several checks are performed above.
-from gui.contextHelp import ContextHelpMixin # noqa: E402
+import gui.contextHelp # noqa: E402
from gui.dpiScalingHelper import DpiScalingHelperMixin, DpiScalingHelperMixinWithoutInit # noqa: E402
import winVersion
import os | 1 | #updateCheck.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2012-2019 NV Access Limited, Zahari Yurukov, Babbage B.V., Joseph Lee
"""Update checking functionality.
@note: This module may raise C{RuntimeError} on import if update checking for this build is not supported.
"""
import garbageHandler
import globalVars
import config
if globalVars.appArgs.secure:
raise RuntimeError("updates disabled in secure mode")
elif config.isAppX:
raise RuntimeError("updates managed by Windows Store")
import versionInfo
if not versionInfo.updateVersionType:
raise RuntimeError("No update version type, update checking not supported")
import addonAPIVersion
# Avoid an E402 'module level import not at top of file' warning, because several checks are performed above.
from gui.contextHelp import ContextHelpMixin # noqa: E402
from gui.dpiScalingHelper import DpiScalingHelperMixin, DpiScalingHelperMixinWithoutInit # noqa: E402
import winVersion
import os
import inspect
import threading
import time
import pickle
# #9818: one must import at least urllib.request in Python 3 in order to use full urllib functionality.
import urllib.request
import urllib.parse
import tempfile
import hashlib
import ctypes.wintypes
import ssl
import wx
import languageHandler
# Avoid an E402 'module level import not at top of file' warning, because several checks are performed above.
import synthDriverHandler # noqa: E402
import braille
import gui
from gui import guiHelper
from addonHandler import getCodeAddon, AddonError, getIncompatibleAddons
from logHandler import log, isPathExternalToNVDA
import shellapi
import winUser
import winKernel
import fileUtils
#: The URL to use for update checks.
CHECK_URL = "https://www.nvaccess.org/nvdaUpdateCheck"
#: The time to wait between checks.
CHECK_INTERVAL = 86400 # 1 day
#: The time to wait before retrying a failed check.
RETRY_INTERVAL = 600 # 10 min
#: The download block size in bytes.
DOWNLOAD_BLOCK_SIZE = 8192 # 8 kb
#: directory to store pending update files
storeUpdatesDir=os.path.join(globalVars.appArgs.configPath, 'updates')
try:
os.makedirs(storeUpdatesDir)
except OSError:
if not os.path.isdir(storeUpdatesDir):
log.debugWarning("Default download path for updates %s could not be created."%storeUpdatesDir)
#: Persistent state information.
#: @type: dict
state = None
_stateFileName = None
#: The single instance of L{AutoUpdateChecker} if automatic update checking is enabled,
#: C{None} if it is disabled.
autoChecker = None
def getQualifiedDriverClassNameForStats(cls):
""" fetches the name from a given synthDriver or brailleDisplay class, and appends core for in-built code, the add-on name for code from an add-on, or external for code in the NVDA user profile.
Some examples:
espeak (core)
newfon (external)
eloquence (addon:CodeFactory)
noBraille (core)
"""
name=cls.name
try:
addon=getCodeAddon(cls)
except AddonError:
addon=None
if addon:
return "%s (addon:%s)"%(name,addon.name)
path=inspect.getsourcefile(cls)
if isPathExternalToNVDA(path):
return "%s (external)"%name
return "%s (core)"%name
def checkForUpdate(auto=False):
"""Check for an updated version of NVDA.
This will block, so it generally shouldn't be called from the main thread.
@param auto: Whether this is an automatic check for updates.
@type auto: bool
@return: Information about the update or C{None} if there is no update.
@rtype: dict
@raise RuntimeError: If there is an error checking for an update.
"""
allowUsageStats=config.conf["update"]['allowUsageStats']
params = {
"autoCheck": auto,
"allowUsageStats":allowUsageStats,
"version": versionInfo.version,
"versionType": versionInfo.updateVersionType,
"osVersion": winVersion.winVersionText,
"x64": os.environ.get("PROCESSOR_ARCHITEW6432") == "AMD64",
}
if auto and allowUsageStats:
synthDriverClass = synthDriverHandler.getSynth().__class__
brailleDisplayClass = braille.handler.display.__class__ if braille.handler else None
# Following are parameters sent purely for stats gathering.
# If new parameters are added here, they must be documented in the userGuide for transparency.
extraParams={
"language": languageHandler.getLanguage(),
"installed": config.isInstalledCopy(),
"synthDriver":getQualifiedDriverClassNameForStats(synthDriverClass) if synthDriverClass else None,
"brailleDisplay":getQualifiedDriverClassNameForStats(brailleDisplayClass) if brailleDisplayClass else None,
"outputBrailleTable":config.conf['braille']['translationTable'] if brailleDisplayClass else None,
}
params.update(extraParams)
url = "%s?%s" % (CHECK_URL, urllib.parse.urlencode(params))
try:
res = urllib.request.urlopen(url)
except IOError as e:
if isinstance(e.strerror, ssl.SSLError) and e.strerror.reason == "CERTIFICATE_VERIFY_FAILED":
# #4803: Windows fetches trusted root certificates on demand.
# Python doesn't trigger this fetch (PythonIssue:20916), so try it ourselves
_updateWindowsRootCertificates()
# and then retry the update check.
res = urllib.request.urlopen(url)
else:
raise
if res.code != 200:
raise RuntimeError("Checking for update failed with code %d" % res.code)
info = {}
for line in res:
# #9819: update description resource returns bytes, so make it Unicode.
line = line.decode("utf-8").rstrip()
try:
key, val = line.split(": ", 1)
except ValueError:
raise RuntimeError("Error in update check output")
info[key] = val
if not info:
return None
return info
def _setStateToNone(_state):
_state["pendingUpdateFile"] = None
_state["pendingUpdateVersion"] = None
_state["pendingUpdateAPIVersion"] = (0,0,0)
_state["pendingUpdateBackCompatToAPIVersion"] = (0,0,0)
def getPendingUpdate():
"""Returns a tuple of the path to and version of the pending update, if any. Returns C{None} otherwise.
@rtype: tuple
"""
try:
pendingUpdateFile=state["pendingUpdateFile"]
pendingUpdateVersion=state["pendingUpdateVersion"]
pendingUpdateAPIVersion=state["pendingUpdateAPIVersion"] or (0,0,0)
pendingUpdateBackCompatToAPIVersion=state["pendingUpdateBackCompatToAPIVersion"] or (0,0,0)
except KeyError:
_setStateToNone(state)
return None
else:
if pendingUpdateFile and os.path.isfile(pendingUpdateFile):
return (
pendingUpdateFile, pendingUpdateVersion, pendingUpdateAPIVersion, pendingUpdateBackCompatToAPIVersion
)
else:
_setStateToNone(state)
return None
def isPendingUpdate():
"""Returns whether there is a pending update.
@rtype: bool
"""
return bool(getPendingUpdate())
def executePendingUpdate():
updateTuple = getPendingUpdate()
if not updateTuple:
return
else:
_executeUpdate(updateTuple[0])
def _executeUpdate(destPath):
if not destPath:
return
_setStateToNone(state)
saveState()
if config.isInstalledCopy():
executeParams = u"--install -m"
else:
portablePath = globalVars.appDir
if os.access(portablePath, os.W_OK):
executeParams = u'--create-portable --portable-path "{portablePath}" --config-path "{configPath}" -m'.format(
portablePath=portablePath,
configPath=globalVars.appArgs.configPath
)
else:
executeParams = u"--launcher"
	# #4475: ensure that the new process shows its first window by providing SW_SHOWNORMAL
shellapi.ShellExecute(None, None,
destPath,
executeParams,
None, winUser.SW_SHOWNORMAL)
class UpdateChecker(garbageHandler.TrackedObject):
"""Check for an updated version of NVDA, presenting appropriate user interface.
The check is performed in the background.
This class is for manual update checks.
To use, call L{check} on an instance.
"""
AUTO = False
def check(self):
"""Check for an update.
"""
t = threading.Thread(
name=f"{self.__class__.__module__}.{self.check.__qualname__}",
target=self._bg
)
t.daemon = True
self._started()
t.start()
def _bg(self):
try:
info = checkForUpdate(self.AUTO)
except:
log.debugWarning("Error checking for update", exc_info=True)
self._error()
return
self._result(info)
if info:
state["dontRemindVersion"] = info["version"]
state["lastCheck"] = time.time()
saveState()
if autoChecker:
autoChecker.setNextCheck()
def _started(self):
self._progressDialog = gui.IndeterminateProgressDialog(gui.mainFrame,
# Translators: The title of the dialog displayed while manually checking for an NVDA update.
_("Checking for Update"),
# Translators: The progress message displayed while manually checking for an NVDA update.
_("Checking for update"))
def _error(self):
wx.CallAfter(self._progressDialog.done)
self._progressDialog = None
wx.CallAfter(gui.messageBox,
# Translators: A message indicating that an error occurred while checking for an update to NVDA.
_("Error checking for update."),
# Translators: The title of an error message dialog.
_("Error"),
wx.OK | wx.ICON_ERROR)
def _result(self, info):
wx.CallAfter(self._progressDialog.done)
self._progressDialog = None
wx.CallAfter(UpdateResultDialog, gui.mainFrame, info, False)
class AutoUpdateChecker(UpdateChecker):
"""Automatically check for an updated version of NVDA.
To use, create a single instance and maintain a reference to it.
Checks will then be performed automatically.
"""
AUTO = True
def __init__(self):
self._checkTimer = gui.NonReEntrantTimer(self.check)
if config.conf["update"]["startupNotification"] and isPendingUpdate():
secsTillNext = 0 # Display the update message instantly
else:
# Set the initial check based on the last check time.
# #3260: If the system time is earlier than the last check,
# treat the last check as being right now (so the next will be tomorrow).
secsSinceLast = max(time.time() - state["lastCheck"], 0)
# The maximum time till the next check is CHECK_INTERVAL.
secsTillNext = CHECK_INTERVAL - int(min(secsSinceLast, CHECK_INTERVAL))
self._checkTimer.Start(secsTillNext * 1000, True)
def terminate(self):
self._checkTimer.Stop()
self._checkTimer = None
def setNextCheck(self, isRetry=False):
# #6127: Timers must be manipulated from the main thread.
wx.CallAfter(self._checkTimer.Stop)
wx.CallAfter(self._checkTimer.Start, (RETRY_INTERVAL if isRetry else CHECK_INTERVAL) * 1000, True)
def _started(self):
log.info("Performing automatic update check")
def _error(self):
self.setNextCheck(isRetry=True)
def _result(self, info):
if not info:
return
if info["version"]==state["dontRemindVersion"]:
return
wx.CallAfter(UpdateResultDialog, gui.mainFrame, info, True)
class UpdateResultDialog(
DpiScalingHelperMixinWithoutInit,
ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "GeneralSettingsCheckForUpdates"
def __init__(self, parent, updateInfo, auto):
# Translators: The title of the dialog informing the user about an NVDA update.
super().__init__(parent, title=_("NVDA Update"))
self.updateInfo = updateInfo
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
pendingUpdateDetails = getPendingUpdate()
canOfferPendingUpdate = isPendingUpdate() and pendingUpdateDetails[1] == updateInfo["version"]
text = sHelper.addItem(wx.StaticText(self))
bHelper = guiHelper.ButtonHelper(wx.HORIZONTAL)
if not updateInfo:
# Translators: A message indicating that no update to NVDA is available.
message = _("No update available.")
elif canOfferPendingUpdate:
# Translators: A message indicating that an updated version of NVDA has been downloaded
# and is pending to be installed.
message = _("NVDA version {version} has been downloaded and is pending installation.").format(**updateInfo)
self.apiVersion = pendingUpdateDetails[2]
self.backCompatTo = pendingUpdateDetails[3]
showAddonCompat = any(getIncompatibleAddons(
currentAPIVersion=self.apiVersion,
backCompatToAPIVersion=self.backCompatTo
))
if showAddonCompat:
message = message + _(
# Translators: A message indicating that some add-ons will be disabled
# unless reviewed before installation.
"\n\n"
"However, your NVDA configuration contains add-ons that are incompatible with this version of NVDA. "
"These add-ons will be disabled after installation. If you rely on these add-ons, "
"please review the list to decide whether to continue with the installation"
)
confirmationCheckbox = sHelper.addItem(wx.CheckBox(
self,
					# Translators: A message to confirm that the user understands that add-ons that have not been
					# reviewed and made available will be disabled after installation.
label=_("I understand that these incompatible add-ons will be disabled")
))
confirmationCheckbox.Bind(
wx.EVT_CHECKBOX,
lambda evt: self.installPendingButton.Enable(not self.installPendingButton.Enabled)
)
confirmationCheckbox.SetFocus()
# Translators: The label of a button to review add-ons prior to NVDA update.
reviewAddonsButton = bHelper.addButton(self, label=_("&Review add-ons..."))
reviewAddonsButton.Bind(wx.EVT_BUTTON, self.onReviewAddonsButton)
self.installPendingButton = bHelper.addButton(
self,
# Translators: The label of a button to install a pending NVDA update.
# {version} will be replaced with the version; e.g. 2011.3.
label=_("&Install NVDA {version}").format(**updateInfo)
)
self.installPendingButton.Bind(
wx.EVT_BUTTON,
lambda evt: self.onInstallButton(pendingUpdateDetails[0])
)
self.installPendingButton.Enable(not showAddonCompat)
bHelper.addButton(
self,
# Translators: The label of a button to re-download a pending NVDA update.
label=_("Re-&download update")
).Bind(wx.EVT_BUTTON, self.onDownloadButton)
else:
# Translators: A message indicating that an updated version of NVDA is available.
# {version} will be replaced with the version; e.g. 2011.3.
message = _("NVDA version {version} is available.").format(**updateInfo)
bHelper.addButton(
self,
# Translators: The label of a button to download an NVDA update.
label=_("&Download update")
).Bind(wx.EVT_BUTTON, self.onDownloadButton)
if auto: # this prompt was triggered by auto update checker
# the user might not want to wait for a download right now, so give the option to be reminded later.
# Translators: The label of a button to remind the user later about performing some action.
remindMeButton = bHelper.addButton(self, label=_("Remind me &later"))
remindMeButton.Bind(wx.EVT_BUTTON, self.onLaterButton)
remindMeButton.SetFocus()
text.SetLabel(message)
text.Wrap(self.scaleSize(500))
sHelper.addDialogDismissButtons(bHelper)
# Translators: The label of a button to close a dialog.
closeButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("&Close"))
closeButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy())
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
self.Show()
def onInstallButton(self, destPath):
_executeUpdate(destPath)
self.Destroy()
def onDownloadButton(self, evt):
self.Hide()
DonateRequestDialog(gui.mainFrame, self._download)
def _download(self):
UpdateDownloader(self.updateInfo).start()
self.Destroy()
def onLaterButton(self, evt):
state["dontRemindVersion"] = None
saveState()
self.Close()
def onReviewAddonsButton(self, evt):
from gui import addonGui
incompatibleAddons = addonGui.IncompatibleAddonsDialog(
parent=self,
APIVersion=self.apiVersion,
APIBackwardsCompatToVersion=self.backCompatTo
)
incompatibleAddons.ShowModal()
class UpdateAskInstallDialog(
DpiScalingHelperMixinWithoutInit,
gui.ContextHelpMixin,
wx.Dialog, # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "GeneralSettingsCheckForUpdates"
def __init__(self, parent, destPath, version, apiVersion, backCompatTo):
self.destPath = destPath
self.version = version
self.apiVersion = apiVersion
self.backCompatTo = backCompatTo
self.storeUpdatesDirWritable = os.path.isdir(storeUpdatesDir) and os.access(storeUpdatesDir, os.W_OK)
# Translators: The title of the dialog asking the user to Install an NVDA update.
super().__init__(parent, title=_("NVDA Update"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: A message indicating that an updated version of NVDA is ready to be installed.
message = _("NVDA version {version} is ready to be installed.\n").format(version=version)
showAddonCompat = any(getIncompatibleAddons(
currentAPIVersion=self.apiVersion,
backCompatToAPIVersion=self.backCompatTo
))
if showAddonCompat:
message = message + _(
# Translators: A message indicating that some add-ons will be disabled
# unless reviewed before installation.
"\n"
"However, your NVDA configuration contains add-ons that are incompatible with this version of NVDA. "
"These add-ons will be disabled after installation. If you rely on these add-ons, "
"please review the list to decide whether to continue with the installation"
)
text = sHelper.addItem(wx.StaticText(self, label=message))
text.Wrap(self.scaleSize(500))
if showAddonCompat:
self.confirmationCheckbox = sHelper.addItem(wx.CheckBox(
self,
				# Translators: A message to confirm that the user understands that add-ons that have not been reviewed and made
				# available will be disabled after installation.
label=_("I understand that these incompatible add-ons will be disabled")
))
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
if showAddonCompat:
# Translators: The label of a button to review add-ons prior to NVDA update.
reviewAddonsButton = bHelper.addButton(self, label=_("&Review add-ons..."))
reviewAddonsButton.Bind(wx.EVT_BUTTON, self.onReviewAddonsButton)
# Translators: The label of a button to install an NVDA update.
installButton = bHelper.addButton(self, wx.ID_OK, label=_("&Install update"))
installButton.Bind(wx.EVT_BUTTON, self.onInstallButton)
if not showAddonCompat:
installButton.SetFocus()
else:
self.confirmationCheckbox.SetFocus()
self.confirmationCheckbox.Bind(
wx.EVT_CHECKBOX,
lambda evt: installButton.Enable(not installButton.Enabled)
)
installButton.Enable(False)
if self.storeUpdatesDirWritable:
# Translators: The label of a button to postpone an NVDA update.
postponeButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("&Postpone update"))
postponeButton.Bind(wx.EVT_BUTTON, self.onPostponeButton)
self.EscapeId = wx.ID_CLOSE
else:
self.EscapeId = wx.ID_OK
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
def onReviewAddonsButton(self, evt):
from gui import addonGui
incompatibleAddons = addonGui.IncompatibleAddonsDialog(
parent=self,
APIVersion=self.apiVersion,
APIBackwardsCompatToVersion=self.backCompatTo
)
incompatibleAddons.ShowModal()
def onInstallButton(self, evt):
_executeUpdate(self.destPath)
self.EndModal(wx.ID_OK)
def onPostponeButton(self, evt):
finalDest=os.path.join(storeUpdatesDir, os.path.basename(self.destPath))
try:
			# #9825: the behavior of os.rename(s) has changed (see https://bugs.python.org/issue28356).
			# In Python 2, os.renames could rename files across drives; Python 3 no longer allows this and raises error 17 (cannot move files across drives).
			# This is prominent when trying to postpone an update for a portable copy of NVDA that runs from a USB flash drive or another internal storage device.
# Therefore use kernel32::MoveFileEx with copy allowed (0x2) flag set.
winKernel.moveFileEx(self.destPath, finalDest, winKernel.MOVEFILE_COPY_ALLOWED)
except:
log.debugWarning("Unable to rename the file from {} to {}".format(self.destPath, finalDest), exc_info=True)
gui.messageBox(
# Translators: The message when a downloaded update file could not be preserved.
_("Unable to postpone update."),
# Translators: The title of the message when a downloaded update file could not be preserved.
_("Error"),
wx.OK | wx.ICON_ERROR)
finalDest=self.destPath
state["pendingUpdateFile"]=finalDest
state["pendingUpdateVersion"]=self.version
state["pendingUpdateAPIVersion"]=self.apiVersion
state["pendingUpdateBackCompatToAPIVersion"]=self.backCompatTo
# Postponing an update indicates that the user is likely interested in getting a reminder.
# Therefore, clear the dontRemindVersion.
state["dontRemindVersion"] = None
saveState()
self.EndModal(wx.ID_CLOSE)
class UpdateDownloader(garbageHandler.TrackedObject):
"""Download and start installation of an updated version of NVDA, presenting appropriate user interface.
To use, call L{start} on an instance.
"""
def __init__(self, updateInfo):
"""Constructor.
@param updateInfo: update information such as possible URLs, version and the SHA-1 hash of the file as a hex string.
@type updateInfo: dict
"""
from addonAPIVersion import getAPIVersionTupleFromString
self.updateInfo = updateInfo
self.urls = updateInfo["launcherUrl"].split(" ")
self.version = updateInfo["version"]
self.apiVersion = getAPIVersionTupleFromString(updateInfo["apiVersion"])
self.backCompatToAPIVersion = getAPIVersionTupleFromString(updateInfo["apiCompatTo"])
self.versionTuple = None
self.fileHash = updateInfo.get("launcherHash")
self.destPath = tempfile.mktemp(prefix="nvda_update_", suffix=".exe")
def start(self):
"""Start the download.
"""
self._shouldCancel = False
# Use a timer because timers aren't re-entrant.
self._guiExecTimer = gui.NonReEntrantTimer(self._guiExecNotify)
gui.mainFrame.prePopup()
# Translators: The title of the dialog displayed while downloading an NVDA update.
self._progressDialog = wx.ProgressDialog(_("Downloading Update"),
# Translators: The progress message indicating that a connection is being established.
_("Connecting"),
# PD_AUTO_HIDE is required because ProgressDialog.Update blocks at 100%
# and waits for the user to press the Close button.
style=wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE,
parent=gui.mainFrame)
self._progressDialog.Raise()
t = threading.Thread(
name=f"{self.__class__.__module__}.{self.start.__qualname__}",
target=self._bg
)
t.daemon = True
t.start()
def _guiExec(self, func, *args):
self._guiExecFunc = func
self._guiExecArgs = args
if not self._guiExecTimer.IsRunning():
# #6127: Timers must be manipulated from the main thread.
wx.CallAfter(self._guiExecTimer.Start, 50, True)
def _guiExecNotify(self):
self._guiExecFunc(*self._guiExecArgs)
def _bg(self):
success=False
for url in self.urls:
try:
self._download(url)
except:
log.error("Error downloading %s" % url, exc_info=True)
			else: # Successfully downloaded or canceled
if not self._shouldCancel:
success=True
break
else:
# None of the URLs succeeded.
self._guiExec(self._error)
return
if not success:
try:
os.remove(self.destPath)
except OSError:
pass
return
self._guiExec(self._downloadSuccess)
def _download(self, url):
# #2352: Some security scanners such as Eset NOD32 HTTP Scanner
# cause huge read delays while downloading.
# Therefore, set a higher timeout.
remote = urllib.request.urlopen(url, timeout=120)
if remote.code != 200:
raise RuntimeError("Download failed with code %d" % remote.code)
size = int(remote.headers["content-length"])
with open(self.destPath, "wb") as local:
if self.fileHash:
hasher = hashlib.sha1()
self._guiExec(self._downloadReport, 0, size)
read = 0
chunk=DOWNLOAD_BLOCK_SIZE
while True:
if self._shouldCancel:
return
				if size - read < chunk:
					chunk = size - read
block = remote.read(chunk)
if not block:
break
read += len(block)
if self._shouldCancel:
return
local.write(block)
if self.fileHash:
hasher.update(block)
self._guiExec(self._downloadReport, read, size)
if read < size:
raise RuntimeError("Content too short")
if self.fileHash and hasher.hexdigest() != self.fileHash:
raise RuntimeError("Content has incorrect file hash")
self._guiExec(self._downloadReport, read, size)
def _downloadReport(self, read, size):
if self._shouldCancel:
return
percent = int(float(read) / size * 100)
# Translators: The progress message indicating that a download is in progress.
cont, skip = self._progressDialog.Update(percent, _("Downloading"))
if not cont:
self._shouldCancel = True
self._stopped()
def _stopped(self):
self._guiExecTimer = None
self._guiExecFunc = None
self._guiExecArgs = None
self._progressDialog.Hide()
self._progressDialog.Destroy()
self._progressDialog = None
# Not sure why, but this doesn't work if we call it directly here.
wx.CallLater(50, gui.mainFrame.postPopup)
def _error(self):
self._stopped()
gui.messageBox(
# Translators: A message indicating that an error occurred while downloading an update to NVDA.
_("Error downloading update."),
_("Error"),
wx.OK | wx.ICON_ERROR)
def _downloadSuccess(self):
self._stopped()
gui.runScriptModalDialog(UpdateAskInstallDialog(
parent=gui.mainFrame,
destPath=self.destPath,
version=self.version,
apiVersion=self.apiVersion,
backCompatTo=self.backCompatToAPIVersion
))
class DonateRequestDialog(wx.Dialog):
MESSAGE = _(
# Translators: The message requesting donations from users.
"We need your help in order to continue to improve NVDA.\n"
"This project relies primarily on donations and grants. By donating, you are helping to fund full time development.\n"
"If even $10 is donated for every download, we will be able to cover all of the ongoing costs of the project.\n"
"All donations are received by NV Access, the non-profit organisation which develops NVDA.\n"
"Thank you for your support."
)
def __init__(self, parent, continueFunc):
# Translators: The title of the dialog requesting donations from users.
super(DonateRequestDialog, self).__init__(parent, title=_("Please Donate"))
self._continue = continueFunc
mainSizer=wx.BoxSizer(wx.VERTICAL)
item = wx.StaticText(self, label=self.MESSAGE)
mainSizer.Add(item, border=20, flag=wx.LEFT | wx.RIGHT | wx.TOP)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of the button to donate
# in the "Please Donate" dialog.
item = self.donateButton = wx.Button(self, label=_("&Donate"))
item.Bind(wx.EVT_BUTTON, self.onDonate)
sizer.Add(item)
# Translators: The label of the button to decline donation
# in the "Please Donate" dialog.
item = wx.Button(self, wx.ID_CLOSE, label=_("&Not now"))
item.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
sizer.Add(item)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sizer, flag=wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, border=20)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
self.Show()
def onDonate(self, evt):
os.startfile(gui.DONATE_URL)
# Translators: The label of a button to indicate that the user is finished donating
# in the "Please Donate" dialog.
self.donateButton.Label = _("&Done")
self.donateButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
def onClose(self, evt):
self.Hide()
self._continue()
self.Destroy()
def saveState():
try:
# #9038: Python 3 requires binary format when working with pickles.
with open(_stateFilename, "wb") as f:
pickle.dump(state, f, protocol=0)
except:
log.debugWarning("Error saving state", exc_info=True)
def initialize():
global state, _stateFilename, autoChecker
_stateFilename = os.path.join(globalVars.appArgs.configPath, "updateCheckState.pickle")
try:
# #9038: Python 3 requires binary format when working with pickles.
with open(_stateFilename, "rb") as f:
state = pickle.load(f)
except:
log.debugWarning("Couldn't retrieve update state", exc_info=True)
# Defaults.
state = {
"lastCheck": 0,
"dontRemindVersion": None,
}
_setStateToNone(state)
# check the pending version against the current version
# and make sure that pendingUpdateFile and pendingUpdateVersion are part of the state dictionary.
if "pendingUpdateVersion" not in state or state["pendingUpdateVersion"] == versionInfo.version:
_setStateToNone(state)
# remove all update files except the one that is currently pending (if any)
try:
for fileName in os.listdir(storeUpdatesDir):
f=os.path.join(storeUpdatesDir, fileName)
if f != state["pendingUpdateFile"]:
os.remove(f)
log.debug("Update file %s removed"%f)
except OSError:
log.warning("Unable to remove old update file %s"%f, exc_info=True)
if not globalVars.appArgs.launcher and (config.conf["update"]["autoCheck"] or (config.conf["update"]["startupNotification"] and isPendingUpdate())):
autoChecker = AutoUpdateChecker()
def terminate():
global state, autoChecker
state = None
if autoChecker:
autoChecker.terminate()
autoChecker = None
# These structs are only complete enough to achieve what we need.
class CERT_USAGE_MATCH(ctypes.Structure):
_fields_ = (
("dwType", ctypes.wintypes.DWORD),
# CERT_ENHKEY_USAGE struct
("cUsageIdentifier", ctypes.wintypes.DWORD),
("rgpszUsageIdentifier", ctypes.c_void_p), # LPSTR *
)
class CERT_CHAIN_PARA(ctypes.Structure):
_fields_ = (
("cbSize", ctypes.wintypes.DWORD),
("RequestedUsage", CERT_USAGE_MATCH),
("RequestedIssuancePolicy", CERT_USAGE_MATCH),
("dwUrlRetrievalTimeout", ctypes.wintypes.DWORD),
("fCheckRevocationFreshnessTime", ctypes.wintypes.BOOL),
("dwRevocationFreshnessTime", ctypes.wintypes.DWORD),
("pftCacheResync", ctypes.c_void_p), # LPFILETIME
("pStrongSignPara", ctypes.c_void_p), # PCCERT_STRONG_SIGN_PARA
("dwStrongSignFlags", ctypes.wintypes.DWORD),
)
def _updateWindowsRootCertificates():
crypt = ctypes.windll.crypt32
# Get the server certificate.
sslCont = ssl._create_unverified_context()
# We must specify versionType so the server doesn't return a 404 error and
# thus cause an exception.
u = urllib.request.urlopen(CHECK_URL + "?versionType=stable", context=sslCont)
cert = u.fp.raw._sock.getpeercert(True)
u.close()
# Convert to a form usable by Windows.
certCont = crypt.CertCreateCertificateContext(
0x00000001, # X509_ASN_ENCODING
cert,
len(cert))
# Ask Windows to build a certificate chain, thus triggering a root certificate update.
chainCont = ctypes.c_void_p()
crypt.CertGetCertificateChain(None, certCont, None, None,
ctypes.byref(CERT_CHAIN_PARA(cbSize=ctypes.sizeof(CERT_CHAIN_PARA),
RequestedUsage=CERT_USAGE_MATCH())),
0, None,
ctypes.byref(chainCont))
crypt.CertFreeCertificateChain(chainCont)
crypt.CertFreeCertificateContext(certCont)
| 1 | 31,492 | Another redundant import | nvaccess-nvda | py |
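A note on the review comment above: updateCheck.py imports `config` twice — once near the top, where it is needed for the early secure-mode and AppX checks, and again in the later import block — which appears to be the redundancy being flagged. The pattern, with the intervening imports elided:

import config  # first import, used by the early startup checks
# ... many unrelated imports ...
import config  # redundant re-import later in the same module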
@@ -120,7 +120,7 @@ func (c *client) FetchUpdates(ctx context.Context, req *node.FetchX509SVIDReques
// Close the stream whether there was an error or not
stream.CloseSend()
if err != nil {
- // TODO: should we try to create a new stream?
+ c.Release()
return nil, err
}
| 1 | package client
import (
"context"
"crypto/ecdsa"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net/url"
"sync"
"time"
"github.com/sirupsen/logrus"
spiffe_tls "github.com/spiffe/go-spiffe/tls"
"github.com/spiffe/spire/pkg/common/grpcutil"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/proto/api/node"
"github.com/spiffe/spire/proto/common"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
var (
ErrUnableToGetStream = errors.New("unable to get a stream")
)
type JWTSVID struct {
Token string
IssuedAt time.Time
ExpiresAt time.Time
}
type Client interface {
FetchUpdates(ctx context.Context, req *node.FetchX509SVIDRequest) (*Update, error)
FetchJWTSVID(ctx context.Context, jsr *node.JSR) (*JWTSVID, error)
// Release releases any resources that were held by this Client, if any.
Release()
}
// Config holds a client configuration
type Config struct {
Addr string
Log logrus.FieldLogger
TrustDomain url.URL
// KeysAndBundle is a callback that must return the keys and bundle used by the client
// to connect via mTLS to Addr.
KeysAndBundle func() ([]*x509.Certificate, *ecdsa.PrivateKey, []*x509.Certificate)
}
type client struct {
c *Config
conn *grpc.ClientConn
m sync.Mutex
// Callback to be used for testing purposes.
newNodeClientCallback func() (node.NodeClient, error)
}
// New creates a new client struct with the configuration provided
func New(c *Config) *client {
return &client{
c: c,
}
}
func (c *client) credsFunc() (credentials.TransportCredentials, error) {
var tlsCerts []tls.Certificate
var tlsConfig *tls.Config
svid, key, bundle := c.c.KeysAndBundle()
spiffePeer := &spiffe_tls.TLSPeer{
SpiffeIDs: []string{idutil.ServerID(c.c.TrustDomain.Host)},
TrustRoots: util.NewCertPool(bundle...),
}
tlsCert := tls.Certificate{PrivateKey: key}
for _, cert := range svid {
tlsCert.Certificate = append(tlsCert.Certificate, cert.Raw)
}
tlsCerts = append(tlsCerts, tlsCert)
tlsConfig = spiffePeer.NewTLSConfig(tlsCerts)
return credentials.NewTLS(tlsConfig), nil
}
func (c *client) dial(ctx context.Context) (*grpc.ClientConn, error) {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second) // TODO: Make this timeout configurable?
defer cancel()
config := grpcutil.GRPCDialerConfig{
Log: grpcutil.LoggerFromFieldLogger(c.c.Log),
CredFunc: c.credsFunc,
}
dialer := grpcutil.NewGRPCDialer(config)
conn, err := dialer.Dial(ctx, c.c.Addr)
if err != nil {
return nil, fmt.Errorf("cannot create connection: %v", err)
}
return conn, nil
}
func (c *client) FetchUpdates(ctx context.Context, req *node.FetchX509SVIDRequest) (*Update, error) {
nodeClient, err := c.newNodeClient(ctx)
if err != nil {
return nil, err
}
stream, err := nodeClient.FetchX509SVID(ctx)
// We weren't able to get a stream...close the client and return the error.
if err != nil {
c.Release()
c.c.Log.Errorf("Failure fetching X509 SVID. %v: %v", ErrUnableToGetStream, err)
return nil, ErrUnableToGetStream
}
// Send the request to the server using the stream.
err = stream.Send(req)
// Close the stream whether there was an error or not
stream.CloseSend()
if err != nil {
// TODO: should we try to create a new stream?
return nil, err
}
regEntries := map[string]*common.RegistrationEntry{}
svids := map[string]*node.X509SVID{}
bundles := map[string]*common.Bundle{}
// Read all the server responses from the stream.
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// There was an error receiving a response, exit loop to return what we have.
logrus.Errorf("failed to consume entire SVID update stream: %v", err)
return nil, err
}
if resp.SvidUpdate == nil {
logrus.Warn("empty update in SVID update stream")
continue
}
for _, re := range resp.SvidUpdate.RegistrationEntries {
regEntries[re.EntryId] = re
}
for spiffeid, svid := range resp.SvidUpdate.Svids {
svids[spiffeid] = svid
}
for spiffeid, bundle := range resp.SvidUpdate.Bundles {
bundles[spiffeid] = bundle
}
}
return &Update{
Entries: regEntries,
SVIDs: svids,
Bundles: bundles,
}, nil
}
func (c *client) FetchJWTSVID(ctx context.Context, jsr *node.JSR) (*JWTSVID, error) {
nodeClient, err := c.newNodeClient(ctx)
if err != nil {
return nil, err
}
response, err := nodeClient.FetchJWTSVID(ctx, &node.FetchJWTSVIDRequest{
Jsr: jsr,
})
// We weren't able to make the request...close the client and return the error.
if err != nil {
c.Release()
c.c.Log.Errorf("Failure fetching JWT SVID. %v: %v", ErrUnableToGetStream, err)
return nil, ErrUnableToGetStream
}
svid := response.GetSvid()
if svid == nil {
return nil, errors.New("JWTSVID response missing SVID")
}
if svid.IssuedAt == 0 {
return nil, errors.New("JWTSVID missing issued at")
}
if svid.ExpiresAt == 0 {
return nil, errors.New("JWTSVID missing expires at")
}
if svid.IssuedAt > svid.ExpiresAt {
return nil, errors.New("JWTSVID issued after it has expired")
}
return &JWTSVID{
Token: svid.Token,
IssuedAt: time.Unix(svid.IssuedAt, 0).UTC(),
ExpiresAt: time.Unix(svid.ExpiresAt, 0).UTC(),
}, nil
}
func (c *client) Release() {
c.m.Lock()
defer c.m.Unlock()
if c.conn != nil {
c.conn.Close()
c.conn = nil
}
}
func (c *client) newNodeClient(ctx context.Context) (node.NodeClient, error) {
if c.newNodeClientCallback != nil {
return c.newNodeClientCallback()
}
c.m.Lock()
defer c.m.Unlock()
if c.conn == nil {
conn, err := c.dial(ctx)
if err != nil {
return nil, err
}
c.conn = conn
}
return node.NewNodeClient(c.conn), nil
}
| 1 | 10,745 | What kind of errors might cause us to reach this point? Could any of them be recoverable, i.e. might they work if we asked the same server again? A similar question applies below, though perhaps with a slightly different answer. | spiffe-spire | go
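On the reviewer's question: stream creation and the first Send can fail for transient transport reasons (for example, the cached gRPC connection went stale after a server restart), which a redial might recover from, whereas something like a certificate rejection would fail again. A minimal sketch of a retry-once variant; the stream type name assumes standard protoc-gen-go naming, and this is an illustration rather than the project's actual change:

func (c *client) openStreamWithRetry(ctx context.Context, req *node.FetchX509SVIDRequest) (node.Node_FetchX509SVIDClient, error) {
	for attempt := 0; attempt < 2; attempt++ {
		nodeClient, err := c.newNodeClient(ctx)
		if err != nil {
			return nil, err
		}
		stream, err := nodeClient.FetchX509SVID(ctx)
		if err == nil {
			if err = stream.Send(req); err == nil {
				return stream, nil
			}
		}
		// Drop the (possibly stale) cached connection so the next attempt redials.
		c.Release()
	}
	return nil, ErrUnableToGetStream
}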
@@ -212,14 +212,14 @@ public:
class triangle_counting_test {
public:
- using my_graph_type = dal::preview::undirected_adjacency_vector_graph<>;
+ using graph_type = dal::preview::undirected_adjacency_vector_graph<>;
template <typename GraphType>
auto create_graph() {
GraphType graph_data;
- my_graph_type my_graph;
+ graph_type graph;
- auto &graph_impl = oneapi::dal::detail::get_impl(my_graph);
+ auto &graph_impl = oneapi::dal::detail::get_impl(graph);
auto &vertex_allocator = graph_impl._vertex_allocator;
auto &edge_allocator = graph_impl._edge_allocator;
| 1 | /*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <array>
#include "oneapi/dal/algo/triangle_counting/vertex_ranking.hpp"
#include "oneapi/dal/test/engine/common.hpp"
namespace oneapi::dal::algo::triangle_counting::test {
class graph_base_data {
public:
graph_base_data() = default;
std::int64_t get_vertex_count() const {
return vertex_count;
}
std::int64_t get_edge_count() const {
return edge_count;
}
std::int64_t get_global_triangle_count() const {
return global_triangle_count;
}
std::int64_t get_cols_count() const {
return cols_count;
}
std::int64_t get_rows_count() const {
return rows_count;
}
protected:
std::int64_t vertex_count;
std::int64_t edge_count;
std::int64_t cols_count;
std::int64_t rows_count;
std::int64_t global_triangle_count;
};
class complete_graph_5_type : public graph_base_data {
public:
complete_graph_5_type() {
vertex_count = 5;
edge_count = 10;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 10;
}
std::array<std::int32_t, 5> degrees = { 4, 4, 4, 4, 4 };
std::array<std::int32_t, 20> cols = {
1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 4, 0, 1, 2, 3
};
std::array<std::int64_t, 6> rows = { 0, 4, 8, 12, 16, 20 };
std::array<std::int64_t, 5> local_triangles = { 6, 6, 6, 6, 6 };
};
class complete_graph_9_type : public graph_base_data {
public:
complete_graph_9_type() {
vertex_count = 9;
edge_count = 36;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 84;
}
std::array<std::int32_t, 9> degrees = { 8, 8, 8, 8, 8, 8, 8, 8, 8 };
std::array<std::int32_t, 72> cols = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 2, 3, 4, 5, 6, 7, 8, 0, 1,
3, 4, 5, 6, 7, 8, 0, 1, 2, 4, 5, 6, 7, 8, 0, 1, 2, 3,
5, 6, 7, 8, 0, 1, 2, 3, 4, 6, 7, 8, 0, 1, 2, 3, 4, 5,
7, 8, 0, 1, 2, 3, 4, 5, 6, 8, 0, 1, 2, 3, 4, 5, 6, 7 };
std::array<std::int64_t, 10> rows = { 0, 8, 16, 24, 32, 40, 48, 56, 64, 72 };
std::array<std::int64_t, 9> local_triangles = { 28, 28, 28, 28, 28, 28, 28, 28, 28 };
};
class acyclic_graph_8_type : public graph_base_data {
public:
acyclic_graph_8_type() {
vertex_count = 8;
edge_count = 7;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 0;
}
std::array<std::int32_t, 8> degrees = { 3, 1, 3, 3, 1, 1, 1, 1 };
std::array<std::int32_t, 14> cols = { 1, 2, 4, 0, 0, 3, 6, 2, 5, 7, 0, 3, 2, 3 };
std::array<std::int64_t, 9> rows = { 0, 3, 4, 7, 10, 11, 12, 13, 14 };
std::array<std::int64_t, 8> local_triangles = { 0, 0, 0, 0, 0, 0, 0, 0 };
};
class two_vertices_graph_type : public graph_base_data {
public:
two_vertices_graph_type() {
vertex_count = 2;
edge_count = 1;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 0;
}
std::array<std::int32_t, 2> degrees = { 1, 1 };
std::array<std::int32_t, 2> cols = { 1, 0 };
std::array<std::int64_t, 3> rows = { 0, 1, 2 };
std::array<std::int64_t, 2> local_triangles = { 0, 0 };
};
class cycle_graph_9_type : public graph_base_data {
public:
cycle_graph_9_type() {
vertex_count = 9;
edge_count = 9;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 0;
}
std::array<std::int32_t, 9> degrees = { 2, 2, 2, 2, 2, 2, 2, 2, 2 };
std::array<std::int32_t, 18> cols = { 1, 8, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 0, 7 };
std::array<std::int64_t, 10> rows = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 };
std::array<std::int64_t, 9> local_triangles = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
};
class triangle_graph_type : public graph_base_data {
public:
triangle_graph_type() {
vertex_count = 3;
edge_count = 3;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 1;
}
std::array<std::int32_t, 3> degrees = { 2, 2, 2 };
std::array<std::int32_t, 6> cols = { 1, 2, 0, 2, 0, 1 };
std::array<std::int64_t, 4> rows = { 0, 2, 4, 6 };
std::array<std::int64_t, 3> local_triangles = { 1, 1, 1 };
};
class wheel_graph_6_type : public graph_base_data {
public:
wheel_graph_6_type() {
vertex_count = 6;
edge_count = 10;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 5;
}
std::array<std::int32_t, 6> degrees = { 5, 3, 3, 3, 3, 3 };
std::array<std::int32_t, 20> cols = {
1, 2, 3, 4, 5, 0, 2, 5, 0, 1, 3, 0, 2, 4, 0, 3, 5, 0, 1, 4
};
std::array<std::int64_t, 7> rows = { 0, 5, 8, 11, 14, 17, 20 };
std::array<std::int64_t, 6> local_triangles = { 5, 2, 2, 2, 2, 2 };
};
class graph_with_isolated_vertices_10_type : public graph_base_data {
public:
graph_with_isolated_vertices_10_type() {
vertex_count = 10;
edge_count = 11;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 5;
}
std::array<std::int32_t, 10> degrees = { 5, 3, 2, 0, 3, 4, 0, 2, 0, 3 };
std::array<std::int32_t, 22> cols = { 1, 2, 4, 5, 7, 0, 5, 9, 0, 7, 0,
5, 9, 0, 1, 4, 9, 0, 2, 1, 4, 5 };
std::array<std::int64_t, 11> rows = { 0, 5, 8, 10, 10, 13, 17, 17, 19, 19, 22 };
std::array<std::int64_t, 10> local_triangles = { 3, 2, 1, 0, 2, 4, 0, 1, 0, 2 };
};
class graph_with_isolated_vertex_11_type : public graph_base_data {
public:
graph_with_isolated_vertex_11_type() {
vertex_count = 11;
edge_count = 45;
cols_count = edge_count * 2;
rows_count = vertex_count + 1;
global_triangle_count = 120;
}
std::array<std::int32_t, 11> degrees = { 9, 9, 9, 9, 9, 0, 9, 9, 9, 9, 9 };
std::array<std::int32_t, 90> cols = { 1, 2, 3, 4, 6, 7, 8, 9, 10, 0, 2, 3, 4, 6, 7, 8, 9, 10,
0, 1, 3, 4, 6, 7, 8, 9, 10, 0, 1, 2, 4, 6, 7, 8, 9, 10,
0, 1, 2, 3, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 7, 8, 9, 10,
0, 1, 2, 3, 4, 6, 8, 9, 10, 0, 1, 2, 3, 4, 6, 7, 9, 10,
0, 1, 2, 3, 4, 6, 7, 8, 10, 0, 1, 2, 3, 4, 6, 7, 8, 9 };
std::array<std::int64_t, 12> rows = { 0, 9, 18, 27, 36, 45, 45, 54, 63, 72, 81, 90 };
std::array<std::int64_t, 11> local_triangles = { 36, 36, 36, 36, 36, 0, 36, 36, 36, 36, 36 };
};
class triangle_counting_test {
public:
using my_graph_type = dal::preview::undirected_adjacency_vector_graph<>;
template <typename GraphType>
auto create_graph() {
GraphType graph_data;
my_graph_type my_graph;
auto &graph_impl = oneapi::dal::detail::get_impl(my_graph);
auto &vertex_allocator = graph_impl._vertex_allocator;
auto &edge_allocator = graph_impl._edge_allocator;
const std::int64_t vertex_count = graph_data.get_vertex_count();
const std::int64_t edge_count = graph_data.get_edge_count();
const std::int64_t cols_count = graph_data.get_cols_count();
const std::int64_t rows_count = graph_data.get_rows_count();
std::int32_t *degrees =
oneapi::dal::preview::detail::allocate(vertex_allocator, vertex_count);
std::int32_t *cols = oneapi::dal::preview::detail::allocate(vertex_allocator, cols_count);
std::int64_t *rows = oneapi::dal::preview::detail::allocate(edge_allocator, rows_count);
std::int32_t *rows_vertex =
oneapi::dal::preview::detail::allocate(vertex_allocator, rows_count);
for (int i = 0; i < vertex_count; i++) {
degrees[i] = graph_data.degrees[i];
}
for (int i = 0; i < cols_count; i++) {
cols[i] = graph_data.cols[i];
}
for (int i = 0; i < rows_count; i++) {
rows[i] = graph_data.rows[i];
rows_vertex[i] = graph_data.rows[i];
}
graph_impl.set_topology(vertex_count, edge_count, rows, cols, degrees);
graph_impl.get_topology()._rows_vertex =
oneapi::dal::preview::detail::container<std::int32_t>::wrap(rows_vertex, rows_count);
return my_graph;
}
template <typename GraphType>
void check_local_task() {
GraphType graph_data;
const auto graph = create_graph<GraphType>();
std::int64_t vertex_count = graph_data.get_vertex_count();
std::allocator<char> alloc;
const auto tc_desc = dal::preview::triangle_counting::descriptor<
float,
dal::preview::triangle_counting::method::ordered_count,
dal::preview::triangle_counting::task::local,
std::allocator<char>>(alloc);
const auto result_vertex_ranking = dal::preview::vertex_ranking(tc_desc, graph);
auto local_triangles_table = result_vertex_ranking.get_ranks();
const auto &local_triangles =
static_cast<const dal::homogen_table &>(local_triangles_table);
const auto local_triangles_data = local_triangles.get_data<std::int64_t>();
REQUIRE(local_triangles_table.get_row_count() == vertex_count);
int correct_local_triangle_count = 0;
for (std::int64_t i = 0; i < vertex_count; i++) {
if (local_triangles_data[i] == graph_data.local_triangles[i]) {
correct_local_triangle_count++;
}
}
REQUIRE(correct_local_triangle_count == vertex_count);
}
template <typename GraphType>
void check_local_and_global_task() {
GraphType graph_data;
const auto graph = create_graph<GraphType>();
std::int64_t vertex_count = graph_data.get_vertex_count();
std::int64_t global_triangle_count = graph_data.get_global_triangle_count();
std::allocator<char> alloc;
const auto tc_desc = dal::preview::triangle_counting::descriptor<
float,
dal::preview::triangle_counting::method::ordered_count,
dal::preview::triangle_counting::task::local_and_global,
std::allocator<char>>(alloc);
const auto result_vertex_ranking = dal::preview::vertex_ranking(tc_desc, graph);
auto local_triangles_table = result_vertex_ranking.get_ranks();
const auto &local_triangles =
static_cast<const dal::homogen_table &>(local_triangles_table);
const auto local_triangles_data = local_triangles.get_data<std::int64_t>();
REQUIRE(result_vertex_ranking.get_global_rank() == global_triangle_count);
REQUIRE(local_triangles_table.get_row_count() == vertex_count);
int correct_local_triangle_count = 0;
for (std::int64_t i = 0; i < vertex_count; i++) {
if (local_triangles_data[i] == graph_data.local_triangles[i]) {
correct_local_triangle_count++;
}
}
REQUIRE(correct_local_triangle_count == vertex_count);
}
template <typename GraphType>
void check_global_task_relabeled() {
GraphType graph_data;
const auto graph = create_graph<GraphType>();
std::int64_t global_triangle_count = graph_data.get_global_triangle_count();
std::allocator<char> alloc;
auto tc_desc = dal::preview::triangle_counting::descriptor<
float,
dal::preview::triangle_counting::method::ordered_count,
dal::preview::triangle_counting::task::global,
std::allocator<char>>(alloc)
.set_relabel(dal::preview::triangle_counting::relabel::yes);
const auto result_vertex_ranking = dal::preview::vertex_ranking(tc_desc, graph);
REQUIRE(result_vertex_ranking.get_global_rank() == global_triangle_count);
}
template <typename GraphType>
void check_global_task_not_relabeled() {
GraphType graph_data;
const auto graph = create_graph<GraphType>();
std::int64_t global_triangle_count = graph_data.get_global_triangle_count();
std::allocator<char> alloc;
auto tc_desc = dal::preview::triangle_counting::descriptor<
float,
dal::preview::triangle_counting::method::ordered_count,
dal::preview::triangle_counting::task::global,
std::allocator<char>>(alloc)
.set_relabel(dal::preview::triangle_counting::relabel::no);
const auto result_vertex_ranking = dal::preview::vertex_ranking(tc_desc, graph);
REQUIRE(result_vertex_ranking.get_global_rank() == global_triangle_count);
}
};
TEST_M(triangle_counting_test, "local task for graphs with average_degree < 4") {
this->check_local_task<complete_graph_5_type>();
this->check_local_task<acyclic_graph_8_type>();
this->check_local_task<two_vertices_graph_type>();
this->check_local_task<cycle_graph_9_type>();
this->check_local_task<triangle_graph_type>();
this->check_local_task<wheel_graph_6_type>();
this->check_local_task<graph_with_isolated_vertices_10_type>();
}
TEST_M(triangle_counting_test, "local task for graphs with average_degree >= 4") {
this->check_local_task<complete_graph_9_type>();
this->check_local_task<graph_with_isolated_vertex_11_type>();
}
TEST_M(triangle_counting_test, "local_and_global task for graphs with average_degree < 4") {
this->check_local_and_global_task<complete_graph_5_type>();
this->check_local_and_global_task<acyclic_graph_8_type>();
this->check_local_and_global_task<two_vertices_graph_type>();
this->check_local_and_global_task<cycle_graph_9_type>();
this->check_local_and_global_task<triangle_graph_type>();
this->check_local_and_global_task<wheel_graph_6_type>();
this->check_local_and_global_task<graph_with_isolated_vertices_10_type>();
}
TEST_M(triangle_counting_test, "local_and_global task for graphs with average_degree >= 4") {
this->check_local_and_global_task<complete_graph_9_type>();
this->check_local_and_global_task<graph_with_isolated_vertex_11_type>();
}
TEST_M(triangle_counting_test, "global task for graphs with average_degree < 4") {
this->check_global_task_relabeled<complete_graph_5_type>();
this->check_global_task_relabeled<acyclic_graph_8_type>();
this->check_global_task_relabeled<two_vertices_graph_type>();
this->check_global_task_relabeled<cycle_graph_9_type>();
this->check_global_task_relabeled<triangle_graph_type>();
this->check_global_task_relabeled<wheel_graph_6_type>();
this->check_global_task_relabeled<graph_with_isolated_vertices_10_type>();
}
TEST_M(triangle_counting_test, "global task for relabeled graph with average_degree >= 4") {
this->check_global_task_relabeled<complete_graph_9_type>();
this->check_global_task_relabeled<graph_with_isolated_vertex_11_type>();
}
TEST_M(triangle_counting_test, "global task for not relabeled graph with average_degree >= 4") {
this->check_global_task_not_relabeled<complete_graph_9_type>();
this->check_global_task_not_relabeled<graph_with_isolated_vertex_11_type>();
}
} // namespace oneapi::dal::algo::triangle_counting::test
| 1 | 30,250 | What is the difference between GraphType and graph_type? The naming should be more accurate. | oneapi-src-oneDAL | cpp
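One reading of the comment above: `graph_type` is the oneDAL graph being constructed, while the `GraphType` template parameter actually carries the test-data fixture, so renaming the parameter would keep the two roles distinct. A hypothetical sketch with the names separated (not the committed change):

template <typename GraphDataType>
auto create_graph() {
    GraphDataType graph_data; // test fixture: the degrees/cols/rows arrays
    graph_type graph;         // the dal::preview graph under construction
    // ... populate graph from graph_data exactly as in the test above ...
    return graph;
}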
@@ -9,7 +9,7 @@ use Shopsys\FrameworkBundle\Model\Product\Product;
class UnitRepository
{
/**
- * @var \Doctrine\ORM\EntityRepository
+ * @var \Doctrine\ORM\EntityManagerInterface
*/
protected $em;
| 1 | <?php
namespace Shopsys\FrameworkBundle\Model\Product\Unit;
use Doctrine\ORM\AbstractQuery;
use Doctrine\ORM\EntityManagerInterface;
use Shopsys\FrameworkBundle\Model\Product\Product;
class UnitRepository
{
/**
* @var \Doctrine\ORM\EntityRepository
*/
protected $em;
/**
* @param \Doctrine\ORM\EntityManagerInterface $entityManager
*/
public function __construct(EntityManagerInterface $entityManager)
{
$this->em = $entityManager;
}
/**
* @return \Doctrine\ORM\EntityRepository
*/
protected function getUnitRepository()
{
return $this->em->getRepository(Unit::class);
}
/**
* @param int $unitId
* @return \Shopsys\FrameworkBundle\Model\Product\Unit\Unit|null
*/
public function findById($unitId)
{
return $this->getUnitRepository()->find($unitId);
}
/**
* @param int $unitId
* @return \Shopsys\FrameworkBundle\Model\Product\Unit\Unit
*/
public function getById($unitId)
{
$unit = $this->findById($unitId);
if ($unit === null) {
throw new \Shopsys\FrameworkBundle\Model\Product\Unit\Exception\UnitNotFoundException('Unit with ID ' . $unitId . ' not found.');
}
return $unit;
}
/**
* @return \Doctrine\ORM\QueryBuilder
*/
protected function getAllQueryBuilder()
{
return $this->em->createQueryBuilder()
->select('u')
->from(Unit::class, 'u')
->orderBy('u.id');
}
/**
* @return \Shopsys\FrameworkBundle\Model\Product\Unit\Unit[]
*/
public function getAll()
{
return $this->getAllQueryBuilder()->getQuery()->execute();
}
/**
* @param int $unitId
* @return \Shopsys\FrameworkBundle\Model\Product\Unit\Unit[]
*/
public function getAllExceptId($unitId)
{
return $this->getAllQueryBuilder()
->where('u.id != :id')->setParameter('id', $unitId)
->getQuery()->execute();
}
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Unit\Unit $unit
* @return bool
*/
public function existsProductWithUnit(Unit $unit)
{
$qb = $this->em->createQueryBuilder()
->select('COUNT(p)')
->from(Product::class, 'p')
->where('p.unit = :unit')->setParameter('unit', $unit);
return $qb->getQuery()->getOneOrNullResult(AbstractQuery::HYDRATE_SINGLE_SCALAR) > 0;
}
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Unit\Unit $oldUnit
* @param \Shopsys\FrameworkBundle\Model\Product\Unit\Unit $newUnit
*/
public function replaceUnit(Unit $oldUnit, Unit $newUnit)
{
$this->em->createQueryBuilder()
->update(Product::class, 'p')
->set('p.unit', ':newUnit')->setParameter('newUnit', $newUnit)
->where('p.unit = :oldUnit')->setParameter('oldUnit', $oldUnit)
->getQuery()->execute();
}
}
| 1 | 16,285 | hmmm in the docblock of the constructor it is EntityManagerInterface... maybe it should be that way... :confused: | shopsys-shopsys | php
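On the docblock point above: the constructor's @param already names EntityManagerInterface, so the property docblock only needs to be brought in line with it, which is what the patch does. The matched pair, shown together:

/**
 * @var \Doctrine\ORM\EntityManagerInterface
 */
protected $em;

/**
 * @param \Doctrine\ORM\EntityManagerInterface $entityManager
 */
public function __construct(EntityManagerInterface $entityManager)
{
    $this->em = $entityManager;
}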
@@ -54,8 +54,13 @@ struct feat_wrapper {
.def("GetPos",
(RDGeom::Point3D(MolChemicalFeature::*)(int) const) &
MolChemicalFeature::getPos,
- (python::arg("self"), python::arg("confId") = -1),
+ (python::arg("self"), python::arg("confId")),
"Get the location of the chemical feature")
+ .def("GetPosDefault",
+ (RDGeom::Point3D(MolChemicalFeature::*)() const) &
+ MolChemicalFeature::getPos,
+ python::arg("self"),
+ "Get the location of the default chemical feature (first position)")
.def("GetAtomIds", getFeatAtomIds,
"Get the IDs of the atoms that participate in the feature")
.def("GetMol", &MolChemicalFeature::getMol, | 1 | // $Id$
//
// Copyright (C) 2004-2006 Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#define NO_IMPORT_ARRAY
#include <RDBoost/python.h>
#include <RDGeneral/types.h>
#include <RDBoost/pyint_api.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/MolChemicalFeatures/MolChemicalFeature.h>
#include <GraphMol/MolChemicalFeatures/MolChemicalFeatureFactory.h>
namespace python = boost::python;
namespace RDKit {
PyObject *getFeatAtomIds(const MolChemicalFeature &feat) {
const MolChemicalFeature::AtomPtrContainer &atoms = feat.getAtoms();
PyObject *res = PyTuple_New(atoms.size());
MolChemicalFeature::AtomPtrContainer_CI aci;
int idx = 0;
for (aci = atoms.begin(); aci != atoms.end(); ++aci) {
PyTuple_SetItem(res, idx, PyInt_FromLong((*aci)->getIdx()));
idx++;
}
return res;
}
std::string featClassDoc =
"Class to represent a chemical feature.\n\
These chemical features may or may not have been derived from molecule object;\n\
i.e. it is possible to have a chemical feature that was created just from its type\n\
and location.\n";
struct feat_wrapper {
static void wrap() {
python::class_<MolChemicalFeature, FeatSPtr>(
"MolChemicalFeature", featClassDoc.c_str(), python::no_init)
.def("GetId", &MolChemicalFeature::getId,
"Returns the identifier of the feature\n")
.def("GetFamily", &MolChemicalFeature::getFamily,
"Get the family to which the feature belongs; donor, acceptor, "
"etc.",
python::return_value_policy<python::copy_const_reference>())
.def("GetType", &MolChemicalFeature::getType,
"Get the specific type for the feature",
python::return_value_policy<python::copy_const_reference>())
.def("GetPos",
(RDGeom::Point3D(MolChemicalFeature::*)(int) const) &
MolChemicalFeature::getPos,
(python::arg("self"), python::arg("confId") = -1),
"Get the location of the chemical feature")
.def("GetAtomIds", getFeatAtomIds,
"Get the IDs of the atoms that participate in the feature")
.def("GetMol", &MolChemicalFeature::getMol,
"Get the molecule used to derive the features",
python::return_value_policy<python::reference_existing_object>())
.def("GetFactory", &MolChemicalFeature::getFactory,
"Get the factory used to generate this feature",
python::return_value_policy<python::reference_existing_object>())
.def("ClearCache", &MolChemicalFeature::clearCache,
"Clears the cache used to store position information.")
.def("SetActiveConformer", &MolChemicalFeature::setActiveConformer,
"Sets the conformer to use (must be associated with a molecule).")
.def("GetActiveConformer", &MolChemicalFeature::getActiveConformer,
"Gets the conformer to use.");
};
};
} // namespace RDKit
void wrap_MolChemicalFeat() { RDKit::feat_wrapper::wrap(); }
| 1 | 22,271 | I'm curious why you went the route of adding a new method name for this. Given that it would be possible to just have an overload of `GetPos()`, and that this change breaks existing code, I wonder why you chose to go this way. | rdkit-rdkit | cpp
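The single-name route the reviewer alludes to is possible because Boost.Python allows the same name to be def'd more than once, building an overload set that is resolved from the call's arguments. A hypothetical binding that keeps one GetPos name (not the submitted patch):

.def("GetPos",
     (RDGeom::Point3D(MolChemicalFeature::*)(int) const) &
         MolChemicalFeature::getPos,
     (python::arg("self"), python::arg("confId")),
     "Get the location of the chemical feature for the given conformer")
.def("GetPos",
     (RDGeom::Point3D(MolChemicalFeature::*)() const) &
         MolChemicalFeature::getPos,
     python::arg("self"),
     "Get the location of the chemical feature for the default conformer")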
@@ -33,12 +33,14 @@ export default Component.extend({
let subscriptions = this.member.get('stripe');
if (subscriptions && subscriptions.length > 0) {
return subscriptions.map((subscription) => {
+ const statusLabel = subscription.status === 'past_due' ? 'Past due' : subscription.status;
return {
id: subscription.id,
customer: subscription.customer,
name: subscription.name || '',
email: subscription.email || '',
status: subscription.status,
+ statusLabel: statusLabel,
startDate: subscription.start_date ? moment(subscription.start_date).format('D MMM YYYY') : '-',
plan: subscription.plan,
amount: parseInt(subscription.plan.amount) ? (subscription.plan.amount / 100) : 0, | 1 | import Component from '@ember/component';
import moment from 'moment';
import {computed} from '@ember/object';
import {gt} from '@ember/object/computed';
import {inject as service} from '@ember/service';
import {task} from 'ember-concurrency';
export default Component.extend({
membersUtils: service(),
feature: service(),
config: service(),
mediaQueries: service(),
ghostPaths: service(),
ajax: service(),
store: service(),
// Allowed actions
setProperty: () => {},
hasMultipleSubscriptions: gt('member.stripe', 1),
canShowStripeInfo: computed('member.isNew', 'membersUtils.isStripeEnabled', function () {
let stripeEnabled = this.membersUtils.isStripeEnabled;
if (this.member.get('isNew') || !stripeEnabled) {
return false;
} else {
return true;
}
}),
subscriptions: computed('member.stripe', function () {
let subscriptions = this.member.get('stripe');
if (subscriptions && subscriptions.length > 0) {
return subscriptions.map((subscription) => {
return {
id: subscription.id,
customer: subscription.customer,
name: subscription.name || '',
email: subscription.email || '',
status: subscription.status,
startDate: subscription.start_date ? moment(subscription.start_date).format('D MMM YYYY') : '-',
plan: subscription.plan,
amount: parseInt(subscription.plan.amount) ? (subscription.plan.amount / 100) : 0,
cancelAtPeriodEnd: subscription.cancel_at_period_end,
validUntil: subscription.current_period_end ? moment(subscription.current_period_end).format('D MMM YYYY') : '-'
};
}).reverse();
}
return null;
}),
actions: {
setProperty(property, value) {
this.setProperty(property, value);
}
},
cancelSubscription: task(function* (subscriptionId) {
let url = this.get('ghostPaths.url').api('members', this.member.get('id'), 'subscriptions', subscriptionId);
let response = yield this.ajax.put(url, {
data: {
cancel_at_period_end: true
}
});
this.store.pushPayload('member', response);
return response;
}).drop(),
continueSubscription: task(function* (subscriptionId) {
let url = this.get('ghostPaths.url').api('members', this.member.get('id'), 'subscriptions', subscriptionId);
let response = yield this.ajax.put(url, {
data: {
cancel_at_period_end: false
}
});
this.store.pushPayload('member', response);
return response;
}).drop()
});
| 1 | 9,489 | Does this need to change `'active'` to `'Active'` and so on? Or is that done in CSS or something? | TryGhost-Admin | js
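If the capitalisation is not handled in CSS, one JS-side option would be to derive the label generically instead of special-casing each status. A hypothetical helper building on the patched line:

// Hypothetical: capitalize any status, with 'past_due' still special-cased.
const toStatusLabel = (status) => {
    if (status === 'past_due') {
        return 'Past due';
    }
    return status.charAt(0).toUpperCase() + status.slice(1);
};

The mapping would then read `statusLabel: toStatusLabel(subscription.status)`.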
@@ -11,6 +11,13 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core
public class Http2Limits
{
private int _maxStreamsPerConnection = 100;
+ private int _headerTableSize = MaxAllowedHeaderTableSize;
+ private int _maxFrameSize = MinAllowedMaxFrameSize;
+
+ // These are limits defined by the RFC https://tools.ietf.org/html/rfc7540#section-4.2
+ public const int MaxAllowedHeaderTableSize = 4096;
+ public const int MinAllowedMaxFrameSize = 16 * 1024;
+ public const int MaxAllowedMaxFrameSize = 16 * 1024 * 1024 - 1;
/// <summary>
/// Limits the number of concurrent request streams per HTTP/2 connection. Excess streams will be refused. | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
namespace Microsoft.AspNetCore.Server.Kestrel.Core
{
/// <summary>
/// Limits only applicable to HTTP/2 connections.
/// </summary>
public class Http2Limits
{
private int _maxStreamsPerConnection = 100;
/// <summary>
/// Limits the number of concurrent request streams per HTTP/2 connection. Excess streams will be refused.
/// <para>
/// Defaults to 100
/// </para>
/// </summary>
public int MaxStreamsPerConnection
{
get => _maxStreamsPerConnection;
set
{
if (value <= 0)
{
throw new ArgumentOutOfRangeException(nameof(value), value, CoreStrings.GreaterThanZeroRequired);
}
_maxStreamsPerConnection = value;
}
}
}
}
| 1 | 16,424 | Why are these constants public? They should also be listed above members. | aspnet-KestrelHttpServer | .cs |
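The reviewer's two points (narrow the visibility, list constants before instance members) are language-agnostic. A minimal Java sketch of the arrangement being suggested, assuming the limits only need package-internal visibility; the class and constant names simply mirror the C# row above and are otherwise illustrative:

```java
public class Http2Limits {
    // RFC 7540 limits first, narrowed from public since (by assumption)
    // only validation code in this package needs them.
    static final int MAX_ALLOWED_HEADER_TABLE_SIZE = 4096;
    static final int MIN_ALLOWED_MAX_FRAME_SIZE = 16 * 1024;
    static final int MAX_ALLOWED_MAX_FRAME_SIZE = 16 * 1024 * 1024 - 1;

    // Instance state follows the constants.
    private int maxStreamsPerConnection = 100;
    private int headerTableSize = MAX_ALLOWED_HEADER_TABLE_SIZE;
    private int maxFrameSize = MIN_ALLOWED_MAX_FRAME_SIZE;
}
```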
@@ -217,6 +217,8 @@ public class MainnetTransactionValidator {
// org.bouncycastle.math.ec.ECCurve.AbstractFp.decompressPoint throws an
// IllegalArgumentException for "Invalid point compression" for bad signatures.
try {
+ // TODO: this is where we are checking the signature. We have to fix the v value if this is a
+ // private transaction
transaction.getSender();
} catch (final IllegalArgumentException e) {
return ValidationResult.invalid( | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.crypto.SECP256K1;
import org.hyperledger.besu.ethereum.core.AcceptedTransactionTypes;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.core.Gas;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.TransactionFilter;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.core.fees.EIP1559;
import org.hyperledger.besu.ethereum.core.fees.TransactionPriceCalculator;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.vm.GasCalculator;
import java.math.BigInteger;
import java.util.Optional;
/**
* Validates a transaction based on Frontier protocol runtime requirements.
*
* <p>The {@link MainnetTransactionValidator} performs the intrinsic gas cost check on the given
* {@link Transaction}.
*/
public class MainnetTransactionValidator {
private final GasCalculator gasCalculator;
private final Optional<TransactionPriceCalculator> transactionPriceCalculator;
private final boolean disallowSignatureMalleability;
private final Optional<BigInteger> chainId;
private Optional<TransactionFilter> transactionFilter = Optional.empty();
private final Optional<EIP1559> maybeEip1559;
private final AcceptedTransactionTypes acceptedTransactionTypes;
private final boolean goQuorumCompatibilityMode;
public MainnetTransactionValidator(
final GasCalculator gasCalculator,
final boolean checkSignatureMalleability,
final Optional<BigInteger> chainId,
final boolean goQuorumCompatibilityMode) {
this(
gasCalculator,
Optional.empty(),
checkSignatureMalleability,
chainId,
Optional.empty(),
AcceptedTransactionTypes.FRONTIER_TRANSACTIONS,
goQuorumCompatibilityMode);
}
public MainnetTransactionValidator(
final GasCalculator gasCalculator,
final Optional<TransactionPriceCalculator> transactionPriceCalculator,
final boolean checkSignatureMalleability,
final Optional<BigInteger> chainId,
final Optional<EIP1559> maybeEip1559,
final AcceptedTransactionTypes acceptedTransactionTypes,
final boolean goQuorumCompatibilityMode) {
this.gasCalculator = gasCalculator;
this.transactionPriceCalculator = transactionPriceCalculator;
this.disallowSignatureMalleability = checkSignatureMalleability;
this.chainId = chainId;
this.maybeEip1559 = maybeEip1559;
this.acceptedTransactionTypes = acceptedTransactionTypes;
this.goQuorumCompatibilityMode = goQuorumCompatibilityMode;
}
/**
* Asserts whether a transaction is valid.
*
* @param transaction the transaction to validate
* @param baseFee optional baseFee
* @return An empty {@link Optional} if the transaction is considered valid; otherwise an {@code
* Optional} containing a {@link TransactionInvalidReason} that identifies why the transaction
* is invalid.
*/
public ValidationResult<TransactionInvalidReason> validate(
final Transaction transaction, final Optional<Long> baseFee) {
final ValidationResult<TransactionInvalidReason> signatureResult =
validateTransactionSignature(transaction);
if (!signatureResult.isValid()) {
return signatureResult;
}
if (goQuorumCompatibilityMode && !transaction.getGasPrice().isZero()) {
return ValidationResult.invalid(
TransactionInvalidReason.GAS_PRICE_MUST_BE_ZERO,
"gasPrice must be set to zero on a GoQuorum compatible network");
}
if (ExperimentalEIPs.eip1559Enabled && maybeEip1559.isPresent()) {
final EIP1559 eip1559 = maybeEip1559.get();
if (!eip1559.isValidFormat(transaction, acceptedTransactionTypes)) {
return ValidationResult.invalid(
TransactionInvalidReason.INVALID_TRANSACTION_FORMAT,
String.format(
"transaction format is invalid, accepted transaction types are %s",
acceptedTransactionTypes.toString()));
}
if (transaction.isEIP1559Transaction()) {
final Wei price = transactionPriceCalculator.orElseThrow().price(transaction, baseFee);
if (price.compareTo(Wei.of(baseFee.orElseThrow())) < 0) {
return ValidationResult.invalid(
TransactionInvalidReason.INVALID_TRANSACTION_FORMAT,
String.format("gasPrice is less than the current BaseFee"));
}
}
} else if (transaction.isEIP1559Transaction()) {
return ValidationResult.invalid(
TransactionInvalidReason.INVALID_TRANSACTION_FORMAT,
String.format(
"transaction format is invalid, accepted transaction types are %s",
acceptedTransactionTypes.toString()));
}
final Gas intrinsicGasCost = gasCalculator.transactionIntrinsicGasCost(transaction);
if (intrinsicGasCost.compareTo(Gas.of(transaction.getGasLimit())) > 0) {
return ValidationResult.invalid(
TransactionInvalidReason.INTRINSIC_GAS_EXCEEDS_GAS_LIMIT,
String.format(
"intrinsic gas cost %s exceeds gas limit %s",
intrinsicGasCost, transaction.getGasLimit()));
}
return ValidationResult.valid();
}
public ValidationResult<TransactionInvalidReason> validateForSender(
final Transaction transaction,
final Account sender,
final TransactionValidationParams validationParams) {
Wei senderBalance = Account.DEFAULT_BALANCE;
long senderNonce = Account.DEFAULT_NONCE;
if (sender != null) {
senderBalance = sender.getBalance();
senderNonce = sender.getNonce();
}
if (transaction.getUpfrontCost().compareTo(senderBalance) > 0) {
return ValidationResult.invalid(
TransactionInvalidReason.UPFRONT_COST_EXCEEDS_BALANCE,
String.format(
"transaction up-front cost %s exceeds transaction sender account balance %s",
transaction.getUpfrontCost(), senderBalance));
}
if (transaction.getNonce() < senderNonce) {
return ValidationResult.invalid(
TransactionInvalidReason.NONCE_TOO_LOW,
String.format(
"transaction nonce %s below sender account nonce %s",
transaction.getNonce(), senderNonce));
}
if (!validationParams.isAllowFutureNonce() && senderNonce != transaction.getNonce()) {
return ValidationResult.invalid(
TransactionInvalidReason.INCORRECT_NONCE,
String.format(
"transaction nonce %s does not match sender account nonce %s.",
transaction.getNonce(), senderNonce));
}
if (!isSenderAllowed(transaction, validationParams)) {
return ValidationResult.invalid(
TransactionInvalidReason.TX_SENDER_NOT_AUTHORIZED,
String.format("Sender %s is not on the Account Allowlist", transaction.getSender()));
}
return ValidationResult.valid();
}
public ValidationResult<TransactionInvalidReason> validateTransactionSignature(
final Transaction transaction) {
if (chainId.isPresent()
&& (transaction.getChainId().isPresent() && !transaction.getChainId().equals(chainId))) {
return ValidationResult.invalid(
TransactionInvalidReason.WRONG_CHAIN_ID,
String.format(
"transaction was meant for chain id %s and not this chain id %s",
transaction.getChainId().get(), chainId.get()));
}
if (!chainId.isPresent() && transaction.getChainId().isPresent()) {
return ValidationResult.invalid(
TransactionInvalidReason.REPLAY_PROTECTED_SIGNATURES_NOT_SUPPORTED,
"replay protected signatures is not supported");
}
final SECP256K1.Signature signature = transaction.getSignature();
if (disallowSignatureMalleability
&& signature.getS().compareTo(SECP256K1.HALF_CURVE_ORDER) > 0) {
return ValidationResult.invalid(
TransactionInvalidReason.INVALID_SIGNATURE,
String.format(
"Signature s value should be less than %s, but got %s",
SECP256K1.HALF_CURVE_ORDER, signature.getS()));
}
// org.bouncycastle.math.ec.ECCurve.AbstractFp.decompressPoint throws an
// IllegalArgumentException for "Invalid point compression" for bad signatures.
try {
transaction.getSender();
} catch (final IllegalArgumentException e) {
return ValidationResult.invalid(
TransactionInvalidReason.INVALID_SIGNATURE,
"sender could not be extracted from transaction signature");
}
return ValidationResult.valid();
}
private boolean isSenderAllowed(
final Transaction transaction, final TransactionValidationParams validationParams) {
if (validationParams.checkLocalPermissions() || validationParams.checkOnchainPermissions()) {
return transactionFilter
.map(
c ->
c.permitted(
transaction,
validationParams.checkLocalPermissions(),
validationParams.checkOnchainPermissions()))
.orElse(true);
} else {
return true;
}
}
public void setTransactionFilter(final TransactionFilter transactionFilter) {
this.transactionFilter = Optional.of(transactionFilter);
}
/**
* Asserts whether a transaction is valid for the sender accounts current state.
*
* <p>Note: {@code validate} should be called before getting the sender {@link Account} used in
* this method to ensure that a sender can be extracted from the {@link Transaction}.
*
* @param transaction the transaction to validate
* @param sender the sender account state to validate against
* @param allowFutureNonce if true, transactions with nonce equal or higher than the account nonce
* will be considered valid (used when received transactions in the transaction pool). If
* false, only a transaction with the nonce equals the account nonce will be considered valid
* (used when processing transactions).
* @return An empty {@link Optional} if the transaction is considered valid; otherwise an {@code
* Optional} containing a {@link TransactionInvalidReason} that identifies why the transaction
* is invalid.
*/
public ValidationResult<TransactionInvalidReason> validateForSender(
final Transaction transaction, final Account sender, final boolean allowFutureNonce) {
final TransactionValidationParams validationParams =
new TransactionValidationParams.Builder().allowFutureNonce(allowFutureNonce).build();
return validateForSender(transaction, sender, validationParams);
}
}
| 1 | 23,903 | This TODO isn't related to this change. We should remove it. | hyperledger-besu | java |
@@ -27,7 +27,5 @@ import org.apache.iceberg.FieldMetrics;
public interface ValueWriter<D> {
void write(D datum, Encoder encoder) throws IOException;
- default Stream<FieldMetrics> metrics() {
- return Stream.empty(); // TODO will populate in following PRs
- }
+ Stream<FieldMetrics> metrics();
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.avro;
import java.io.IOException;
import java.util.stream.Stream;
import org.apache.avro.io.Encoder;
import org.apache.iceberg.FieldMetrics;
public interface ValueWriter<D> {
void write(D datum, Encoder encoder) throws IOException;
default Stream<FieldMetrics> metrics() {
return Stream.empty(); // TODO will populate in following PRs
}
}
| 1 | 30,760 | `FieldMetrics` is parameterized, but this is a bare reference. Could you update it? I think it should be `FieldMetrics<?>` since the metrics are not necessarily for the written value type, `D`. | apache-iceberg | java |
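A sketch of the signature the reviewer is suggesting. With the wildcard, a writer for values of type `D` can still surface metrics whose own type parameter differs from `D` (for example, metrics gathered from nested fields):

```java
import java.io.IOException;
import java.util.stream.Stream;

import org.apache.avro.io.Encoder;
import org.apache.iceberg.FieldMetrics;

public interface ValueWriter<D> {
  void write(D datum, Encoder encoder) throws IOException;

  // Wildcard: the metrics' value type is independent of the written type D.
  Stream<FieldMetrics<?>> metrics();
}
```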
@@ -58,12 +58,11 @@ namespace Examples.Console
// Application which decides to enable OpenTelemetry metrics
// would setup a MeterProvider and make it default.
// All meters from this factory will be configured with the common processing pipeline.
- MeterProvider.SetDefault(Sdk.CreateMeterProvider(mb =>
- {
- mb.SetMetricProcessor(processor);
- mb.SetMetricExporter(promExporter);
- mb.SetMetricPushInterval(TimeSpan.FromSeconds(pushIntervalInSecs));
- }));
+ MeterProvider.SetDefault(Sdk.CreateMeterProviderBuilder()
+ .SetProcessor(processor)
+ .SetExporter(promExporter)
+ .SetPushInterval(TimeSpan.FromSeconds(pushIntervalInSecs))
+ .Build());
// The following shows how libraries would obtain a MeterProvider.
// MeterProvider is the entry point, which provides Meter. | 1 | // <copyright file="TestPrometheusExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading.Tasks;
using OpenTelemetry;
using OpenTelemetry.Exporter.Prometheus;
using OpenTelemetry.Metrics;
using OpenTelemetry.Metrics.Export;
using OpenTelemetry.Trace;
namespace Examples.Console
{
internal class TestPrometheusExporter
{
internal static async Task<object> RunAsync(int port, int pushIntervalInSecs, int totalDurationInMins)
{
System.Console.WriteLine($"OpenTelemetry Prometheus Exporter is making metrics available at http://localhost:{port}/metrics/");
/*
The following is a sample prometheus.yml config. Adjust port and interval as needed.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'OpenTelemetryTest'
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9184']
*/
// Create and Setup Prometheus Exporter
var promOptions = new PrometheusExporterOptions() { Url = $"http://localhost:{port}/metrics/" };
var promExporter = new PrometheusExporter(promOptions);
var metricsHttpServer = new PrometheusExporterMetricsHttpServer(promExporter);
metricsHttpServer.Start();
// Create Processor (called Batcher in the Metrics spec; the name is still not decided)
var processor = new UngroupedBatcher();
// Application which decides to enable OpenTelemetry metrics
// would setup a MeterProvider and make it default.
// All meters from this factory will be configured with the common processing pipeline.
MeterProvider.SetDefault(Sdk.CreateMeterProvider(mb =>
{
mb.SetMetricProcessor(processor);
mb.SetMetricExporter(promExporter);
mb.SetMetricPushInterval(TimeSpan.FromSeconds(pushIntervalInSecs));
}));
// The following shows how libraries would obtain a MeterProvider.
// MeterProvider is the entry point, which provides Meter.
// If user did not set the Default MeterProvider (shown in earlier lines),
// all metric operations become no-ops.
var meterProvider = MeterProvider.Default;
var meter = meterProvider.GetMeter("MyMeter");
// the rest is purely from Metrics API.
var testCounter = meter.CreateInt64Counter("MyCounter");
var testMeasure = meter.CreateInt64Measure("MyMeasure");
var testObserver = meter.CreateInt64Observer("MyObservation", CallBackForMyObservation);
var labels1 = new List<KeyValuePair<string, string>>();
labels1.Add(new KeyValuePair<string, string>("dim1", "value1"));
var labels2 = new List<KeyValuePair<string, string>>();
labels2.Add(new KeyValuePair<string, string>("dim1", "value2"));
var defaultContext = default(SpanContext);
Stopwatch sw = Stopwatch.StartNew();
while (sw.Elapsed.TotalMinutes < totalDurationInMins)
{
testCounter.Add(defaultContext, 100, meter.GetLabelSet(labels1));
testMeasure.Record(defaultContext, 100, meter.GetLabelSet(labels1));
testMeasure.Record(defaultContext, 500, meter.GetLabelSet(labels1));
testMeasure.Record(defaultContext, 5, meter.GetLabelSet(labels1));
testMeasure.Record(defaultContext, 750, meter.GetLabelSet(labels1));
// Obviously there is no testObserver.Observe() here, as Observer instruments
// have callbacks that are called by the Meter automatically at each collection interval.
await Task.Delay(1000);
var remaining = (totalDurationInMins * 60) - sw.Elapsed.TotalSeconds;
System.Console.WriteLine("Running and emitting metrics. Remaining time:" + (int)remaining + " seconds");
}
// Stopping
metricsHttpServer.Stop();
System.Console.WriteLine("Metrics server shutdown.");
System.Console.WriteLine("Press Enter key to exit.");
return null;
}
internal static void CallBackForMyObservation(Int64ObserverMetric observerMetric)
{
var labels1 = new List<KeyValuePair<string, string>>();
labels1.Add(new KeyValuePair<string, string>("dim1", "value1"));
observerMetric.Observe(Process.GetCurrentProcess().WorkingSet64, labels1);
}
}
}
| 1 | 16,583 | @cijothomas do we want this to be `Set` or `Add`? | open-telemetry-opentelemetry-dotnet | .cs |
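The `Set` vs `Add` question usually comes down to cardinality: `set*` implies a single value where the last call wins, while `add*` implies appending to a collection. A hypothetical Java builder illustrating that convention (all names here are invented for the example):

```java
import java.util.ArrayList;
import java.util.List;

class PipelineBuilder {
    private final List<Object> processors = new ArrayList<>(); // add* appends
    private long pushIntervalMillis = 60_000;                   // set* replaces

    // addProcessor: callers may register any number of processors.
    PipelineBuilder addProcessor(Object processor) {
        processors.add(processor);
        return this;
    }

    // setPushInterval: there is exactly one interval; the last call wins.
    PipelineBuilder setPushInterval(long millis) {
        this.pushIntervalMillis = millis;
        return this;
    }
}
```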
@@ -42,7 +42,7 @@ namespace Nethermind.JsonRpc
public string? IpcUnixDomainSocketPath { get; set; } = null;
public string[] EnabledModules { get; set; } = ModuleType.DefaultModules.ToArray();
- public long? GasCap { get; set; } = 100000000;
+ public long? GasCap { get; set; } = 50000000;
public int ReportIntervalSeconds { get; set; } = 300;
public bool BufferResponses { get; set; }
public string CallsFilterFilePath { get; set; } = "Data/jsonrpc.filter"; | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Linq;
using Nethermind.JsonRpc.Modules;
namespace Nethermind.JsonRpc
{
public class JsonRpcConfig : IJsonRpcConfig
{
public static readonly JsonRpcConfig Default = new();
private int? _webSocketsPort;
public bool Enabled { get; set; }
public string Host { get; set; } = "127.0.0.1";
public int Timeout { get; set; } = 20000;
public string RpcRecorderBaseFilePath { get; set; } = "logs/rpc.{counter}.txt";
public RpcRecorderState RpcRecorderState { get; set; } = RpcRecorderState.None;
public int Port { get; set; } = 8545;
public int WebSocketsPort
{
get => _webSocketsPort ?? Port;
set => _webSocketsPort = value;
}
public string? IpcUnixDomainSocketPath { get; set; } = null;
public string[] EnabledModules { get; set; } = ModuleType.DefaultModules.ToArray();
public long? GasCap { get; set; } = 100000000;
public int ReportIntervalSeconds { get; set; } = 300;
public bool BufferResponses { get; set; }
public string CallsFilterFilePath { get; set; } = "Data/jsonrpc.filter";
public long? MaxRequestBodySize { get; set; } = 30000000;
public int? EthModuleConcurrentInstances { get; set; } = null;
}
}
| 1 | 26,382 | Why are we dropping GasCap? | NethermindEth-nethermind | .cs |
@@ -45,11 +45,11 @@ namespace OpenTelemetry.Shims.OpenTracing
public SpanShim(TelemetrySpan span)
{
- this.Span = span ?? throw new ArgumentNullException(nameof(span));
+ this.Span = span ?? throw new ArgumentNullException(nameof(span), "Parameter cannot be null");
if (!this.Span.Context.IsValid)
{
- throw new ArgumentException(nameof(this.Span.Context));
+ throw new ArgumentException("Passed span's context is not valid", nameof(this.Span.Context));
}
this.spanContextShim = new SpanContextShim(this.Span.Context); | 1 | // <copyright file="SpanShim.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Linq;
using global::OpenTracing;
using OpenTelemetry.Trace;
namespace OpenTelemetry.Shims.OpenTracing
{
internal sealed class SpanShim : global::OpenTracing.ISpan
{
/// <summary>
/// The default event name if not specified.
/// </summary>
public const string DefaultEventName = "log";
private static readonly IReadOnlyCollection<Type> OpenTelemetrySupportedAttributeValueTypes = new List<Type>
{
typeof(string),
typeof(bool),
typeof(byte),
typeof(short),
typeof(int),
typeof(long),
typeof(float),
typeof(double),
};
private readonly SpanContextShim spanContextShim;
public SpanShim(TelemetrySpan span)
{
this.Span = span ?? throw new ArgumentNullException(nameof(span));
if (!this.Span.Context.IsValid)
{
throw new ArgumentException(nameof(this.Span.Context));
}
this.spanContextShim = new SpanContextShim(this.Span.Context);
}
public ISpanContext Context => this.spanContextShim;
public TelemetrySpan Span { get; private set; }
/// <inheritdoc/>
public void Finish()
{
this.Span.End();
}
/// <inheritdoc/>
public void Finish(DateTimeOffset finishTimestamp)
{
this.Span.End(finishTimestamp);
}
/// <inheritdoc/>
public string GetBaggageItem(string key)
=> Baggage.GetBaggage(key);
/// <inheritdoc/>
public global::OpenTracing.ISpan Log(DateTimeOffset timestamp, IEnumerable<KeyValuePair<string, object>> fields)
{
if (fields is null)
{
throw new ArgumentNullException(nameof(fields));
}
var payload = ConvertToEventPayload(fields);
var eventName = payload.Item1;
var spanAttributes = new SpanAttributes();
foreach (var field in payload.Item2)
{
switch (field.Value)
{
case long value:
spanAttributes.Add(field.Key, value);
break;
case long[] value:
spanAttributes.Add(field.Key, value);
break;
case bool value:
spanAttributes.Add(field.Key, value);
break;
case bool[] value:
spanAttributes.Add(field.Key, value);
break;
case double value:
spanAttributes.Add(field.Key, value);
break;
case double[] value:
spanAttributes.Add(field.Key, value);
break;
case string value:
spanAttributes.Add(field.Key, value);
break;
case string[] value:
spanAttributes.Add(field.Key, value);
break;
default:
break;
}
}
if (timestamp == DateTimeOffset.MinValue)
{
this.Span.AddEvent(eventName, spanAttributes);
}
else
{
this.Span.AddEvent(eventName, timestamp, spanAttributes);
}
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan Log(IEnumerable<KeyValuePair<string, object>> fields)
{
return this.Log(DateTimeOffset.MinValue, fields);
}
/// <inheritdoc/>
public global::OpenTracing.ISpan Log(string @event)
{
if (@event is null)
{
throw new ArgumentNullException(nameof(@event));
}
this.Span.AddEvent(@event);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan Log(DateTimeOffset timestamp, string @event)
{
if (@event is null)
{
throw new ArgumentNullException(nameof(@event));
}
this.Span.AddEvent(@event, timestamp);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetBaggageItem(string key, string value)
{
Baggage.SetBaggage(key, value);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetOperationName(string operationName)
{
if (operationName is null)
{
throw new ArgumentNullException(nameof(operationName));
}
this.Span.UpdateName(operationName);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(string key, string value)
{
if (key is null)
{
throw new ArgumentNullException(nameof(key));
}
this.Span.SetAttribute(key, value);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(string key, bool value)
{
if (key is null)
{
throw new ArgumentNullException(nameof(key));
}
// Special case the OpenTracing Error Tag
// see https://opentracing.io/specification/conventions/
if (global::OpenTracing.Tag.Tags.Error.Key.Equals(key, StringComparison.Ordinal))
{
this.Span.SetStatus(value ? Trace.Status.Error : Trace.Status.Ok);
}
else
{
this.Span.SetAttribute(key, value);
}
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(string key, int value)
{
if (key is null)
{
throw new ArgumentNullException(nameof(key));
}
this.Span.SetAttribute(key, value);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(string key, double value)
{
if (key is null)
{
throw new ArgumentNullException(nameof(key));
}
this.Span.SetAttribute(key, value);
return this;
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(global::OpenTracing.Tag.BooleanTag tag, bool value)
{
return this.SetTag(tag?.Key, value);
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(global::OpenTracing.Tag.IntOrStringTag tag, string value)
{
if (int.TryParse(value, out var result))
{
return this.SetTag(tag?.Key, result);
}
return this.SetTag(tag?.Key, value);
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(global::OpenTracing.Tag.IntTag tag, int value)
{
return this.SetTag(tag?.Key, value);
}
/// <inheritdoc/>
public global::OpenTracing.ISpan SetTag(global::OpenTracing.Tag.StringTag tag, string value)
{
return this.SetTag(tag?.Key, value);
}
/// <summary>
/// Constructs an OpenTelemetry event payload from an OpenTracing Log key/value map.
/// </summary>
/// <param name="fields">The fields.</param>
/// <returns>A 2-Tuple containing the event name and payload information.</returns>
private static Tuple<string, IDictionary<string, object>> ConvertToEventPayload(IEnumerable<KeyValuePair<string, object>> fields)
{
string eventName = null;
var attributes = new Dictionary<string, object>();
foreach (var field in fields)
{
// TODO verify null values are NOT allowed.
if (field.Value == null)
{
continue;
}
// Duplicate keys must be ignored even though they appear to be allowed in OpenTracing.
if (attributes.ContainsKey(field.Key))
{
continue;
}
if (eventName == null && field.Key.Equals(LogFields.Event, StringComparison.Ordinal) && field.Value is string value)
{
// This is meant to be the event name
eventName = value;
// We don't want to add the event name as a separate attribute
continue;
}
// Supported types are added directly, all other types are converted to strings.
if (OpenTelemetrySupportedAttributeValueTypes.Contains(field.Value.GetType()))
{
attributes.Add(field.Key, field.Value);
}
else
{
// TODO should we completely ignore unsupported types?
attributes.Add(field.Key, field.Value.ToString());
}
}
return new Tuple<string, IDictionary<string, object>>(eventName ?? DefaultEventName, attributes);
}
}
}
 | 1 | 19,329 | Should we end the exception message text with a period? | open-telemetry-opentelemetry-dotnet | .cs
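The same wording question comes up with Java's null-check helpers. A small sketch (class name hypothetical) using `java.util.Objects.requireNonNull`, whose message argument raises the identical trailing-period style choice:

```java
import java.util.Objects;

class SpanShimSketch {
    private final Object span;

    SpanShimSketch(Object span) {
        // End the message with a period or not? Either way, keep the choice
        // consistent across all messages in the project.
        this.span = Objects.requireNonNull(span, "span must not be null");
    }
}
```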
@@ -752,9 +752,10 @@ dispatch_enter_dynamorio(dcontext_t *dcontext)
wherewasi == DR_WHERE_APP ||
/* If the thread was waiting at check_wait_at_safe_point when getting
* suspended, we were in dispatch (ref i#3427). We will be here after the
- * thread's context is being reset before sending it native.
+ * thread's context is being reset proactively (due to some -reset_at_*
+ * option) or before sending it native.
*/
- (dcontext->go_native && wherewasi == DR_WHERE_DISPATCH));
+ wherewasi == DR_WHERE_DISPATCH);
dcontext->whereami = DR_WHERE_DISPATCH;
ASSERT_LOCAL_HEAP_UNPROTECTED(dcontext);
ASSERT(check_should_be_protected(DATASEC_RARELY_PROT)); | 1 | /* **********************************************************
* Copyright (c) 2011-2020 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* dispatch.c - central dynamo control manager
*/
#include "globals.h"
#include "link.h"
#include "fragment.h"
#include "fcache.h"
#include "monitor.h"
#include "synch.h"
#include "perscache.h"
#include "native_exec.h"
#include "translate.h"
#ifdef CLIENT_INTERFACE
# include "emit.h"
# include "arch.h"
# include "instrument.h"
#endif
#ifdef DGC_DIAGNOSTICS
# include "instr.h"
# include "disassemble.h"
#endif
#ifdef RCT_IND_BRANCH
# include "rct.h"
#endif
#ifdef VMX86_SERVER
# include "vmkuw.h"
#endif
/* forward declarations */
static void
dispatch_enter_dynamorio(dcontext_t *dcontext);
static bool
dispatch_enter_fcache(dcontext_t *dcontext, fragment_t *targetf);
static void
dispatch_enter_fcache_stats(dcontext_t *dcontext, fragment_t *targetf);
static void
enter_fcache(dcontext_t *dcontext, fcache_enter_func_t entry, cache_pc pc);
static void
dispatch_enter_native(dcontext_t *dcontext);
static void
dispatch_exit_fcache(dcontext_t *dcontext);
static void
dispatch_exit_fcache_stats(dcontext_t *dcontext);
static void
handle_post_system_call(dcontext_t *dcontext);
static void
handle_special_tag(dcontext_t *dcontext);
#ifdef WINDOWS
static void
handle_callback_return(dcontext_t *dcontext);
#endif
#ifdef CLIENT_INTERFACE
/* PR 356503: detect clients making syscalls via sysenter */
static inline void
found_client_sysenter(void)
{
CLIENT_ASSERT(false,
"Is your client invoking raw system calls via vdso sysenter? "
"While such behavior is not recommended and can create problems, "
"it may work with the -sysenter_is_int80 runtime option.");
}
#endif
static bool
exited_due_to_ni_syscall(dcontext_t *dcontext)
{
if (TESTANY(LINK_NI_SYSCALL_ALL, dcontext->last_exit->flags))
return true;
if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags) &&
(dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_NI_SYSCALL_INT_0x81 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_NI_SYSCALL_INT_0x82))
return true;
return false;
}
/* This is the central hub of control management in DynamoRIO.
* It is entered with a clean dstack at startup and after every cache
* exit, whether normal or kernel-mediated via a trampoline context switch.
* Having no stack state kept across cache executions avoids
* self-protection issues with the dstack.
*/
void
d_r_dispatch(dcontext_t *dcontext)
{
fragment_t *targetf;
fragment_t coarse_f;
#ifdef HAVE_TLS
# if defined(UNIX) && defined(X86)
/* i#2089: the parent of a new thread has TLS in an unstable state
* and needs to restore it prior to invoking get_thread_private_dcontext().
*/
if (get_at_syscall(dcontext) && was_thread_create_syscall(dcontext))
os_clone_post(dcontext);
# endif
ASSERT(dcontext == get_thread_private_dcontext() ||
/* i#813: the app hit our post-sysenter hook while native */
(dcontext->whereami == DR_WHERE_APP &&
dcontext->last_exit == get_syscall_linkstub()));
#else
# ifdef UNIX
/* CAUTION: for !HAVE_TLS, upon a fork, the child's
* get_thread_private_dcontext() will return NULL because its thread
* id is different and tls_table hasn't been updated yet (will be
* done in post_system_call()). NULL dcontext thus returned causes
* logging/core dumping to malfunction; kstats trigger asserts.
*/
ASSERT(dcontext == get_thread_private_dcontext() || pid_cached != get_process_id());
# endif
#endif
dispatch_enter_dynamorio(dcontext);
LOG(THREAD, LOG_INTERP, 2, "\nd_r_dispatch: target = " PFX "\n", dcontext->next_tag);
/* This is really a 1-iter loop most of the time: we only iterate
* when we obtain a target fragment but then fail to enter the
* cache due to flushing before we get there.
*/
do {
if (is_in_dynamo_dll(dcontext->next_tag) ||
dcontext->next_tag == BACK_TO_NATIVE_AFTER_SYSCALL || dcontext->go_native) {
handle_special_tag(dcontext);
}
/* Neither hotp_only nor thin_client should have any fragment
* fcache related work to do.
*/
ASSERT(!RUNNING_WITHOUT_CODE_CACHE());
targetf = fragment_lookup_fine_and_coarse(dcontext, dcontext->next_tag, &coarse_f,
dcontext->last_exit);
#ifdef UNIX
/* i#1276: dcontext->next_tag could be a special stub pc used by
* DR to maintain control in hybrid execution, in which case the
* target should be replaced with correct app target.
*/
if (targetf == NULL && DYNAMO_OPTION(native_exec) &&
DYNAMO_OPTION(native_exec_opt) && native_exec_replace_next_tag(dcontext))
continue;
#endif
do {
if (targetf != NULL) {
KSTART(monitor_enter);
/* invoke monitor to continue or start a trace
* may result in changing or nullifying targetf
*/
targetf = monitor_cache_enter(dcontext, targetf);
KSTOP_NOT_MATCHING(monitor_enter); /* or monitor_enter_thci */
}
if (targetf != NULL)
break;
/* must call outside of USE_BB_BUILDING_LOCK guard for bb_lock_would_have: */
SHARED_BB_LOCK();
if (USE_BB_BUILDING_LOCK() || targetf == NULL) {
/* must re-lookup while holding lock and keep the lock until we've
* built the bb and added it to the lookup table
* FIXME: optimize away redundant lookup: flags to know why came out?
*/
targetf = fragment_lookup_fine_and_coarse(dcontext, dcontext->next_tag,
&coarse_f, dcontext->last_exit);
}
if (targetf == NULL) {
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
targetf = build_basic_block_fragment(
dcontext, dcontext->next_tag, 0, true /*link*/,
true /*visible*/
_IF_CLIENT(false /*!for_trace*/) _IF_CLIENT(NULL));
SELF_PROTECT_LOCAL(dcontext, READONLY);
}
if (targetf != NULL && TEST(FRAG_COARSE_GRAIN, targetf->flags)) {
/* targetf is a static temp fragment protected by bb_building_lock,
* so we must make a local copy to use before releasing the lock.
* FIXME: best to pass local wrapper to build_basic_block_fragment
* and all the way through emit and link? Would need linkstubs
* tailing the fragment_t.
*/
ASSERT(USE_BB_BUILDING_LOCK_STEADY_STATE());
fragment_coarse_wrapper(&coarse_f, targetf->tag,
FCACHE_ENTRY_PC(targetf));
targetf = &coarse_f;
}
SHARED_BB_UNLOCK();
if (targetf == NULL)
break;
/* loop around and re-do monitor check */
} while (true);
if (targetf != NULL) {
if (dispatch_enter_fcache(dcontext, targetf)) {
/* won't reach here: will re-enter d_r_dispatch() with a clean stack */
ASSERT_NOT_REACHED();
} else
targetf = NULL; /* targetf was flushed */
}
} while (true);
ASSERT_NOT_REACHED();
}
/* returns true if pc is a point at which DynamoRIO should stop interpreting */
bool
is_stopping_point(dcontext_t *dcontext, app_pc pc)
{
if ((pc == BACK_TO_NATIVE_AFTER_SYSCALL &&
/* case 6253: app may xfer to this "address" in which case pass
* exception to app
*/
dcontext->native_exec_postsyscall != NULL)
#ifdef DR_APP_EXPORTS
|| (!automatic_startup &&
(pc == (app_pc)dynamorio_app_exit ||
/* FIXME: Is this a holdover from long ago? dynamo_thread_exit
* should not be called from the cache.
*/
pc == (app_pc)dynamo_thread_exit || pc == (app_pc)dr_app_stop ||
pc == (app_pc)dr_app_stop_and_cleanup ||
pc == (app_pc)dr_app_stop_and_cleanup_with_stats))
#endif
#ifdef WINDOWS
/* we go all the way to NtTerminateThread/NtTerminateProcess */
#else /* UNIX */
/* we go all the way to SYS_exit or SYS_{,t,tg}kill(SIGABRT) */
#endif
)
return true;
return false;
}
static void
dispatch_enter_fcache_stats(dcontext_t *dcontext, fragment_t *targetf)
{
#ifdef DEBUG
# ifdef DGC_DIAGNOSTICS
if (TEST(FRAG_DYNGEN, targetf->flags) && !is_dyngen_vsyscall(targetf->tag)) {
char buf[MAXIMUM_SYMBOL_LENGTH];
bool stack = is_address_on_stack(dcontext, targetf->tag);
LOG(THREAD, LOG_DISPATCH, 1,
"Entry into dyngen F%d(" PFX "%s%s) via:", targetf->id, targetf->tag,
stack ? " stack" : "",
(targetf->flags & FRAG_DYNGEN_RESTRICTED) != 0 ? " BAD" : "");
if (!LINKSTUB_FAKE(dcontext->last_exit)) {
app_pc translated_pc;
/* can't recreate if fragment is deleted -- but should be fake then */
ASSERT(!TEST(FRAG_WAS_DELETED, dcontext->last_fragment->flags));
translated_pc = recreate_app_pc(
dcontext, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit),
dcontext->last_fragment);
if (translated_pc != NULL) {
disassemble(dcontext, translated_pc, THREAD);
print_symbolic_address(translated_pc, buf, sizeof(buf), false);
LOG(THREAD, LOG_DISPATCH, 1, " %s\n", buf);
}
if (!stack &&
(strstr(buf, "user32.dll") != NULL ||
strstr(buf, "USER32.DLL") != NULL)) {
/* try to find who set up user32 callback */
dump_mcontext_callstack(dcontext);
}
DOLOG(stack ? 1U : 2U, LOG_DISPATCH, {
LOG(THREAD, LOG_DISPATCH, 1, "Originating bb:\n");
disassemble_app_bb(dcontext, dcontext->last_fragment->tag, THREAD);
});
} else {
/* FIXME: print type from last_exit */
LOG(THREAD, LOG_DISPATCH, 1, "\n");
}
if (stack) {
/* try to understand where code is on stack */
LOG(THREAD, LOG_DISPATCH, 1, "cur esp=" PFX " ebp=" PFX "\n",
get_mcontext(dcontext)->xsp, get_mcontext(dcontext)->xbp);
dump_mcontext_callstack(dcontext);
}
}
# endif
if (d_r_stats->loglevel >= 2 && (d_r_stats->logmask & LOG_DISPATCH) != 0) {
/* XXX: should use a different mask - and get printed at level 2 when turned on */
DOLOG(4, LOG_DISPATCH,
{ dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML); });
DOLOG(6, LOG_DISPATCH, { dump_mcontext_callstack(dcontext); });
DOKSTATS({ DOLOG(6, LOG_DISPATCH, { kstats_dump_stack(dcontext); }); });
LOG(THREAD, LOG_DISPATCH, 2, "Entry into F%d(" PFX ")." PFX " %s%s%s",
targetf->id, targetf->tag, FCACHE_ENTRY_PC(targetf),
IF_X86_ELSE(
IF_X64_ELSE(FRAG_IS_32(targetf->flags) ? "(32-bit)" : "", ""),
IF_ARM_ELSE(FRAG_IS_THUMB(targetf->flags) ? "(T32)" : "(A32)", "")),
TEST(FRAG_COARSE_GRAIN, targetf->flags) ? "(coarse)" : "",
((targetf->flags & FRAG_IS_TRACE_HEAD) != 0) ? "(trace head)" : "",
((targetf->flags & FRAG_IS_TRACE) != 0) ? "(trace)" : "");
LOG(THREAD, LOG_DISPATCH, 2, "%s",
TEST(FRAG_SHARED, targetf->flags) ? "(shared)" : "");
# ifdef DGC_DIAGNOSTICS
LOG(THREAD, LOG_DISPATCH, 2, "%s",
TEST(FRAG_DYNGEN, targetf->flags) ? "(dyngen)" : "");
# endif
LOG(THREAD, LOG_DISPATCH, 2, "\n");
DOLOG(3, LOG_SYMBOLS, {
char symbuf[MAXIMUM_SYMBOL_LENGTH];
print_symbolic_address(targetf->tag, symbuf, sizeof(symbuf), true);
LOG(THREAD, LOG_SYMBOLS, 3, "\t%s\n", symbuf);
});
}
#endif /* DEBUG */
}
/* Executes a target fragment in the fragment cache */
static bool
dispatch_enter_fcache(dcontext_t *dcontext, fragment_t *targetf)
{
fcache_enter_func_t fcache_enter;
ASSERT(targetf != NULL);
/* ensure we don't take over when we should be going native */
ASSERT(dcontext->native_exec_postsyscall == NULL);
/* We wait until here, rather than at cache exit time, to do lazy
* linking so we can link to newly created fragments.
*/
if (dcontext->last_exit == get_coarse_exit_linkstub() ||
/* We need to lazy link if either of src or tgt is coarse */
(LINKSTUB_DIRECT(dcontext->last_exit->flags) &&
TEST(FRAG_COARSE_GRAIN, targetf->flags))) {
coarse_lazy_link(dcontext, targetf);
}
if (!enter_nolinking(dcontext, targetf, true)) {
/* not actually entering cache, so back to couldbelinking */
enter_couldbelinking(dcontext, NULL, true);
LOG(THREAD, LOG_DISPATCH, 2, "Just flushed targetf, next_tag is " PFX "\n",
dcontext->next_tag);
STATS_INC(num_entrances_aborted);
/* shared entrance cannot-tell-if-deleted -> invalidate targetf
* but then may double-do the trace!
* FIXME: for now, we abort every time, ok to abort twice (first time
* b/c there was a real flush of targetf), but could be perf hit.
*/
trace_abort(dcontext);
return false;
}
dispatch_enter_fcache_stats(dcontext, targetf);
/* FIXME: for now we do this before the synch point to avoid complexity of
* missing a KSTART(fcache_* for cases like NtSetContextThread where a thread
* appears back at d_r_dispatch() from the synch point w/o ever entering the cache.
* To truly fix we need to have the NtSetContextThread handler determine
* whether its suspended target is at this synch point or in the cache.
*/
DOKSTATS({
/* stopped in dispatch_exit_fcache_stats */
if (TEST(FRAG_IS_TRACE, targetf->flags))
KSTART(fcache_trace_trace);
else
KSTART(fcache_default); /* fcache_bb_bb or fcache_bb_trace */
/* FIXME: overestimates fcache time by counting in
* fcache_enter/fcache_return for it - proper reading of this
* value should discount the minimal cost of
* fcache_enter/fcache_return for actual code cache times
*/
/* FIXME: asynch events currently continue their current kstat
* until they get back to d_r_dispatch, so in-fcache kstats are counting
* the in-DR trampoline execution time!
*/
});
/* synch point for suspend, terminate, and detach */
/* assumes mcontext is valid including errno but not pc (which we fix here)
* assumes that thread is holding no locks
* also assumes past enter_nolinking, so could_be_linking is false
* for safety with respect to flush */
/* a fast check before the heavy lifting */
if (should_wait_at_safe_spot(dcontext)) {
/* FIXME : we could put this synch point in enter_fcache but would need
* to use SYSCALL_PC for syscalls (see issues with that in win32/os.c)
*/
priv_mcontext_t *mcontext = get_mcontext(dcontext);
cache_pc save_pc = mcontext->pc;
/* FIXME : implementation choice, we could do recreate_app_pc
* (fairly expensive but this is rare) instead of using the tag
* which is a little hacky but should always be right */
mcontext->pc = targetf->tag;
/* could be targeting interception code or our dll main, would be
* incorrect for GetContextThread and racy for detach, though we
* would expect it to be very rare */
if (!is_dynamo_address(mcontext->pc)) {
check_wait_at_safe_spot(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
/* If we don't come back here synch-er is responsible for ensuring
* our kstat stack doesn't get off (have to do a KSTART here) -- we
* don't want to do the KSTART of fcache_* before this to avoid
* counting synch time.
*/
} else {
LOG(THREAD, LOG_SYNCH, 1,
"wait_at_safe_spot - unable to wait, targeting dr addr " PFX,
mcontext->pc);
STATS_INC(no_wait_entries);
}
mcontext->pc = save_pc;
}
#ifdef UNIX
/* We store this for purposes like signal unlinking (i#2019) */
dcontext->asynch_target = dcontext->next_tag;
#endif
#if defined(UNIX) && defined(DEBUG)
/* i#238/PR 499179: check that libc errno hasn't changed. It's
* not worth actually saving+restoring since we'd also need to
* preserve on clean calls, a perf hit. Better to catch all libc
* routines that need it and wrap just those.
*/
ASSERT(
get_libc_errno() == dcontext->libc_errno ||
/* w/ private loader, our errno is disjoint from app's */
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false) ||
/* only when pthreads is loaded does libc switch to a per-thread
* errno, so our raw thread tests end up using the same errno
* for each thread!
*/
check_filter("linux.thread;linux.clone", get_short_name(get_application_name())));
#endif
#if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86)
/* i#107: handle segment register usage conflicts between app and dr:
* if the target fragment has an instr that updates the segment selector,
* update the corresponding information maintained by DR.
*/
if (INTERNAL_OPTION(mangle_app_seg) && TEST(FRAG_HAS_MOV_SEG, targetf->flags)) {
os_handle_mov_seg(dcontext, targetf->tag);
}
#endif
ASSERT(dr_get_isa_mode(dcontext) ==
FRAG_ISA_MODE(targetf->flags)
IF_X64(||
(dr_get_isa_mode(dcontext) == DR_ISA_IA32 &&
!FRAG_IS_32(targetf->flags) && DYNAMO_OPTION(x86_to_x64))));
if (TEST(FRAG_SHARED, targetf->flags))
fcache_enter = get_fcache_enter_shared_routine(dcontext);
else
fcache_enter = get_fcache_enter_private_routine(dcontext);
enter_fcache(
dcontext,
(fcache_enter_func_t)
/* DEFAULT_ISA_MODE as we want the ISA mode of our gencode */
convert_data_to_function(PC_AS_JMP_TGT(DEFAULT_ISA_MODE, (app_pc)fcache_enter)),
#ifdef AARCH64
/* Entry to fcache requires indirect branch. */
PC_AS_JMP_TGT(FRAG_ISA_MODE(targetf->flags), FCACHE_PREFIX_ENTRY_PC(targetf))
#else
PC_AS_JMP_TGT(FRAG_ISA_MODE(targetf->flags), FCACHE_ENTRY_PC(targetf))
#endif
);
#ifdef UNIX
if (dcontext->signals_pending > 0) {
/* i#2019: the fcache_enter generated code starts with a check for pending
* signals, allowing the signal handling code to simply queue signals that
* arrive in DR code and only attempt to unlink for interruption points known
* to be safe for unlinking.
*/
KSTOP_NOT_MATCHING(fcache_default);
dcontext->whereami = DR_WHERE_DISPATCH;
enter_couldbelinking(dcontext, NULL, true);
dcontext->next_tag = dcontext->asynch_target;
LOG(THREAD, LOG_DISPATCH, 2,
"Signal arrived while in DR: aborting fcache_enter; next_tag is " PFX "\n",
dcontext->next_tag);
STATS_INC(num_entrances_aborted);
trace_abort(dcontext);
receive_pending_signal(dcontext);
return false;
}
#endif
ASSERT_NOT_REACHED();
return false;
}
/* Enters the cache at the specified entrance routine to execute the
* target pc.
* Does not return.
* Caller must do a KSTART to avoid kstats stack mismatches.
* FIXME: only allow access to fcache_enter routine through here?
* Indirect routine needs special treatment for handle_callback_return
*/
static void
enter_fcache(dcontext_t *dcontext, fcache_enter_func_t entry, cache_pc pc)
{
ASSERT(!is_couldbelinking(dcontext));
ASSERT(entry != NULL);
ASSERT(pc != NULL);
ASSERT(check_should_be_protected(DATASEC_RARELY_PROT));
/* CANNOT hold any locks across cache execution, as our thread synch
* assumes none are held
*/
ASSERT_OWN_NO_LOCKS();
ASSERT(dcontext->try_except.try_except_state == NULL);
/* prepare to enter fcache */
LOG(THREAD, LOG_DISPATCH, 4, "fcache_enter = " PFX ", target = " PFX "\n", entry, pc);
set_fcache_target(dcontext, pc);
ASSERT(pc != NULL);
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
/* prepare to enter fcache */
dcontext->prev_fragment = NULL;
/* top ten cache times */
dcontext->cache_frag_count = (uint64)0;
dcontext->cache_enter_time = get_time();
}
#endif
dcontext->whereami = DR_WHERE_FCACHE;
(*entry)(dcontext);
IF_WINDOWS(ASSERT_NOT_REACHED()); /* returns for signals on unix */
}
/* Handles special tags in DR or elsewhere that do interesting things.
* All PCs checked in here must be in DR or be BACK_TO_NATIVE_AFTER_SYSCALL.
* Does not return if we've hit a stopping point; otherwise returns with an
* updated next_tag for continued dispatch.
*/
static void
handle_special_tag(dcontext_t *dcontext)
{
if (native_exec_is_back_from_native(dcontext->next_tag)) {
/* This can happen if we start interpreting a native module. */
ASSERT(DYNAMO_OPTION(native_exec));
interpret_back_from_native(dcontext); /* updates next_tag */
}
if (is_stopping_point(dcontext, dcontext->next_tag) ||
/* We don't want this to be part of is_stopping_point() b/c we don't
* want bb building for state xl8 to look at it.
*/
dcontext->go_native) {
LOG(THREAD, LOG_INTERP, 1, "\n%s: thread " TIDFMT " returning to app @" PFX "\n",
dcontext->go_native ? "Requested to go native"
: "Found DynamoRIO stopping point",
d_r_get_thread_id(), dcontext->next_tag);
#ifdef DR_APP_EXPORTS
if (dcontext->next_tag == (app_pc)dr_app_stop)
send_all_other_threads_native();
#endif
dispatch_enter_native(dcontext);
ASSERT_NOT_REACHED(); /* noreturn */
}
}
#if defined(DR_APP_EXPORTS) || defined(UNIX)
static void
dispatch_at_stopping_point(dcontext_t *dcontext)
{
/* start/stop interface */
KSTOP_NOT_MATCHING(dispatch_num_exits);
/* if we stop in middle of tracing, thread-shared state may be messed
* up (e.g., monitor grabs fragment lock for unlinking),
* so abort the trace
*/
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_INTERP, 1, "squashing trace-in-progress\n");
trace_abort(dcontext);
}
LOG(THREAD, LOG_INTERP, 1, "\nappstart_cleanup: found stopping point\n");
# ifdef DEBUG
# ifdef DR_APP_EXPORTS
if (dcontext->next_tag == (app_pc)dynamo_thread_exit)
LOG(THREAD, LOG_INTERP, 1, "\t==dynamo_thread_exit\n");
else if (dcontext->next_tag == (app_pc)dynamorio_app_exit)
LOG(THREAD, LOG_INTERP, 1, "\t==dynamorio_app_exit\n");
else if (dcontext->next_tag == (app_pc)dr_app_stop)
LOG(THREAD, LOG_INTERP, 1, "\t==dr_app_stop\n");
else if (dcontext->next_tag == (app_pc)dr_app_stop_and_cleanup)
LOG(THREAD, LOG_INTERP, 1, "\t==dr_app_stop_and_cleanup\n");
else if (dcontext->next_tag == (app_pc)dr_app_stop_and_cleanup_with_stats)
LOG(THREAD, LOG_INTERP, 1, "\t==dr_app_stop_and_cleanup_with_stats\n");
# endif
# endif
/* XXX i#95: should we add an instrument_thread_detach_event()? */
# ifdef DR_APP_EXPORTS
/* not_under will be called by dynamo_shared_exit so skip it here. */
if (dcontext->next_tag != (app_pc)dr_app_stop_and_cleanup &&
dcontext->next_tag != (app_pc)dr_app_stop_and_cleanup_with_stats)
# endif
dynamo_thread_not_under_dynamo(dcontext);
dcontext->go_native = false;
}
#endif
/* Called when we reach an interpretation stopping point either for
* start/stop control of DR or for native_exec. In both cases we give up
* control and "go native", but we do not clean up the current thread,
* assuming we will either take control back, or the app will explicitly
* request we clean up.
*/
static void
dispatch_enter_native(dcontext_t *dcontext)
{
/* The new fcache_enter's clean dstack design makes it usable for
* entering native execution as well as the fcache.
*/
fcache_enter_func_t go_native =
(fcache_enter_func_t)convert_data_to_function(PC_AS_JMP_TGT(
DEFAULT_ISA_MODE, (app_pc)get_fcache_enter_gonative_routine(dcontext)));
set_last_exit(dcontext, (linkstub_t *)get_native_exec_linkstub());
ASSERT_OWN_NO_LOCKS();
if (dcontext->next_tag == BACK_TO_NATIVE_AFTER_SYSCALL) {
/* we're simply going native again after an intercepted syscall,
* not finalizing this thread or anything
*/
IF_WINDOWS(DEBUG_DECLARE(extern dcontext_t * early_inject_load_helper_dcontext;))
ASSERT(DYNAMO_OPTION(native_exec_syscalls)); /* else wouldn't have intercepted */
/* Assert here we have a reason for going back to native (-native_exec and
* non-empty native_exec_areas, RUNNING_WITHOUT_CODE_CACHE, hotp nudge thread
* pretending to be native while loading a dll, or on win2k
* early_inject_init() pretending to be native to find the inject address). */
ASSERT((DYNAMO_OPTION(native_exec) && native_exec_areas != NULL &&
!vmvector_empty(native_exec_areas)) ||
IF_WINDOWS((DYNAMO_OPTION(early_inject) &&
early_inject_load_helper_dcontext ==
get_thread_private_dcontext()) ||)
IF_HOTP(dcontext->nudge_thread ||)
/* clients requesting native execution come here */
IF_CLIENT_INTERFACE(dr_bb_hook_exists() ||) dcontext->currently_stopped ||
RUNNING_WITHOUT_CODE_CACHE());
ASSERT(dcontext->native_exec_postsyscall != NULL);
LOG(THREAD, LOG_ASYNCH, 1, "Returning to native " PFX " after a syscall\n",
dcontext->native_exec_postsyscall);
dcontext->next_tag =
PC_AS_JMP_TGT(dr_get_isa_mode(dcontext), dcontext->native_exec_postsyscall);
dcontext->native_exec_postsyscall = NULL;
LOG(THREAD, LOG_DISPATCH, 2,
"Entry into native_exec after intercepted syscall\n");
/* restore state as though never came out for syscall */
KSTOP_NOT_MATCHING_DC(dcontext, dispatch_num_exits);
#ifdef KSTATS
if (!dcontext->currently_stopped)
KSTART_DC(dcontext, fcache_default);
#endif
enter_nolinking(dcontext, NULL, true);
} else {
#if defined(DR_APP_EXPORTS) || defined(UNIX)
dispatch_at_stopping_point(dcontext);
enter_nolinking(dcontext, NULL, false);
#else
ASSERT_NOT_REACHED();
#endif
}
set_fcache_target(dcontext, dcontext->next_tag);
dcontext->whereami = DR_WHERE_APP;
#ifdef UNIX
do {
(*go_native)(dcontext);
/* If fcache_enter returns, there's a pending signal. It must
* be an alarm signal so we drop it as the simplest solution.
*/
ASSERT(dcontext->signals_pending);
dcontext->signals_pending = false;
} while (true);
#else
(*go_native)(dcontext);
#endif
ASSERT_NOT_REACHED();
}
static void
dispatch_enter_dynamorio(dcontext_t *dcontext)
{
/* We're transitioning to DynamoRIO from somewhere: either the fcache,
* the kernel (DR_WHERE_TRAMPOLINE), or the app itself via our start/stop API.
* N.B.: set whereami to DR_WHERE_APP iff this is the first d_r_dispatch() entry
* for this thread!
*/
dr_where_am_i_t wherewasi = dcontext->whereami;
#if defined(UNIX) && !defined(X64)
if (!(wherewasi == DR_WHERE_FCACHE || wherewasi == DR_WHERE_TRAMPOLINE ||
wherewasi == DR_WHERE_APP) &&
get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* This is probably our own syscalls hitting our own sysenter
* hook (PR 212570), since we're not completely user library
* independent (PR 206369).
* The primary calls I'm worried about are dl{open,close}.
* Note that we can't go jump to vsyscall_syscall_end_pc here b/c
* fcache_return cleared the dstack, so we can't really recover.
* We could put in a custom exit stub and return routine and recover,
* but we need to get library independent anyway so it's not worth it.
*/
/* PR 356503: clients using libraries that make syscalls can end up here */
IF_CLIENT_INTERFACE(found_client_sysenter());
ASSERT_BUG_NUM(
206369, false && "DR's own syscall (via user library) hit the sysenter hook");
}
#endif
ASSERT(wherewasi == DR_WHERE_FCACHE || wherewasi == DR_WHERE_TRAMPOLINE ||
wherewasi == DR_WHERE_APP ||
/* If the thread was waiting at check_wait_at_safe_point when getting
* suspended, we were in dispatch (ref i#3427). We will be here after the
* thread's context is being reset before sending it native.
*/
(dcontext->go_native && wherewasi == DR_WHERE_DISPATCH));
dcontext->whereami = DR_WHERE_DISPATCH;
ASSERT_LOCAL_HEAP_UNPROTECTED(dcontext);
ASSERT(check_should_be_protected(DATASEC_RARELY_PROT));
/* CANNOT hold any locks across cache execution, as our thread synch
* assumes none are held
*/
ASSERT_OWN_NO_LOCKS();
#if defined(UNIX) && defined(DEBUG)
/* i#238/PR 499179: check that libc errno hasn't changed */
/* w/ private loader, our errno is disjoint from app's */
if (IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true))
dcontext->libc_errno = get_libc_errno();
os_enter_dynamorio();
#endif
DOLOG(2, LOG_INTERP, {
if (wherewasi == DR_WHERE_APP) {
LOG(THREAD, LOG_INTERP, 2, "\ninitial d_r_dispatch: target = " PFX "\n",
dcontext->next_tag);
dump_mcontext_callstack(dcontext);
dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML);
}
});
/* We have to perform some tasks with last_exit early, before we
* become couldbelinking -- the rest are done in dispatch_exit_fcache().
* It's ok to de-reference last_exit since even though deleter may assume
* no one has ptrs to it, cannot delete until we're officially out of the
* cache, which doesn't happen until enter_couldbelinking -- still kind of
* messy that we're violating assumption of no ptrs...
*/
if (wherewasi == DR_WHERE_APP) { /* first entrance */
if (dcontext->last_exit == get_syscall_linkstub()) {
/* i#813: the app hit our post-sysenter hook while native.
* XXX: should we try to process ni syscalls here? But we're only
* seeing post- and not pre-.
*/
LOG(THREAD, LOG_INTERP, 2, "hit post-sysenter hook while native\n");
ASSERT(dcontext->currently_stopped || IS_CLIENT_THREAD(dcontext));
dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL;
dcontext->native_exec_postsyscall =
IF_UNIX_ELSE(vsyscall_sysenter_displaced_pc, vsyscall_syscall_end_pc);
} else {
ASSERT(dcontext->last_exit == get_starting_linkstub() ||
/* The start/stop API will set this linkstub. */
IF_APP_EXPORTS(dcontext->last_exit == get_native_exec_linkstub() ||)
/* new thread */
IF_WINDOWS_ELSE_0(dcontext->last_exit == get_asynch_linkstub()));
}
} else {
/* MUST be set, if only to a fake linkstub_t */
ASSERT(dcontext->last_exit != NULL);
/* cache last_exit's fragment */
dcontext->last_fragment = linkstub_fragment(dcontext, dcontext->last_exit);
/* If we exited from an indirect branch then dcontext->next_tag
* already has the next tag value; otherwise we must set it here,
* before we might dive back into the cache for a system call.
*/
if (LINKSTUB_DIRECT(dcontext->last_exit->flags)) {
if (INTERNAL_OPTION(cbr_single_stub)) {
linkstub_t *nxt = linkstub_shares_next_stub(
dcontext, dcontext->last_fragment, dcontext->last_exit);
if (nxt != NULL) {
/* must distinguish the two based on eflags */
dcontext->last_exit = linkstub_cbr_disambiguate(
dcontext, dcontext->last_fragment, dcontext->last_exit, nxt);
ASSERT(dcontext->last_fragment ==
linkstub_fragment(dcontext, dcontext->last_exit));
STATS_INC(cbr_disambiguations);
}
}
dcontext->next_tag =
EXIT_TARGET_TAG(dcontext, dcontext->last_fragment, dcontext->last_exit);
} else {
/* get src info from coarse ibl exit into the right place */
if (DYNAMO_OPTION(coarse_units)) {
if (is_ibl_sourceless_linkstub((const linkstub_t *)dcontext->last_exit))
set_coarse_ibl_exit(dcontext);
else if (DYNAMO_OPTION(use_persisted) &&
dcontext->last_exit == get_coarse_exit_linkstub()) {
/* i#670: for frozen unit, shift from persist-time mod base
* to use-time mod base
*/
coarse_info_t *info = dcontext->coarse_exit.dir_exit;
ASSERT(info != NULL);
if (info->mod_shift != 0 &&
dcontext->next_tag >= info->persist_base &&
dcontext->next_tag <
info->persist_base + (info->end_pc - info->base_pc))
dcontext->next_tag -= info->mod_shift;
}
}
}
dispatch_exit_fcache_stats(dcontext);
/* Maybe-permanent native transitions (dr_app_stop()) have to pop kstack,
* and thus so do temporary native_exec transitions. Thus, for neither
* is there anything to pop here.
*/
if (dcontext->last_exit != get_native_exec_linkstub() &&
dcontext->last_exit != get_native_exec_syscall_linkstub())
KSTOP_NOT_MATCHING(dispatch_num_exits);
}
/* KSWITCHed next time around for a better explanation */
KSTART_DC(dcontext, dispatch_num_exits);
if (wherewasi != DR_WHERE_APP) { /* if not first entrance */
if (get_at_syscall(dcontext))
handle_post_system_call(dcontext);
#ifdef X86
/* If the next basic block starts at a debug register value,
* we fire a single step exception before getting to the basic block. */
if (debug_register_fire_on_addr(dcontext->next_tag)) {
LOG(THREAD, LOG_DISPATCH, 2, "Generates single step before " PFX "\n",
dcontext->next_tag);
os_forge_exception(dcontext->next_tag, SINGLE_STEP_EXCEPTION);
ASSERT_NOT_REACHED();
}
#endif
        /* A non-ignorable syscall or cb return ending a bb must be acted on.
         * We do it here to avoid becoming couldbelinking twice.
         */
if (exited_due_to_ni_syscall(dcontext)
IF_CLIENT_INTERFACE(|| instrument_invoke_another_syscall(dcontext))) {
handle_system_call(dcontext);
/* will return here if decided to skip the syscall; else, back to d_r_dispatch
*/
}
#ifdef WINDOWS
else if (TEST(LINK_CALLBACK_RETURN, dcontext->last_exit->flags)) {
handle_callback_return(dcontext);
ASSERT_NOT_REACHED();
}
#endif
#ifdef AARCH64
if (dcontext->last_exit == get_selfmod_linkstub()) {
app_pc begin = (app_pc)dcontext->local_state->spill_space.r2;
app_pc end = (app_pc)dcontext->local_state->spill_space.r3;
dcontext->next_tag = (app_pc)dcontext->local_state->spill_space.r4;
flush_fragments_from_region(dcontext, begin, end - begin, true,
NULL /*flush_completion_callback*/,
NULL /*user_data*/);
}
#endif
if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) {
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_SELFMOD) {
/* Case 8177: If we have a flushed fragment hit a self-write, we
* cannot delete it in our self-write handler (b/c of case 3559's
* incoming links union). But, our self-write handler needs to be
* nolinking and needs to check sandbox2ro_threshold. So, we do our
* self-write check first, but we don't actually delete there for
* FRAG_WAS_DELETED fragments.
*/
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
/* this fragment overwrote its original memory image */
fragment_self_write(dcontext);
/* FIXME: optimize this to stay writable if we're going to
* be exiting d_r_dispatch as well -- no very quick check though
*/
SELF_PROTECT_LOCAL(dcontext, READONLY);
} else if (dcontext->upcontext.upcontext.exit_reason >=
EXIT_REASON_FLOAT_PC_FNSAVE &&
dcontext->upcontext.upcontext.exit_reason <=
EXIT_REASON_FLOAT_PC_XSAVE64) {
float_pc_update(dcontext);
STATS_INC(float_pc_from_dispatch);
/* Restore */
dcontext->upcontext.upcontext.exit_reason = EXIT_REASON_SELFMOD;
} else if (dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_SINGLE_STEP) {
/* Delete basic block to generate only one single step exception. */
ASSERT(!TEST(FRAG_SHARED, dcontext->last_fragment->flags));
fragment_delete(dcontext, dcontext->last_fragment, FRAGDEL_ALL);
/* Restore */
dcontext->upcontext.upcontext.exit_reason = EXIT_REASON_SELFMOD;
/* Forge single step exception with right address. */
os_forge_exception(dcontext->next_tag, SINGLE_STEP_EXCEPTION);
ASSERT_NOT_REACHED();
} else if (dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_RSEQ_ABORT) {
#ifdef LINUX
rseq_process_native_abort(dcontext);
#else
ASSERT_NOT_REACHED();
#endif
/* Unset the reason. */
dcontext->upcontext.upcontext.exit_reason = EXIT_REASON_SELFMOD;
} else {
/* When adding any new reason, be sure to clear exit_reason,
* as selfmod exits do not bother to set the reason field to
* 0 for performance reasons (they are assumed to be more common
* than any other "special exit").
*/
ASSERT_NOT_REACHED();
}
}
}
/* make sure to tell flushers that we are now going to be mucking
* with link info
*/
if (!enter_couldbelinking(dcontext, dcontext->last_fragment, true)) {
LOG(THREAD, LOG_DISPATCH, 2, "Just flushed last_fragment\n");
/* last_fragment flushed, but cannot access here to copy it
* to fake linkstub_t, so assert that callee did (either when freeing or
* when noticing pending deletion flag)
*/
ASSERT(LINKSTUB_FAKE(dcontext->last_exit));
}
if (wherewasi != DR_WHERE_APP) { /* if not first entrance */
/* now fully process the last cache exit as couldbelinking */
dispatch_exit_fcache(dcontext);
}
}
/* Processing of the last exit from the cache.
* Invariant: dcontext->last_exit != NULL, though it may be a sentinel (see below).
*
* Note that the last exit and its owning fragment may be _fake_, i.e., just
* a copy of the key fields we typically check, for the following cases:
* - last fragment was flushed: fully deleted at cache exit synch point
* - last fragment was deleted since it overwrote itself (selfmod)
* - last fragment was deleted since it was a private trace building copy
* - last fragment was deleted for other reasons?!?
* - briefly during trace emitting, nobody should care though
* - coarse grain fragment exits, for which we have no linkstub_t or other
* extraneous bookkeeping
*
* For some cases we do not currently keep the key fields at all:
* - last fragment was flushed: detected at write fault
 * And sometimes we are unable to keep the key fields:
* - last fragment was flushed: targeted in ibl via target_deleted path
* These last two cases are the only exits from fragment for which we
* do not know the key fields. For the former, we exit in the middle of
* a fragment that was already created, so not knowing does not affect
* security policies or other checks much. The latter is the most problematic,
* as we have a number of checks depending on knowing the last exit when indirect.
*
* We have other types of exits from the cache that never involved a real
* fragment, for which we also use fake linkstubs:
* - no real last fragment: system call
* - no real last fragment: sigreturn
* - no real last fragment: native_exec return
* - callbacks clear last_exit, but should come out of the cache at a syscall
* (bug 2464 was back when tried to carry last_exit through syscall)
* so this will end up looking like the system call case
*/
static void
dispatch_exit_fcache(dcontext_t *dcontext)
{
/* case 7966: no distinction of islinking-ness for hotp_only & thin_client */
ASSERT(RUNNING_WITHOUT_CODE_CACHE() || is_couldbelinking(dcontext));
#if defined(WINDOWS) && defined(CLIENT_INTERFACE) && defined(DEBUG)
if (should_swap_teb_nonstack_fields()) {
ASSERT(!is_dynamo_address(dcontext->app_fls_data));
ASSERT(dcontext->app_fls_data == NULL ||
dcontext->app_fls_data != dcontext->priv_fls_data);
ASSERT(!is_dynamo_address(dcontext->app_nt_rpc));
ASSERT(dcontext->app_nt_rpc == NULL ||
dcontext->app_nt_rpc != dcontext->priv_nt_rpc);
ASSERT(!is_dynamo_address(dcontext->app_nls_cache));
ASSERT(!is_dynamo_address(dcontext->app_static_tls));
ASSERT(!is_dynamo_address(dcontext->app_stack_limit) ||
IS_CLIENT_THREAD(dcontext));
ASSERT(!is_dynamo_address((byte *)dcontext->app_stack_base - 1) ||
IS_CLIENT_THREAD(dcontext));
ASSERT((SWAP_TEB_STACKBASE() &&
is_dynamo_address((byte *)d_r_get_tls(TOP_STACK_TIB_OFFSET) - 1)) ||
(!SWAP_TEB_STACKBASE() &&
!is_dynamo_address((byte *)d_r_get_tls(TOP_STACK_TIB_OFFSET) - 1)));
ASSERT((SWAP_TEB_STACKLIMIT() &&
is_dynamo_address(d_r_get_tls(BASE_STACK_TIB_OFFSET))) ||
(!SWAP_TEB_STACKLIMIT() &&
!is_dynamo_address(d_r_get_tls(BASE_STACK_TIB_OFFSET))));
/* DrMi#1723: ensure client hitting app guard page updated TEB.StackLimit.
         * Unfortunately this does happen with fiber code that updates the TEB before
         * swapping the stack in the next bb, so we make it a curiosity.
*/
ASSERT_CURIOSITY_ONCE(
(SWAP_TEB_STACKLIMIT() &&
get_mcontext(dcontext)->xsp >= (reg_t)dcontext->app_stack_limit) ||
(!SWAP_TEB_STACKLIMIT() &&
get_mcontext(dcontext)->xsp >= (reg_t)d_r_get_tls(BASE_STACK_TIB_OFFSET)));
ASSERT(dcontext->app_nls_cache == NULL ||
dcontext->app_nls_cache != dcontext->priv_nls_cache);
ASSERT(dcontext->app_static_tls == NULL ||
dcontext->app_static_tls != dcontext->priv_static_tls);
}
#endif
if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) {
/* indirect branch exit processing */
#if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH)
/* PR 204770: use trace component bb tag for RCT source address */
app_pc src_tag = dcontext->last_fragment->tag;
if (!LINKSTUB_FAKE(dcontext->last_exit) &&
TEST(FRAG_IS_TRACE, dcontext->last_fragment->flags)) {
/* FIXME: should we call this for direct exits as well, up front? */
src_tag = get_trace_exit_component_tag(dcontext, dcontext->last_fragment,
dcontext->last_exit);
}
#endif
#ifdef RETURN_AFTER_CALL
/* This is the permission check for any new return target, it
* also double checks the findings of the indirect lookup
* routine
*/
if (dynamo_options.ret_after_call &&
TEST(LINK_RETURN, dcontext->last_exit->flags)) {
/* ret_after_call will raise a security violation on failure */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
ret_after_call_check(dcontext, dcontext->next_tag, src_tag);
SELF_PROTECT_LOCAL(dcontext, READONLY);
}
#endif /* RETURN_AFTER_CALL */
#ifdef RCT_IND_BRANCH
/* permission check for any new indirect call or jump target */
/* we care to detect violations only if blocking or at least
* reporting the corresponding branch types
*/
if (TESTANY(OPTION_REPORT | OPTION_BLOCK, DYNAMO_OPTION(rct_ind_call)) ||
TESTANY(OPTION_REPORT | OPTION_BLOCK, DYNAMO_OPTION(rct_ind_jump))) {
if ((EXIT_IS_CALL(dcontext->last_exit->flags) &&
TESTANY(OPTION_REPORT | OPTION_BLOCK, DYNAMO_OPTION(rct_ind_call))) ||
(EXIT_IS_JMP(dcontext->last_exit->flags) &&
TESTANY(OPTION_REPORT | OPTION_BLOCK, DYNAMO_OPTION(rct_ind_jump)))) {
/* case 4995: current shared syscalls implementation
* reuses the indirect jump table and marks its
* fake linkstub as such.
*/
if (LINKSTUB_FAKE(dcontext->last_exit) /* quick check */ &&
IS_SHARED_SYSCALLS_LINKSTUB(dcontext->last_exit)) {
ASSERT(IF_WINDOWS_ELSE(DYNAMO_OPTION(shared_syscalls), false));
ASSERT(EXIT_IS_JMP(dcontext->last_exit->flags));
} else {
/* rct_ind_branch_check will raise a security violation on failure */
rct_ind_branch_check(dcontext, dcontext->next_tag, src_tag);
}
}
}
#endif /* RCT_IND_BRANCH */
/* update IBL target tables for any indirect branch exit */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
/* update IBL target table if target is a valid IBT */
/* FIXME: This is good for modularity but adds
* extra lookups in the fragment table. If it is
* performance problem can we do it better?
* Probably best to get bb2bb to work better and
* not worry about optimizing DR code.
*/
fragment_add_ibl_target(dcontext, dcontext->next_tag,
extract_branchtype(dcontext->last_exit->flags));
/* FIXME: optimize this to stay writable if we're going to
* be building a bb as well -- no very quick check though
*/
SELF_PROTECT_LOCAL(dcontext, READONLY);
} /* LINKSTUB_INDIRECT */
/* ref bug 2323, we need monitor to restore last fragment now,
* before we break out of the loop to build a new fragment
* ASSUMPTION: all unusual cache exits (asynch events) abort the current
* trace, so this is the only place we need to restore anything.
* monitor_cache_enter() asserts that for us.
* NOTE : we wait till after the cache exit stats and logs to call
* monitor_cache_exit since it might change the flags of the last
* fragment and screw up the stats
*/
monitor_cache_exit(dcontext);
#ifdef SIDELINE
/* sideline synchronization */
if (dynamo_options.sideline) {
thread_id_t tid = d_r_get_thread_id();
if (pause_for_sideline == tid) {
d_r_mutex_lock(&sideline_lock);
if (pause_for_sideline == tid) {
LOG(THREAD, LOG_DISPATCH | LOG_THREADS | LOG_SIDELINE, 2,
"Thread %d waiting for sideline thread\n", tid);
signal_event(paused_for_sideline_event);
STATS_INC(num_wait_sideline);
wait_for_event(resume_from_sideline_event, 0);
d_r_mutex_unlock(&sideline_lock);
LOG(THREAD, LOG_DISPATCH | LOG_THREADS | LOG_SIDELINE, 2,
"Thread %d resuming after sideline thread\n", tid);
sideline_cleanup_replacement(dcontext);
} else
d_r_mutex_unlock(&sideline_lock);
}
}
#endif
#ifdef UNIX
if (dcontext->signals_pending) {
        /* FIXME: can overflow app stack if we stack up too many signals
* by interrupting prev handlers -- exacerbated by RAC lack of
* caching (case 1858), which causes a cache exit prior to
* executing every single sigreturn!
*/
receive_pending_signal(dcontext);
}
#endif
#ifdef CLIENT_INTERFACE
/* is ok to put the lock after the null check, this is only
* place they can be deleted
*/
if (dcontext->client_data != NULL && dcontext->client_data->to_do != NULL) {
client_todo_list_t *todo;
/* FIXME PR 200409: we're removing all API routines that use this
* todo list so we should never get here
*/
if (SHARED_FRAGMENTS_ENABLED()) {
USAGE_ERROR("CLIENT_INTERFACE incompatible with -shared_{bbs,traces}"
" at this time");
}
# ifdef CLIENT_SIDELINE
d_r_mutex_lock(&(dcontext->client_data->sideline_mutex));
# endif
todo = dcontext->client_data->to_do;
while (todo != NULL) {
client_todo_list_t *next_todo = todo->next;
fragment_t *f = fragment_lookup(dcontext, todo->tag);
if (f != NULL) {
if (todo->ilist != NULL) {
/* doing a replacement */
fragment_t *new_f;
uint orig_flags = f->flags;
void *vmlist = NULL;
DEBUG_DECLARE(bool ok;)
LOG(THREAD, LOG_INTERP, 3,
"Going to do a client fragment replacement at " PFX " F%d\n",
f->tag, f->id);
/* prevent emit from deleting f, we still need it */
/* FIXME: if f is shared we must hold change_linking_lock
* for the flags and vm area operations here
*/
ASSERT(!TEST(FRAG_SHARED, f->flags));
f->flags |= FRAG_CANNOT_DELETE;
DEBUG_DECLARE(ok =)
vm_area_add_to_list(dcontext, f->tag, &vmlist, orig_flags, f,
false /*no locks*/);
ASSERT(ok); /* should never fail for private fragments */
d_r_mangle(dcontext, todo->ilist, &f->flags, true, true);
/* mangle shouldn't change the flags here */
ASSERT(f->flags == (orig_flags | FRAG_CANNOT_DELETE));
new_f = emit_invisible_fragment(dcontext, todo->tag, todo->ilist,
orig_flags, vmlist);
f->flags = orig_flags; /* FIXME: ditto about change_linking_lock */
instrlist_clear_and_destroy(dcontext, todo->ilist);
fragment_copy_data_fields(dcontext, f, new_f);
shift_links_to_new_fragment(dcontext, f, new_f);
fragment_replace(dcontext, f, new_f);
DOLOG(2, LOG_INTERP, {
LOG(THREAD, LOG_INTERP, 3,
"Finished emitting replacement fragment %d\n", new_f->id);
disassemble_fragment(dcontext, new_f, d_r_stats->loglevel < 3);
});
}
/* delete [old] fragment */
if ((f->flags & FRAG_CANNOT_DELETE) == 0) {
uint actions;
LOG(THREAD, LOG_INTERP, 3, "Client deleting old F%d\n", f->id);
if (todo->ilist != NULL) {
/* for the fragment replacement case, the fragment should
* already be unlinked and removed from the hash table.
*/
actions = FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE;
} else {
actions = FRAGDEL_ALL;
}
fragment_delete(dcontext, f, actions);
STATS_INC(num_fragments_deleted_client);
} else {
LOG(THREAD, LOG_INTERP, 2, "Couldn't let client delete F%d\n", f->id);
}
} else {
LOG(THREAD, LOG_INTERP, 2,
"Failed to delete/replace fragment at tag " PFX
" because was already "
"deleted\n",
todo->tag);
}
HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, PROTECTED);
todo = next_todo;
}
dcontext->client_data->to_do = NULL;
# ifdef CLIENT_SIDELINE
d_r_mutex_unlock(&(dcontext->client_data->sideline_mutex));
# endif
}
#endif /* CLIENT_INTERFACE */
}
/* stats and logs on why we exited the code cache */
static void
dispatch_exit_fcache_stats(dcontext_t *dcontext)
{
#if defined(DEBUG) || defined(KSTATS)
fragment_t *next_f;
fragment_t *last_f;
fragment_t coarse_f;
#endif
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
int i, j;
uint64 end_time, total_time;
profile_fragment_dispatch(dcontext);
/* top ten cache times */
end_time = get_time();
total_time = end_time - dcontext->cache_enter_time;
for (i = 0; i < 10; i++) {
if (total_time > dcontext->cache_time[i]) {
/* insert */
for (j = 9; j > i; j--) {
dcontext->cache_time[j] = dcontext->cache_time[j - 1];
dcontext->cache_count[j] = dcontext->cache_count[j - 1];
}
dcontext->cache_time[i] = total_time;
dcontext->cache_count[i] = dcontext->cache_frag_count;
break;
}
}
}
#endif
#if defined(DEBUG) || defined(KSTATS)
STATS_INC(num_exits);
ASSERT(dcontext->last_exit != NULL);
/* special exits that aren't from real fragments */
if (dcontext->last_exit == get_syscall_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from system call\n");
STATS_INC(num_exits_syscalls);
# ifdef CLIENT_INTERFACE
/* PR 356503: clients using libraries that make syscalls, invoked from
* a clean call, will not trigger the whereami check below: so we
* locate here via mismatching kstat top-of-stack.
*/
KSTAT_THREAD(fcache_default, {
if (ks->node[ks->depth - 1].var == pv) {
found_client_sysenter();
}
});
# endif
KSTOP_NOT_PROPAGATED(syscall_fcache);
return;
} else if (dcontext->last_exit == get_selfmod_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from fragment via code mod\n");
STATS_INC(num_exits_code_mod_flush);
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
} else if (dcontext->last_exit == get_ibl_deleted_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from fragment deleted but hit in ibl\n");
STATS_INC(num_exits_ibl_deleted);
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
} else if (dcontext->last_exit == get_asynch_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from asynch event\n");
STATS_INC(num_exits_asynch);
/* w/ -shared_syscalls can also be a fragment kstart */
KSTOP_NOT_MATCHING_NOT_PROPAGATED(syscall_fcache);
return;
} else if (dcontext->last_exit == get_native_exec_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from native_exec execution\n");
STATS_INC(num_exits_native_exec);
/* may be a quite large kstat count */
KSWITCH_STOP_NOT_PROPAGATED(native_exec_fcache);
return;
} else if (dcontext->last_exit == get_native_exec_syscall_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from native_exec syscall trampoline\n");
STATS_INC(num_exits_native_exec_syscall);
/* may be a quite large kstat count */
# if defined(DEBUG) || defined(KSTATS)
/* Being native for the start/stop API is different from native_exec:
* the former has the kstack cleared, so there's nothing to stop here
* (xref i#813, i#1140).
*/
if (dcontext->currently_stopped)
LOG(THREAD, LOG_DISPATCH, 2, "Thread is start/stop native\n");
else
KSWITCH_STOP_NOT_PROPAGATED(native_exec_fcache);
# endif
return;
} else if (dcontext->last_exit == get_reset_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit due to %s\n",
dcontext->go_native ? "request to go native" : "proactive reset");
DOSTATS({
if (dcontext->go_native)
STATS_INC(num_exits_native);
else
STATS_INC(num_exits_reset);
});
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
}
# ifdef WINDOWS
else if (IS_SHARED_SYSCALLS_UNLINKED_LINKSTUB(dcontext->last_exit)) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from unlinked shared syscall\n");
STATS_INC(num_unlinked_shared_syscalls_exits);
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
} else if (IS_SHARED_SYSCALLS_LINKSTUB(dcontext->last_exit)) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from shared syscall (%s)\n",
IS_SHARED_SYSCALLS_TRACE_LINKSTUB(dcontext->last_exit) ? "trace" : "bb");
DOSTATS({
if (IS_SHARED_SYSCALLS_TRACE_LINKSTUB(dcontext->last_exit))
STATS_INC(num_shared_syscalls_trace_exits);
else
STATS_INC(num_shared_syscalls_bb_exits);
});
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
}
# endif
# ifdef HOT_PATCHING_INTERFACE
else if (dcontext->last_exit == get_hot_patch_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from hot patch routine\n");
STATS_INC(num_exits_hot_patch);
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
}
# endif
# ifdef CLIENT_INTERFACE
else if (dcontext->last_exit == get_client_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from client redirection\n");
STATS_INC(num_exits_client_redirect);
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
return;
}
# endif
/* normal exits from real fragments, though the last_fragment may
* be deleted and we are working off a copy of its important fields
*/
/* FIXME: this lookup is needed for KSTATS and STATS_*. STATS_* are only
* printed at loglevel 1, but maintained at loglevel 0, and if
* we want an external agent to examine them at 0 we will want
* to keep this...leaving for now
*/
next_f = fragment_lookup_fine_and_coarse(dcontext, dcontext->next_tag, &coarse_f,
dcontext->last_exit);
last_f = dcontext->last_fragment;
DOKSTATS({
/* FIXME (case 4988): read top of kstats stack to get src
* type, and then split by last_fragment type as well
*/
KSWITCH_STOP_NOT_PROPAGATED(fcache_default);
});
if (is_ibl_sourceless_linkstub((const linkstub_t *)dcontext->last_exit)) {
if (DYNAMO_OPTION(coarse_units)) {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from coarse ibl from tag " PFX ": %s %s",
dcontext->coarse_exit.src_tag,
TEST(FRAG_IS_TRACE, last_f->flags) ? "trace" : "bb",
TEST(LINK_RETURN, dcontext->last_exit->flags)
? "ret"
: EXIT_IS_CALL(dcontext->last_exit->flags) ? "call*" : "jmp*");
} else {
/* We can get here for -indirect_stubs via client special ibl */
LOG(THREAD, LOG_DISPATCH, 2, "Exit from sourceless ibl: %s %s",
TEST(FRAG_IS_TRACE, last_f->flags) ? "trace" : "bb",
TEST(LINK_RETURN, dcontext->last_exit->flags)
? "ret"
: EXIT_IS_CALL(dcontext->last_exit->flags) ? "call*" : "jmp*");
}
} else if (dcontext->last_exit == get_coarse_exit_linkstub()) {
DOLOG(2, LOG_DISPATCH, {
coarse_info_t *info = dcontext->coarse_exit.dir_exit;
cache_pc stub;
ASSERT(info != NULL); /* though not initialized to NULL... */
stub = coarse_stub_lookup_by_target(dcontext, info, dcontext->next_tag);
LOG(THREAD, LOG_DISPATCH, 2,
"Exit from sourceless coarse-grain fragment via stub " PFX "\n", stub);
});
/* FIXME: this stat is not mutually exclusive of reason-for-exit stats */
STATS_INC(num_exits_coarse);
} else if (dcontext->last_exit == get_coarse_trace_head_exit_linkstub()) {
LOG(THREAD, LOG_DISPATCH, 2,
"Exit from sourceless coarse-grain fragment targeting trace head");
/* FIXME: this stat is not mutually exclusive of reason-for-exit stats */
STATS_INC(num_exits_coarse_trace_head);
} else {
LOG(THREAD, LOG_DISPATCH, 2, "Exit from F%d(" PFX ")." PFX, last_f->id,
last_f->tag, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit));
}
DOSTATS({
if (TEST(FRAG_IS_TRACE, last_f->flags))
STATS_INC(num_trace_exits);
else
STATS_INC(num_bb_exits);
});
LOG(THREAD, LOG_DISPATCH, 2, "%s%s",
IF_X64_ELSE(FRAG_IS_32(last_f->flags) ? " (32-bit)" : "", ""),
TEST(FRAG_SHARED, last_f->flags) ? " (shared)" : "");
DOLOG(2, LOG_SYMBOLS, {
char symbuf[MAXIMUM_SYMBOL_LENGTH];
print_symbolic_address(last_f->tag, symbuf, sizeof(symbuf), true);
LOG(THREAD, LOG_SYMBOLS, 2, "\t%s\n", symbuf);
});
# if defined(DEBUG) && defined(DGC_DIAGNOSTICS)
if (TEST(FRAG_DYNGEN, last_f->flags) && !is_dyngen_vsyscall(last_f->tag)) {
char buf[MAXIMUM_SYMBOL_LENGTH];
bool stack = is_address_on_stack(dcontext, last_f->tag);
app_pc translated_pc;
print_symbolic_address(dcontext->next_tag, buf, sizeof(buf), false);
LOG(THREAD, LOG_DISPATCH, 1,
"Exit from dyngen F%d(" PFX "%s%s) w/ %s targeting " PFX " %s:", last_f->id,
last_f->tag, stack ? " stack" : "",
(last_f->flags & FRAG_DYNGEN_RESTRICTED) != 0 ? " BAD" : "",
LINKSTUB_DIRECT(dcontext->last_exit->flags) ? "db" : "ib", dcontext->next_tag,
buf);
/* FIXME: risky if last fragment is deleted -- should check for that
* here and instead just print type from last_exit, since recreate
* may fail
*/
translated_pc = recreate_app_pc(
dcontext, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit),
dcontext->last_fragment);
if (translated_pc != NULL) {
disassemble(dcontext, translated_pc, THREAD);
LOG(THREAD, LOG_DISPATCH, 1, "\n");
}
DOLOG(stack ? 1U : 2U, LOG_DISPATCH, {
LOG(THREAD, LOG_DISPATCH, 1, "DGC bb:\n");
disassemble_app_bb(dcontext, last_f->tag, THREAD);
});
}
# endif /* defined(DEBUG) && defined(DGC_DIAGNOSTICS) */
if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) {
# ifdef RETURN_AFTER_CALL
bool ok = false;
# endif
STATS_INC(num_exits_ind_total);
if (next_f == NULL) {
LOG(THREAD, LOG_DISPATCH, 2, " (target " PFX " not in cache)",
dcontext->next_tag);
STATS_INC(num_exits_ind_good_miss);
KSWITCH(num_exits_ind_good_miss);
} else if (is_building_trace(dcontext) &&
!TEST(LINK_LINKED, dcontext->last_exit->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (in trace-building mode)");
STATS_INC(num_exits_ind_trace_build);
} else if (TEST(FRAG_WAS_DELETED, last_f->flags) || !INTERNAL_OPTION(link_ibl)) {
LOG(THREAD, LOG_DISPATCH, 2, " (src unlinked)");
STATS_INC(num_exits_ind_src_unlinked);
} else {
LOG(THREAD, LOG_DISPATCH, 2,
" (target " PFX " in cache but not lookup table)", dcontext->next_tag);
STATS_INC(num_exits_ind_bad_miss);
if (TEST(FRAG_IS_TRACE, last_f->flags)) {
STATS_INC(num_exits_ind_bad_miss_trace);
if (next_f && TEST(FRAG_IS_TRACE, next_f->flags)) {
STATS_INC(num_exits_ind_bad_miss_trace2trace);
KSWITCH(num_exits_ind_bad_miss_trace2trace);
} else if (next_f && !TEST(FRAG_IS_TRACE, next_f->flags)) {
if (!TEST(FRAG_IS_TRACE_HEAD, next_f->flags)) {
STATS_INC(num_exits_ind_bad_miss_trace2bb_nth);
KSWITCH(num_exits_ind_bad_miss_trace2bb_nth);
} else {
STATS_INC(num_exits_ind_bad_miss_trace2bb_th);
KSWITCH(num_exits_ind_bad_miss_trace2bb_th);
}
}
} else {
STATS_INC(num_exits_ind_bad_miss_bb);
if (next_f && TEST(FRAG_IS_TRACE, next_f->flags)) {
STATS_INC(num_exits_ind_bad_miss_bb2trace);
KSWITCH(num_exits_ind_bad_miss_bb2trace);
} else if (next_f && !TEST(FRAG_IS_TRACE, next_f->flags)) {
DOSTATS({
if (TEST(FRAG_IS_TRACE_HEAD, next_f->flags))
STATS_INC(num_exits_ind_bad_miss_bb2bb_th);
});
STATS_INC(num_exits_ind_bad_miss_bb2bb);
KSWITCH(num_exits_ind_bad_miss_bb2bb);
}
}
}
DOSTATS({
if (!TEST(FRAG_IS_TRACE, last_f->flags))
STATS_INC(num_exits_ind_non_trace);
});
# ifdef RETURN_AFTER_CALL
/* split by ind branch type */
if (TEST(LINK_RETURN, dcontext->last_exit->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (return from " PFX " non-trace tgt " PFX ")",
EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit),
dcontext->next_tag);
STATS_INC(num_exits_ret);
DOSTATS({
if (TEST(FRAG_IS_TRACE, last_f->flags))
STATS_INC(num_exits_ret_trace);
});
} else if (TESTANY(LINK_CALL | LINK_JMP, dcontext->last_exit->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (ind %s from " PFX " non-trace tgt " PFX ")",
EXIT_IS_CALL(dcontext->last_exit->flags) ? "call" : "jmp",
EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit),
dcontext->next_tag);
DOSTATS({
if (EXIT_IS_CALL(dcontext->last_exit->flags)) {
STATS_INC(num_exits_ind_call);
} else if (EXIT_IS_JMP(dcontext->last_exit->flags)) {
STATS_INC(num_exits_ind_jmp);
} else
ASSERT_NOT_REACHED();
});
} else if (!ok) {
LOG(THREAD, LOG_DISPATCH, 2,
"WARNING: unknown indirect exit from " PFX ", in %s fragment " PFX,
EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit),
(TEST(FRAG_IS_TRACE, last_f->flags)) ? "trace" : "bb", last_f);
STATS_INC(num_exits_ind_unknown);
ASSERT_NOT_REACHED();
}
# endif /* RETURN_AFTER_CALL */
} else { /* DIRECT LINK */
ASSERT(LINKSTUB_DIRECT(dcontext->last_exit->flags) ||
IS_COARSE_LINKSTUB(dcontext->last_exit));
if (exited_due_to_ni_syscall(dcontext)) {
LOG(THREAD, LOG_DISPATCH, 2, " (block ends with syscall)");
STATS_INC(num_exits_dir_syscall);
            /* FIXME: it doesn't matter whether next_f exists or not; we're still in
* a syscall
*/
KSWITCH(num_exits_dir_syscall);
}
# ifdef WINDOWS
else if (TEST(LINK_CALLBACK_RETURN, dcontext->last_exit->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (block ends with callback return)");
STATS_INC(num_exits_dir_cbret);
}
# endif
else if (next_f == NULL) {
LOG(THREAD, LOG_DISPATCH, 2, " (target " PFX " not in cache)",
dcontext->next_tag);
STATS_INC(num_exits_dir_miss);
KSWITCH(num_exits_dir_miss);
}
/* for SHARED_FRAGMENTS_ENABLED(), we do not grab the change_linking_lock
* for our is_linkable call since that leads to a lot of
* contention (and we don't want to go to a read-write model
* when most uses, and all non-debug uses, are writes!).
* instead, since we don't want to change state, we have no synch
* at all, which is ok since the state could have changed anyway
* (see comment at end of cases below)
*/
# ifdef DEBUG
else if (IS_COARSE_LINKSTUB(dcontext->last_exit)) {
LOG(THREAD, LOG_DISPATCH, 2, " (not lazily linked yet)");
} else if (!is_linkable(dcontext, dcontext->last_fragment, dcontext->last_exit,
next_f, false /*don't own link lock*/,
false /*do not change trace head state*/)) {
STATS_INC(num_exits_dir_nolink);
LOG(THREAD, LOG_DISPATCH, 2, " (cannot link F%d->F%d)", last_f->id,
next_f->id);
if (is_building_trace(dcontext) &&
!TEST(LINK_LINKED, dcontext->last_exit->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (in trace-building mode)");
STATS_INC(num_exits_dir_trace_build);
}
# ifndef TRACE_HEAD_CACHE_INCR
else if (TEST(FRAG_IS_TRACE_HEAD, next_f->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (target F%d is trace head)", next_f->id);
STATS_INC(num_exits_dir_trace_head);
}
# endif
else if ((last_f->flags & FRAG_SHARED) != (next_f->flags & FRAG_SHARED)) {
            LOG(THREAD, LOG_DISPATCH, 2, " (cannot link shared F%d to private F%d)",
                last_f->id, next_f->id);
STATS_INC(num_exits_dir_nolink_sharing);
}
# ifdef DGC_DIAGNOSTICS
else if ((next_f->flags & FRAG_DYNGEN) != (last_f->flags & FRAG_DYNGEN)) {
            LOG(THREAD, LOG_DISPATCH, 2, " (cannot link DGC F%d to non-DGC F%d)",
                last_f->id, next_f->id);
}
# endif
else if (INTERNAL_OPTION(nolink)) {
LOG(THREAD, LOG_DISPATCH, 2, " (nolink option is on)");
} else if (!TEST(FRAG_LINKED_OUTGOING, last_f->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (F%d is unlinked-out)", last_f->id);
} else if (!TEST(FRAG_LINKED_INCOMING, next_f->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (F%d is unlinked-in)", next_f->id);
} else {
LOG(THREAD, LOG_DISPATCH, 2, " (unknown reason)");
/* link info could have changed after we exited cache so
* this is probably not a problem, not much we can do
* to distinguish race from real problem, so no assertion.
* race can happen even w/ single_thread_in_DR.
*/
STATS_INC(num_exits_dir_race);
}
}
# ifdef TRACE_HEAD_CACHE_INCR
else if (TEST(FRAG_IS_TRACE_HEAD, next_f->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (trace head F%d now hot!)", next_f->id);
STATS_INC(num_exits_dir_trace_hot);
}
# endif
else if (TEST(FRAG_IS_TRACE, next_f->flags) && TEST(FRAG_SHARED, last_f->flags)) {
LOG(THREAD, LOG_DISPATCH, 2,
" (shared trace head shadowed by private trace F%d)", next_f->id);
STATS_INC(num_exits_dir_nolink_sharing);
} else if (dcontext->next_tag == last_f->tag && next_f != last_f) {
/* invisible emission and replacement */
LOG(THREAD, LOG_DISPATCH, 2, " (self-loop in F%d, replaced by F%d)",
last_f->id, next_f->id);
STATS_INC(num_exits_dir_self_replacement);
}
# ifdef UNIX
else if (dcontext->signals_pending) {
/* this may not always be the reason...the interrupted fragment
* field is modularly hidden in unix/signal.c though
*/
LOG(THREAD, LOG_DISPATCH, 2, " (interrupted by delayable signal)");
STATS_INC(num_exits_dir_signal);
}
# endif
else if (TEST(FRAG_COARSE_GRAIN, next_f->flags) &&
!TEST(FRAG_COARSE_GRAIN, last_f->flags)) {
LOG(THREAD, LOG_DISPATCH, 2, " (fine fragment targeting coarse trace head)");
/* FIXME: We would assert that FRAG_IS_TRACE_HEAD is set, but
* we have no way of setting that up for fine to coarse links
*/
/* stats are done in monitor_cache_enter() */
} else {
LOG(THREAD, LOG_DISPATCH, 2, " (UNKNOWN DIRECT EXIT F%d." PFX "->F%d)",
last_f->id, EXIT_CTI_PC(dcontext->last_fragment, dcontext->last_exit),
next_f->id);
/* link info could have changed after we exited cache so
* this is probably not a problem, not much we can do
* to distinguish race from real problem, so no assertion.
* race can happen even w/ single_thread_in_DR.
*/
STATS_INC(num_exits_dir_race);
}
# endif /* DEBUG */
}
if (dcontext->last_exit == get_deleted_linkstub(dcontext)) {
LOG(THREAD, LOG_DISPATCH, 2, " (fragment was flushed)");
}
LOG(THREAD, LOG_DISPATCH, 2, "\n");
DOLOG(5, LOG_DISPATCH,
{ dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML); });
DOLOG(6, LOG_DISPATCH, { dump_mcontext_callstack(dcontext); });
DOKSTATS({ DOLOG(6, LOG_DISPATCH, { kstats_dump_stack(dcontext); }); });
#endif /* defined(DEBUG) || defined(KSTATS) */
}
/***************************************************************************
* SYSTEM CALLS
*/
#ifdef UNIX
static void
adjust_syscall_continuation(dcontext_t *dcontext)
{
/* PR 212570: for linux sysenter, we hooked the sysenter return-to-user-mode
* point to go to post-do-vsyscall. So we end up here w/o any extra
* work pre-syscall; and since we put the hook-displaced code in the nop
* space immediately after the sysenter instr, which is our normal
* continuation pc, we have no work to do here either (except for
* 4.4.8+ kernels: i#1939)!
*/
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
# ifdef MACOS
if (!dcontext->sys_was_int) {
priv_mcontext_t *mc = get_mcontext(dcontext);
LOG(THREAD, LOG_SYSCALLS, 3,
"post-sysenter: xdx + asynch_target => " PFX " (were " PFX ", " PFX ")\n",
dcontext->app_xdx, mc->xdx, dcontext->asynch_target);
mc->xdx = dcontext->app_xdx;
dcontext->asynch_target = (app_pc)mc->xdx;
}
# else
/* we still see some int syscalls (for SYS_clone in particular) */
ASSERT(dcontext->sys_was_int ||
dcontext->asynch_target == vsyscall_syscall_end_pc ||
/* dr_syscall_invoke_another() hits this */
dcontext->asynch_target == vsyscall_sysenter_displaced_pc);
/* i#1939: we do need to adjust for 4.4.8+ kernels */
if (!dcontext->sys_was_int && vsyscall_sysenter_displaced_pc != NULL) {
dcontext->asynch_target = vsyscall_sysenter_displaced_pc;
LOG(THREAD, LOG_SYSCALLS, 3, "%s: asynch_target => " PFX "\n", __FUNCTION__,
dcontext->asynch_target);
}
# endif
} else if (vsyscall_syscall_end_pc != NULL &&
/* PR 341469: 32-bit apps (LOL64) on AMD hardware have
* OP_syscall in a vsyscall page
*/
get_syscall_method() != SYSCALL_METHOD_SYSCALL) {
/* If we fail to hook we currently bail out to int; but we then
* need to manually jump to the sysenter return point.
* Once we have PR 288330 we can remove this.
*/
if (dcontext->asynch_target == vsyscall_syscall_end_pc) {
ASSERT(vsyscall_sysenter_return_pc != NULL);
dcontext->asynch_target = vsyscall_sysenter_return_pc;
LOG(THREAD, LOG_SYSCALLS, 3, "%s: asynch_target => " PFX "\n", __FUNCTION__,
dcontext->asynch_target);
}
}
}
#endif
/* used to execute a system call instruction in the code cache
 * dcontext->next_tag is stored elsewhere and restored after the system call
* for resumption of execution post-syscall
*/
void
handle_system_call(dcontext_t *dcontext)
{
fcache_enter_func_t fcache_enter = get_fcache_enter_private_routine(dcontext);
app_pc do_syscall = (app_pc)get_do_syscall_entry(dcontext);
#ifdef CLIENT_INTERFACE
bool execute_syscall = true;
priv_mcontext_t *mc = get_mcontext(dcontext);
int sysnum = os_normalized_sysnum((int)MCXT_SYSNUM_REG(mc), NULL, dcontext);
#endif
app_pc saved_next_tag = dcontext->next_tag;
bool repeat = false;
#ifdef WINDOWS
/* make sure to ask about syscall before pre_syscall, which will swap new mc in! */
bool use_prev_dcontext = is_cb_return_syscall(dcontext);
#elif defined(X86)
if (TEST(LINK_NI_SYSCALL_INT, dcontext->last_exit->flags)) {
LOG(THREAD, LOG_SYSCALLS, 2, "Using do_int_syscall\n");
do_syscall = (app_pc)get_do_int_syscall_entry(dcontext);
/* last_exit will be for the syscall so set a flag (could alternatively
* set up a separate exit stub but this is simpler) */
dcontext->sys_was_int = true;
# ifdef VMX86_SERVER
if (is_vmkuw_sysnum(mc->xax)) {
/* Even w/ syscall # shift int80 => ENOSYS */
do_syscall = get_do_vmkuw_syscall_entry(dcontext);
LOG(THREAD, LOG_SYSCALLS, 2, "Using do_vmkuw_syscall\n");
}
# endif
} else if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) {
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_NI_SYSCALL_INT_0x81)
do_syscall = (app_pc)get_do_int81_syscall_entry(dcontext);
else {
ASSERT(dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x82);
do_syscall = (app_pc)get_do_int82_syscall_entry(dcontext);
}
dcontext->sys_was_int = true;
} else {
dcontext->sys_was_int = false;
IF_NOT_X64(IF_VMX86(ASSERT(!is_vmkuw_sysnum(mc->xax))));
}
#endif
#ifdef CLIENT_INTERFACE
/* We invoke here rather than inside pre_syscall() primarily so we can
* set use_prev_dcontext(), but also b/c the windows and linux uses
* are identical. We do want this prior to xbp-param changes for linux
* sysenter-to-int (PR 313715) since to client this should
* look like the original sysenter. For Windows we could put this
* after sysenter handling but it's not clear which is better: we'll
* assert if client changes xsp/xdx but that's fine.
*/
/* set pc so client can tell where syscall invoked from.
* note that this is pc _after_ syscall instr.
*/
get_mcontext(dcontext)->pc = get_fcache_target(dcontext);
/* i#202: ignore native syscalls in early_inject_init() */
if (IF_WINDOWS(dynamo_initialized &&) !instrument_pre_syscall(dcontext, sysnum)) {
/* we won't execute post-syscall so we do not need to store
* dcontext->sys_*
*/
execute_syscall = false;
LOG(THREAD, LOG_SYSCALLS, 2, "skipping syscall %d on client request\n",
MCXT_SYSNUM_REG(mc));
}
# ifdef WINDOWS
/* re-set in case client changed the number */
use_prev_dcontext = is_cb_return_syscall(dcontext);
# endif
#endif
/* some syscalls require modifying local memory
* FIXME: move this unprot down to those syscalls to avoid unprot-prot-unprot-prot
* with the new clean dstack design -- though w/ shared_syscalls perhaps most
* syscalls coming through here will need this
*/
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
KSWITCH(num_exits_dir_syscall); /* encapsulates syscall overhead */
LOG(THREAD, LOG_SYSCALLS, 2,
"Entry into do_syscall to execute a non-ignorable system call\n");
#ifdef SIDELINE
/* clear cur-trace field so we don't think cur trace is still running */
sideline_trace = NULL;
#endif
/* our flushing design assumes our syscall handlers are nolinking,
* to avoid multiple-flusher deadlocks
*/
ASSERT(!is_couldbelinking(dcontext));
/* we need to store the next pc since entering the fcache will clobber it
* with the do_syscall entry point.
* we store in a dcontext slot since some syscalls need to view or modify it
* (the asynch ones: sigreturn, ntcontinue, etc., hence the name asynch_target).
* Yes this works with an NtContinue being interrupted in the kernel for an APC --
* we want to know the NtContinue target, there is no other target to remember.
* The only problem is if a syscall that modifies asynch_target fails -- then we
* want the old value, so we store it here.
*/
dcontext->asynch_target = get_fcache_target(dcontext);
#ifdef WINDOWS
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* kernel sends control directly to 0x7ffe0304 so we need
* to mangle the return address
*/
/* Ref case 5461 - edx will become top of stack post-syscall */
ASSERT(get_mcontext(dcontext)->xsp == get_mcontext(dcontext)->xdx);
# ifdef HOT_PATCHING_INTERFACE
/* For hotp_only, vsyscall_syscall_end_pc can be NULL as dr will never
 * interp a system call. Also, for hotp_only, control can come here
* from native only to do a syscall that was hooked.
*/
ASSERT(!DYNAMO_OPTION(hotp_only) ||
(DYNAMO_OPTION(hotp_only) &&
dcontext->next_tag == BACK_TO_NATIVE_AFTER_SYSCALL));
# else
ASSERT(vsyscall_syscall_end_pc != NULL || get_os_version() >= WINDOWS_VERSION_8);
# endif
/* NOTE - the stack mangling must match that of intercept_nt_continue()
* and shared_syscall as not all routines looking at the stack
* differentiate. */
if (dcontext->asynch_target == vsyscall_syscall_end_pc ||
/* win8 x86 syscalls have inlined sysenter routines */
(get_os_version() >= WINDOWS_VERSION_8 &&
dcontext->thread_record->under_dynamo_control)) {
# ifdef HOT_PATCHING_INTERFACE
/* Don't expect to be here for -hotp_only */
ASSERT_CURIOSITY(!DYNAMO_OPTION(hotp_only));
# endif
ASSERT(dcontext->next_tag != BACK_TO_NATIVE_AFTER_SYSCALL);
/* currently pc is the ret after sysenter, we need it to be the return point
* (the ret after the call to the vsyscall sysenter)
* we do not need to keep the old asynch_target -- if we decide not to do
* the syscall we just have to pop the retaddr
*/
dcontext->asynch_target = *((app_pc *)get_mcontext(dcontext)->xsp);
ASSERT(dcontext->thread_record->under_dynamo_control);
} else {
/* else, special case like native_exec_syscall */
LOG(THREAD, LOG_ALL, 2, "post-sysenter target is non-vsyscall " PFX "\n",
dcontext->asynch_target);
ASSERT(DYNAMO_OPTION(native_exec_syscalls) &&
!dcontext->thread_record->under_dynamo_control);
}
/* FIXME A lack of write access to %esp will generate an exception
* originating from DR though it's really an app problem (unless we
* screwed up wildly). Should we call is_writeable(%esp) and force
* a new UNWRITEABLE_MEMORY_EXECUTION_EXCEPTION so that we don't take
* the blame?
*/
if (DYNAMO_OPTION(sygate_sysenter)) {
/* So stack looks like
* esp +0 app_ret_addr
* +4 app_val1
* for the case 5441 Sygate hack the sysenter needs to have a ret
* address that's in ntdll.dll, but we also need to redirect
* control back to do_syscall. So we mangle to
* esp +0 sysenter_ret_address (ret in ntdll)
* +4 after_do_syscall
* dc->sysenter_storage app_val1
* dc->asynch_target app_ret_addr
* After do_syscall we push app_val1 (since stack is popped twice)
* and send control to asynch_target (implicitly doing the
* post_sysenter ret instr).
*/
dcontext->sysenter_storage =
*((app_pc *)(get_mcontext(dcontext)->xsp + XSP_SZ));
*((app_pc *)get_mcontext(dcontext)->xsp) = sysenter_ret_address;
*((app_pc *)(get_mcontext(dcontext)->xsp + XSP_SZ)) =
after_do_syscall_code(dcontext);
} else {
*((app_pc *)get_mcontext(dcontext)->xsp) = after_do_syscall_code(dcontext);
}
}
#endif
#ifdef MACOS
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && !dcontext->sys_was_int) {
/* The kernel returns control to whatever user-mode places in edx.
* We want to put this in even if we skip the syscall as we'll still call
* adjust_syscall_continuation for a skip.
*/
byte *post_sysenter = after_do_syscall_addr(dcontext);
priv_mcontext_t *mc = get_mcontext(dcontext);
dcontext->app_xdx = mc->xdx;
mc->xdx = (reg_t)post_sysenter;
}
#endif
/* first do the pre-system-call */
if (IF_CLIENT_INTERFACE(execute_syscall &&) pre_system_call(dcontext)) {
/* now do the actual syscall instruction */
#ifdef UNIX
/* FIXME: move into some routine inside unix/?
* if so, move #include of sys/syscall.h too
*/
if (is_thread_create_syscall(dcontext)) {
/* Code for after clone is in generated code do_clone_syscall. */
do_syscall = (app_pc)get_do_clone_syscall_entry(dcontext);
} else if (is_sigreturn_syscall(dcontext)) {
/* HACK: sigreturn goes straight to fcache_return, which expects
* app eax to already be in mcontext. pre-syscall cannot do that since
* do_syscall needs the syscall num in eax!
* so we have to do it here (alternative is to be like NtContinue handling
* with a special entry point, ends up being same sort of thing as here)
*/
/* pre-sigreturn handler put dest eax in next_tag
* save it in sys_param1, which is not used already in pre/post
*/
/* for CLIENT_INTERFACE, pre-sigreturn handler took eax after
* client had chance to change it, so we have the proper value here.
*/
dcontext->sys_param1 = (reg_t)dcontext->next_tag;
LOG(THREAD, LOG_SYSCALLS, 3, "for sigreturn, set sys_param1 to " PFX "\n",
dcontext->sys_param1);
}
#else
if (use_prev_dcontext) {
/* get the current, but now swapped out, dcontext */
dcontext_t *tmp_dcontext = dcontext;
LOG(THREAD, LOG_SYSCALLS, 1, "handling a callback return\n");
dcontext = get_prev_swapped_dcontext(tmp_dcontext);
LOG(THREAD, LOG_SYSCALLS, 1, "swapped dcontext from " PFX " to " PFX "\n",
tmp_dcontext, dcontext);
/* we have special fcache_enter that uses different dcontext,
* FIXME: but what if syscall fails? need to unswap dcontexts!
*/
fcache_enter = get_fcache_enter_indirect_routine(dcontext);
/* avoid synch errors with d_r_dispatch -- since enter_fcache will set
* whereami for prev dcontext, not real one!
*/
tmp_dcontext->whereami = DR_WHERE_FCACHE;
}
#endif
SELF_PROTECT_LOCAL(dcontext, READONLY);
set_at_syscall(dcontext, true);
KSTART_DC(dcontext, syscall_fcache); /* stopped in dispatch_exit_fcache_stats */
do {
#ifdef UNIX
/* We've already updated the signal mask as though the handler is
* completely finished, so we cannot go and receive a signal before
* executing the sigreturn syscall.
* Similarly, we've already done some clone work.
* Sigreturn and clone will come back to d_r_dispatch so there's no worry
* about unbounded delay.
*/
if ((is_sigreturn_syscall(dcontext) || is_thread_create_syscall(dcontext)) &&
dcontext->signals_pending > 0)
dcontext->signals_pending = -1;
#endif
enter_fcache(dcontext,
(fcache_enter_func_t)
/* DEFAULT_ISA_MODE as we want the ISA mode of our gencode */
convert_data_to_function(
PC_AS_JMP_TGT(DEFAULT_ISA_MODE, (app_pc)fcache_enter)),
PC_AS_JMP_TGT(DEFAULT_ISA_MODE, do_syscall));
#ifdef UNIX
if ((is_sigreturn_syscall(dcontext) || is_thread_create_syscall(dcontext)) &&
dcontext->signals_pending > 0)
repeat = true;
else
break;
#endif
} while (repeat);
#ifdef UNIX
if (dcontext->signals_pending) {
/* i#2019: see comments in dispatch_enter_fcache() */
KSTOP(syscall_fcache);
dcontext->whereami = DR_WHERE_DISPATCH;
set_at_syscall(dcontext, false);
/* We need to remember both the post-syscall resumption point and
* the fact that we need to execute a syscall, but we only have
* a single PC field to place it into inside our sigreturn frame
* and other places. Our solution is to point back at the
* syscall instruction itself. The walk-backward scheme here is a
* little hacky perhaps. We'll make a bb just for this syscall, which
* will not know the syscall number: but any re-execution in a loop
* will go back to the main bb.
*/
dcontext->next_tag = saved_next_tag -
syscall_instr_length(dcontext->last_fragment == NULL
? DEFAULT_ISA_MODE
: FRAG_ISA_MODE(dcontext->last_fragment->flags));
ASSERT(is_syscall_at_pc(dcontext, dcontext->next_tag));
LOG(THREAD, LOG_DISPATCH, 2,
"Signal arrived in DR: aborting syscall enter; interrupted " PFX "\n",
dcontext->next_tag);
STATS_INC(num_entrances_aborted);
trace_abort(dcontext);
receive_pending_signal(dcontext);
} else
#endif
/* will handle post processing in handle_post_system_call */
ASSERT_NOT_REACHED();
} else {
LOG(THREAD, LOG_DISPATCH, 2, "Skipping actual syscall invocation\n");
#ifdef CLIENT_INTERFACE
/* give the client its post-syscall event since we won't be calling
* post_system_call(), unless the client itself was the one who skipped.
*/
if (execute_syscall) {
instrument_post_syscall(dcontext, dcontext->sys_num);
}
#endif
#ifdef WINDOWS
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* decided to skip syscall -- pop retaddr, restore sysenter storage
* (if applicable) and set next target */
get_mcontext(dcontext)->xsp += XSP_SZ;
if (DYNAMO_OPTION(sygate_sysenter)) {
*((app_pc *)get_mcontext(dcontext)->xsp) = dcontext->sysenter_storage;
}
set_fcache_target(dcontext, dcontext->asynch_target);
} else if (get_syscall_method() == SYSCALL_METHOD_WOW64 &&
get_os_version() == WINDOWS_VERSION_7) {
/* win7 has an add 4,esp after the call* in the syscall wrapper,
* so we need to negate it since not making the call*
*/
get_mcontext(dcontext)->xsp -= XSP_SZ;
}
#else
adjust_syscall_continuation(dcontext);
set_fcache_target(dcontext, dcontext->asynch_target);
#endif
}
SELF_PROTECT_LOCAL(dcontext, READONLY);
}
static void
handle_post_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool skip_adjust = false;
ASSERT(!is_couldbelinking(dcontext));
ASSERT(get_at_syscall(dcontext));
set_at_syscall(dcontext, false);
/* some syscalls require modifying local memory */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
#ifdef UNIX
/* restore mcontext values prior to invoking instrument_post_syscall() */
if (was_sigreturn_syscall(dcontext)) {
/* restore app xax/r0 */
LOG(THREAD, LOG_SYSCALLS, 3,
"post-sigreturn: setting xax/r0 to " PFX ", asynch_target=" PFX "\n",
dcontext->sys_param1, dcontext->asynch_target);
mc->IF_X86_ELSE(xax, r0) = dcontext->sys_param1;
# ifdef MACOS
    /* We need to skip the use of app_xdx, as we've changed the context.
* We can't just set app_xdx from handle_sigreturn() as the
* pre-sysenter code clobbers app_xdx, and we want to handle
* a failed SYS_sigreturn.
*/
skip_adjust = true;
# endif
}
#endif
#ifdef CLIENT_INTERFACE
/* i#1661: ensure we set the right pc for dr_get_mcontext() */
get_mcontext(dcontext)->pc = dcontext->asynch_target;
#endif
post_system_call(dcontext);
/* restore state for continuation in instruction after syscall */
/* FIXME: need to handle syscall failure -- those that clobbered asynch_target
* need to restore it to its previous value, which has to be stored somewhere!
*/
#ifdef WINDOWS
if (DYNAMO_OPTION(sygate_sysenter) &&
get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* restore sysenter_storage, note stack was popped twice for
* syscall so need to push the value */
get_mcontext(dcontext)->xsp -= XSP_SZ;
*((app_pc *)get_mcontext(dcontext)->xsp) = dcontext->sysenter_storage;
}
#else
if (!skip_adjust)
adjust_syscall_continuation(dcontext);
#endif
set_fcache_target(dcontext, dcontext->asynch_target);
#ifdef WINDOWS
/* We no longer need asynch_target so zero it out. Other pieces of DR
* -- callback & APC handling, detach -- test asynch_target to determine
* where the next app pc to execute is stored. If asynch_target != 0,
* it holds the value, else it's in the esi slot.
*/
dcontext->asynch_target = 0;
#endif
LOG(THREAD, LOG_SYSCALLS, 3, "finished handling system call\n");
SELF_PROTECT_LOCAL(dcontext, READONLY);
/* caller will go back to couldbelinking status */
}
#ifdef WINDOWS
/* in callback.c */
extern void
callback_start_return(priv_mcontext_t *mc);
/* used to execute an int 2b instruction in code cache */
static void
handle_callback_return(dcontext_t *dcontext)
{
dcontext_t *prev_dcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
fcache_enter_func_t fcache_enter = get_fcache_enter_indirect_routine(dcontext);
LOG(THREAD, LOG_ASYNCH, 3, "handling a callback return\n");
/* may have to abort trace -> local heap */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
KSWITCH(num_exits_dir_cbret);
callback_start_return(mc);
/* get the current, but now swapped out, dcontext */
prev_dcontext = get_prev_swapped_dcontext(dcontext);
SELF_PROTECT_LOCAL(dcontext, READONLY);
/* obey flushing protocol, plus set whereami (both using real dcontext) */
dcontext->whereami = DR_WHERE_FCACHE;
set_at_syscall(dcontext, true); /* will be set to false on other end's post-syscall */
ASSERT(!is_couldbelinking(dcontext));
/* if we get an APC it should be after returning to prev cxt, so don't need
* to worry about asynch_target
*/
/* make sure set the next_tag of prev_dcontext, not dcontext! */
set_fcache_target(prev_dcontext, (app_pc)get_do_callback_return_entry(prev_dcontext));
DOLOG(4, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 3, "passing prev dcontext " PFX ", next_tag " PFX ":\n",
prev_dcontext, prev_dcontext->next_tag);
dump_mcontext(get_mcontext(prev_dcontext), THREAD, DUMP_NOT_XML);
});
/* make sure to pass prev_dcontext, this is a special fcache enter routine
* that indirects through the dcontext passed to it (so ignores the switch-to
* dcontext that callback_start_return swapped into the main dcontext)
*/
KSTART_DC(dcontext, syscall_fcache); /* continue the interrupted syscall handling */
(*fcache_enter)(prev_dcontext);
/* callback return does not return to here! */
DOLOG(1, LOG_ASYNCH, {
LOG(THREAD, LOG_SYSCALLS, 1, "ERROR: int 2b returned!\n");
dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML);
});
ASSERT_NOT_REACHED();
}
#endif /* WINDOWS */
/* used to execute a system call instruction in code cache
* not expected to return
* caller must set up mcontext with proper system call number and arguments
*/
void
issue_last_system_call_from_app(dcontext_t *dcontext)
{
LOG(THREAD, LOG_SYSCALLS, 2, "issue_last_system_call_from_app(" PIFX ")\n",
MCXT_SYSNUM_REG(get_mcontext(dcontext)));
/* it's up to the caller to let go of the bb building lock if it was held
* on this path, since not all paths to here hold it
*/
if (is_couldbelinking(dcontext))
enter_nolinking(dcontext, NULL, true);
KSTART(syscall_fcache); /* stopped in dispatch_exit_fcache_stats */
enter_fcache(
dcontext,
(fcache_enter_func_t)
/* DEFAULT_ISA_MODE as we want the ISA mode of our gencode */
convert_data_to_function(PC_AS_JMP_TGT(
DEFAULT_ISA_MODE, (app_pc)get_fcache_enter_private_routine(dcontext))),
PC_AS_JMP_TGT(DEFAULT_ISA_MODE, get_global_do_syscall_entry()));
ASSERT_NOT_REACHED();
}
/* Stores the register parameters into the mcontext and calls d_r_dispatch.
 * Checks whether we are currently on d_r_initstack and, if so, clears initstack_mutex.
* Does not return.
*/
void
transfer_to_dispatch(dcontext_t *dcontext, priv_mcontext_t *mc, bool full_DR_state)
{
app_pc cur_xsp;
bool using_initstack = false;
copy_mcontext(mc, get_mcontext(dcontext));
GET_STACK_PTR(cur_xsp);
if (is_on_initstack(cur_xsp))
using_initstack = true;
#ifdef WINDOWS
/* i#249: swap PEB pointers unless already in DR state */
if (!full_DR_state)
swap_peb_pointer(dcontext, true /*to priv*/);
#endif
LOG(THREAD, LOG_ASYNCH, 2,
"transfer_to_dispatch: pc=0x%08x, xsp=" PFX ", on-initstack=%d\n",
dcontext->next_tag, mc->xsp, using_initstack);
/* next, want to switch to dstack, and if using d_r_initstack, free mutex.
* finally, call d_r_dispatch(dcontext).
* note that we switch to the base of dstack, deliberately squashing
* what may have been there before, for both new dcontext and reuse dcontext
* options.
*/
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
using_initstack ? &initstack_mutex : NULL,
false /*do not return on error*/);
ASSERT_NOT_REACHED();
}
| 1 | 22,001 | Could this instead keep the `go_native` check and add to it "or the last exit was the special reset exit"? | DynamoRIO-dynamorio | c |
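A minimal sketch of what that suggestion could look like at the assert in `dispatch_enter_dynamorio()` above — keeping the `go_native` test and widening it rather than dropping it. Spotting the reset exit via `get_reset_linkstub()` is an assumption here, borrowed from its use later in the same file:

```c
/* Hypothetical widened assert per the review comment above; not committed code.
 * Re-entry from dispatch itself would be legal either for a go-native request
 * or for the special reset exit (ref i#3427).
 */
ASSERT(wherewasi == DR_WHERE_FCACHE || wherewasi == DR_WHERE_TRAMPOLINE ||
       wherewasi == DR_WHERE_APP ||
       ((dcontext->go_native || dcontext->last_exit == get_reset_linkstub()) &&
        wherewasi == DR_WHERE_DISPATCH));
```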
@@ -42,9 +42,9 @@ ConfigPanelDialog::ConfigPanelDialog(LXQtPanel *panel, QWidget *parent):
addPage(mPluginsPage, tr("Widgets"), QLatin1String("preferences-plugin"));
connect(this, &ConfigPanelDialog::reset, mPluginsPage, &ConfigPluginsWidget::reset);
- connect(this, &ConfigPanelDialog::accepted, [panel] {
+ connect(this, &ConfigPanelDialog::accepted, this, [panel] {
panel->saveSettings();
- });
+ });
}
void ConfigPanelDialog::showConfigPanelPage() | 1 | /* BEGIN_COMMON_COPYRIGHT_HEADER
* (c)LGPL2+
*
* LXQt - a lightweight, Qt based, desktop toolset
* https://lxqt.org
*
* Copyright: 2010-2011 Razor team
* Authors:
* Marat "Morion" Talipov <[email protected]>
*
* This program or library is free software; you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General
* Public License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*
* END_COMMON_COPYRIGHT_HEADER */
#include "configpaneldialog.h"
ConfigPanelDialog::ConfigPanelDialog(LXQtPanel *panel, QWidget *parent):
LXQt::ConfigDialog(tr("Configure Panel"), panel->settings(), parent),
mPanelPage(nullptr),
mPluginsPage(nullptr)
{
setAttribute(Qt::WA_DeleteOnClose);
mPanelPage = new ConfigPanelWidget(panel, this);
addPage(mPanelPage, tr("Panel"), QLatin1String("configure"));
connect(this, &ConfigPanelDialog::reset, mPanelPage, &ConfigPanelWidget::reset);
mPluginsPage = new ConfigPluginsWidget(panel, this);
addPage(mPluginsPage, tr("Widgets"), QLatin1String("preferences-plugin"));
connect(this, &ConfigPanelDialog::reset, mPluginsPage, &ConfigPluginsWidget::reset);
connect(this, &ConfigPanelDialog::accepted, [panel] {
panel->saveSettings();
});
}
void ConfigPanelDialog::showConfigPanelPage()
{
showPage(mPanelPage);
}
void ConfigPanelDialog::showConfigPluginsPage()
{
showPage(mPluginsPage);
}
void ConfigPanelDialog::updateIconThemeSettings()
{
mPanelPage->updateIconThemeSettings();
}
| 1 | 6,787 | Oh, I missed this one: The other instances of `this` you've added as lambda contexts aren't really needed, although they're harmless. However, in the above connection, the missing context is `panel`, not `this`. Please correct it! Clazy may show warnings about lambda contexts (I don't use Clazy) but, if so, that's a problem in clazy: it isn't smart enough to know about them. | lxqt-lxqt-panel | cpp |
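A minimal sketch of the fix this review is asking for, using the class names from the diff above: Qt's connect() takes a context QObject, and since the lambda dereferences panel, the context should be panel so the connection is torn down automatically if the panel is destroyed before the dialog emits accepted().

// Sketch only: the context object is `panel`, whose lifetime the lambda
// depends on; Qt auto-disconnects the lambda if panel is deleted first.
connect(this, &ConfigPanelDialog::accepted,
        panel,
        [panel] { panel->saveSettings(); });

With `this` as the context, the connection only dies with the dialog, which still leaves a window where the captured panel pointer can dangle.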
@@ -160,14 +160,9 @@ class Service(object):
except AttributeError:
pass
self.process.terminate()
- self.process.kill()
self.process.wait()
+ self.process.kill()
self.process = None
except OSError:
# kill may not be available under windows environment
pass
-
- def __del__(self):
- # subprocess.Popen doesn't send signal on __del__;
- # we have to try to stop the launched process.
- self.stop() | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import errno
import os
import platform
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
try:
from subprocess import DEVNULL
_HAS_NATIVE_DEVNULL = True
except ImportError:
DEVNULL = -3
_HAS_NATIVE_DEVNULL = False
class Service(object):
def __init__(self, executable, port=0, log_file=DEVNULL, env=None, start_error_message=""):
self.path = executable
self.port = port
if self.port == 0:
self.port = utils.free_port()
if not _HAS_NATIVE_DEVNULL and log_file == DEVNULL:
log_file = open(os.devnull, 'wb')
self.start_error_message = start_error_message
self.log_file = log_file
self.env = env or os.environ
@property
def service_url(self):
"""
Gets the url of the Service
"""
return "http://%s" % utils.join_host_port('localhost', self.port)
def command_line_args(self):
        raise NotImplementedError("This method needs to be implemented in a subclass")
def start(self):
"""
Starts the Service.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
try:
cmd = [self.path]
cmd.extend(self.command_line_args())
self.process = subprocess.Popen(cmd, env=self.env,
close_fds=platform.system() != 'Windows',
stdout=self.log_file, stderr=self.log_file)
except TypeError:
raise
except OSError as err:
if err.errno == errno.ENOENT:
raise WebDriverException(
"'%s' executable needs to be in PATH. %s" % (
os.path.basename(self.path), self.start_error_message)
)
elif err.errno == errno.EACCES:
raise WebDriverException(
"'%s' executable may have wrong permissions. %s" % (
os.path.basename(self.path), self.start_error_message)
)
else:
raise
except Exception as e:
raise WebDriverException(
"The executable %s needs to be available in the path. %s\n%s" %
(os.path.basename(self.path), self.start_error_message, str(e)))
count = 0
while True:
self.assert_process_still_running()
if self.is_connectable():
break
count += 1
time.sleep(1)
if count == 30:
raise WebDriverException("Can not connect to the Service %s" % self.path)
def assert_process_still_running(self):
return_code = self.process.poll()
if return_code is not None:
raise WebDriverException(
'Service %s unexpectedly exited. Status code was: %s'
% (self.path, return_code)
)
def is_connectable(self):
return utils.is_connectable(self.port)
def send_remote_shutdown_command(self):
try:
from urllib import request as url_request
URLError = url_request.URLError
except ImportError:
import urllib2 as url_request
import urllib2
URLError = urllib2.URLError
try:
url_request.urlopen("%s/shutdown" % self.service_url)
except URLError:
return
count = 0
while self.is_connectable():
if count == 30:
break
count += 1
time.sleep(1)
def stop(self):
"""
Stops the service.
"""
if self.log_file != PIPE and not (self.log_file == DEVNULL and _HAS_NATIVE_DEVNULL):
try:
self.log_file.close()
except Exception:
pass
if self.process is None:
return
try:
self.send_remote_shutdown_command()
except TypeError:
pass
try:
if self.process:
for stream in [self.process.stdin,
self.process.stdout,
self.process.stderr]:
try:
stream.close()
except AttributeError:
pass
self.process.terminate()
self.process.kill()
self.process.wait()
self.process = None
except OSError:
# kill may not be available under windows environment
pass
def __del__(self):
# subprocess.Popen doesn't send signal on __del__;
# we have to try to stop the launched process.
self.stop()
| 1 | 13,965 | can we add a try / except around this to make it more stable? I like the idea of any mistakenly un-quit drivers closing down after the process is quit (the java server does this... also i forget to do driver.quit often when i use the command line repl :) ) | SeleniumHQ-selenium | py |
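A sketch of the guard the reviewer is proposing (hypothetical wording, not the committed patch): keep the __del__ finalizer so mistakenly un-quit drivers still shut down, but never let it raise, since attributes and modules may already be gone while the interpreter is tearing down.

    def __del__(self):
        # subprocess.Popen doesn't send a signal on __del__; try to stop
        # the launched process, but swallow everything -- an exception
        # escaping a finalizer during interpreter shutdown is only noise.
        try:
            self.stop()
        except Exception:
            pass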
@@ -68,9 +68,12 @@ func (vd *volAPI) cloudMigrateStatus(w http.ResponseWriter, r *http.Request) {
return
}
- if err := json.NewDecoder(r.Body).Decode(statusReq); err != nil {
- vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
- return
+ // Use empty request if nothing was sent
+ if r.ContentLength != 0 {
+ if err := json.NewDecoder(r.Body).Decode(statusReq); err != nil {
+ vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
+ return
+ }
}
statusResp, err := d.CloudMigrateStatus(statusReq) | 1 | package server
import (
"encoding/json"
"net/http"
"github.com/libopenstorage/openstorage/api"
ost_errors "github.com/libopenstorage/openstorage/api/errors"
)
func (vd *volAPI) cloudMigrateStart(w http.ResponseWriter, r *http.Request) {
startReq := &api.CloudMigrateStartRequest{}
method := "cloudMigrateStart"
if err := json.NewDecoder(r.Body).Decode(startReq); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
response, err := d.CloudMigrateStart(startReq)
if err != nil {
if _, ok := err.(*ost_errors.ErrExists); ok {
w.WriteHeader(http.StatusConflict)
return
}
vd.sendError(method, startReq.TargetId, w, err.Error(), http.StatusInternalServerError)
return
}
json.NewEncoder(w).Encode(response)
}
func (vd *volAPI) cloudMigrateCancel(w http.ResponseWriter, r *http.Request) {
cancelReq := &api.CloudMigrateCancelRequest{}
method := "cloudMigrateCancel"
if err := json.NewDecoder(r.Body).Decode(cancelReq); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
err = d.CloudMigrateCancel(cancelReq)
if err != nil {
vd.sendError(method, cancelReq.TaskId, w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (vd *volAPI) cloudMigrateStatus(w http.ResponseWriter, r *http.Request) {
statusReq := &api.CloudMigrateStatusRequest{}
method := "cloudMigrateState"
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
if err := json.NewDecoder(r.Body).Decode(statusReq); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
statusResp, err := d.CloudMigrateStatus(statusReq)
if err != nil {
vd.sendError(method, "", w, err.Error(), http.StatusInternalServerError)
return
}
json.NewEncoder(w).Encode(statusResp)
}
| 1 | 7,714 | when would this happen? backward compatibility? | libopenstorage-openstorage | go |
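One answer to the question: any caller that issues the status request without a body (for example an older client that never sent parameters) arrives with r.ContentLength == 0, and json.Decode on the empty body fails with EOF. A self-contained sketch of the guard pattern, with illustrative names not taken from openstorage:

package main

import (
	"encoding/json"
	"net/http"
)

type statusRequest struct {
	TaskId string `json:"task_id"`
}

func statusHandler(w http.ResponseWriter, r *http.Request) {
	req := &statusRequest{}
	// Only decode when a body was actually sent; an empty body would make
	// Decode return io.EOF and turn a valid "status of everything" request
	// into a 400.
	if r.ContentLength != 0 {
		if err := json.NewDecoder(r.Body).Decode(req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}
	json.NewEncoder(w).Encode(req) // echo back for illustration
}

func main() {
	http.HandleFunc("/status", statusHandler)
	http.ListenAndServe(":8080", nil)
}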
@@ -0,0 +1,13 @@
+/**
+ * Returns the tagName,
+ * if it is a HTMLElement it gets lowercased
+ * @param {Element} node element
+ * @return {String} normalized tagName
+ */
+axe.utils.getTagName = function(node) {
+ if (node.namespaceURI === 'http://www.w3.org/1999/xhtml') {
+ return node.tagName.toLowerCase();
+ }
+
+ return node.tagName;
+}; | 1 | 1 | 11,790 | I'm not sure what the value of this is. So far we've solved this by always doing `tagName.toUpperCase()` for everything. I think we should stick with this. | dequelabs-axe-core | js |
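A sketch of the existing convention the reviewer prefers over the new helper: normalize both sides of every comparison with toUpperCase(), which behaves the same for HTML elements (whose tagName is already uppercase) and foreign SVG/MathML elements (whose tagName preserves case). The helper name is illustrative, not part of axe-core's API.

// Run in a browser: compares tag names case-insensitively without
// special-casing the HTML namespace.
function isTag(node, name) {
  return node.tagName.toUpperCase() === name.toUpperCase();
}

isTag(document.createElement('div'), 'div'); // true
isTag(document.createElementNS('http://www.w3.org/2000/svg', 'svg'), 'SVG'); // true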
|
@@ -25,6 +25,16 @@ import (
"github.com/spf13/cobra"
)
+var (
+ snapshotListCommandHelpText = `
+Usage: mayactl snapshot list [options]
+
+$ mayactl snapshot list --volname <vol>
+
+This command displays status of available snapshot.
+`
+)
+
// NewCmdSnapshotCreate creates a snapshot of OpenEBS Volume
func NewCmdSnapshotList() *cobra.Command {
options := CmdSnaphotCreateOptions{} | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"errors"
"fmt"
"github.com/openebs/maya/pkg/client/mapiserver"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
)
// NewCmdSnapshotCreate creates a snapshot of OpenEBS Volume
func NewCmdSnapshotList() *cobra.Command {
options := CmdSnaphotCreateOptions{}
cmd := &cobra.Command{
Use: "list",
Short: "Lists all the snapshots of a Volume",
//Long: SnapshotCreateCommandHelpText,
Run: func(cmd *cobra.Command, args []string) {
util.CheckErr(options.ValidateList(cmd), util.Fatal)
util.CheckErr(options.RunSnapshotList(cmd), util.Fatal)
},
}
cmd.Flags().StringVarP(&options.volName, "volname", "n", options.volName,
"unique volume name.")
cmd.MarkPersistentFlagRequired("volname")
return cmd
}
// ValidateList validates the flag values
func (c *CmdSnaphotCreateOptions) ValidateList(cmd *cobra.Command) error {
if c.volName == "" {
		return errors.New("--volname is missing. Please specify a unique name")
}
return nil
}
// RunSnapshotList does tasks related to mayaserver.
func (c *CmdSnaphotCreateOptions) RunSnapshotList(cmd *cobra.Command) error {
resp := mapiserver.ListSnapshot(c.volName)
if resp != nil {
		return fmt.Errorf("Error listing available snapshots: %v", resp)
}
return nil
}
| 1 | 8,197 | This command displays available snapshots on a volume. | openebs-maya | go |
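The reviewer's wording, applied to the help-text constant from the diff above (a sketch of the requested revision, not the merged change):

var snapshotListCommandHelpText = `
Usage: mayactl snapshot list [options]

$ mayactl snapshot list --volname <vol>

This command displays available snapshots on a volume.
`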
@@ -38,7 +38,7 @@ import (
"golang.org/x/tools/go/types/typeutil"
cpb "kythe.io/kythe/proto/common_go_proto"
- spb "kythe.io/kythe/proto/storage_go_proto"
+ stpb "kythe.io/kythe/proto/storage_go_proto"
)
// EmitOptions control the behaviour of the Emit function. A nil options | 1 | /*
* Copyright 2016 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package indexer
import (
"context"
"fmt"
"go/ast"
"go/token"
"go/types"
"log"
"net/url"
"path"
"strconv"
"strings"
"kythe.io/kythe/go/extractors/govname"
"kythe.io/kythe/go/util/metadata"
"kythe.io/kythe/go/util/schema/edges"
"kythe.io/kythe/go/util/schema/facts"
"kythe.io/kythe/go/util/schema/nodes"
"github.com/golang/protobuf/proto"
"golang.org/x/tools/go/types/typeutil"
cpb "kythe.io/kythe/proto/common_go_proto"
spb "kythe.io/kythe/proto/storage_go_proto"
)
// EmitOptions control the behaviour of the Emit function. A nil options
// pointer provides default values.
type EmitOptions struct {
// If true, emit nodes for standard library packages when they are first
// encountered. This is helpful if you want to index a package in isolation
// where data for the standard library are not available.
EmitStandardLibs bool
// If true, emit code facts containing MarkedSource messages.
EmitMarkedSource bool
// If true, emit linkages specified by metadata rules.
EmitLinkages bool
// If true, emit childof edges for an anchor's semantic scope.
EmitAnchorScopes bool
// If set, use this as the base URL for links to godoc. The import path is
// appended to the path of this URL to obtain the target URL to link to.
DocBase *url.URL
}
func (e *EmitOptions) emitMarkedSource() bool {
if e == nil {
return false
}
return e.EmitMarkedSource
}
func (e *EmitOptions) emitAnchorScopes() bool {
if e == nil {
return false
}
return e.EmitAnchorScopes
}
// shouldEmit reports whether the indexer should emit a node for the given
// vname. Presently this is true if vname denotes a standard library and the
// corresponding option is enabled.
func (e *EmitOptions) shouldEmit(vname *spb.VName) bool {
return e != nil && e.EmitStandardLibs && govname.IsStandardLibrary(vname)
}
// docURL returns a documentation URL for the specified package, if one is
// specified by the options, or "" if not.
func (e *EmitOptions) docURL(pi *PackageInfo) string {
if e != nil && e.DocBase != nil {
u := *e.DocBase
u.Path = path.Join(u.Path, pi.ImportPath)
return u.String()
}
return ""
}
// An impl records that a type A implements an interface B.
type impl struct{ A, B types.Object }
// Emit generates Kythe facts and edges to represent pi, and writes them to
// sink. In case of errors, processing continues as far as possible before the
// first error encountered is reported.
func (pi *PackageInfo) Emit(ctx context.Context, sink Sink, opts *EmitOptions) error {
e := &emitter{
ctx: ctx,
pi: pi,
sink: sink,
opts: opts,
impl: make(map[impl]struct{}),
anchored: make(map[ast.Node]struct{}),
}
// Emit a node to represent the package as a whole.
e.writeFact(pi.VName, facts.NodeKind, nodes.Package)
if url := e.opts.docURL(pi); url != "" {
e.writeFact(pi.VName, facts.DocURI, url)
}
e.emitPackageMarkedSource(pi)
// Emit facts for all the source files claimed by this package.
for file, text := range pi.SourceText {
vname := pi.FileVName(file)
e.writeFact(vname, facts.NodeKind, nodes.File)
e.writeFact(vname, facts.Text, text)
// All Go source files are encoded as UTF-8, which is the default.
e.writeEdge(vname, pi.VName, edges.ChildOf)
}
// Traverse the AST of each file in the package for xref entries.
for _, file := range pi.Files {
e.cmap = ast.NewCommentMap(pi.FileSet, file, file.Comments)
e.writeDoc(file.Doc, pi.VName) // capture package comments
e.writeRef(file.Name, pi.VName, edges.DefinesBinding) // define a binding for the package
ast.Walk(newASTVisitor(func(node ast.Node, stack stackFunc) bool {
switch n := node.(type) {
case *ast.Ident:
e.visitIdent(n, stack)
case *ast.FuncDecl:
e.visitFuncDecl(n, stack)
case *ast.FuncLit:
e.visitFuncLit(n, stack)
case *ast.ValueSpec:
e.visitValueSpec(n, stack)
case *ast.TypeSpec:
e.visitTypeSpec(n, stack)
case *ast.ImportSpec:
e.visitImportSpec(n, stack)
case *ast.AssignStmt:
e.visitAssignStmt(n, stack)
case *ast.RangeStmt:
e.visitRangeStmt(n, stack)
case *ast.CompositeLit:
e.visitCompositeLit(n, stack)
}
return true
}), file)
}
// Emit edges from each named type to the interface types it satisfies, for
// those interface types that are known to this compilation.
e.emitSatisfactions()
// TODO(fromberger): Add diagnostics for type-checker errors.
for _, err := range pi.Errors {
log.Printf("WARNING: Type resolution error: %v", err)
}
return e.firstErr
}
type emitter struct {
ctx context.Context
pi *PackageInfo
sink Sink
opts *EmitOptions
impl map[impl]struct{} // see checkImplements
rmap map[*ast.File]map[int]metadata.Rules // see applyRules
anchored map[ast.Node]struct{} // see writeAnchor
firstErr error
cmap ast.CommentMap // current file's CommentMap
}
// visitIdent handles referring identifiers. Declaring identifiers are handled
// as part of their parent syntax.
func (e *emitter) visitIdent(id *ast.Ident, stack stackFunc) {
obj := e.pi.Info.Uses[id]
if obj == nil {
// Defining identifiers are handled by their parent nodes.
return
}
target := e.pi.ObjectVName(obj)
if target == nil {
// This should not happen in well-formed packages, but can if the
// extractor gets confused. Avoid emitting confusing references in such
// cases. Note that in this case we need to emit a fresh anchor, since
// we aren't otherwise emitting a reference.
e.writeNodeDiagnostic(id, diagnostic{
Message: fmt.Sprintf("Unable to identify the package for %q", id.Name),
})
return
}
ref := e.writeRef(id, target, edges.Ref)
if e.opts.emitAnchorScopes() {
e.writeEdge(ref, e.callContext(stack).vname, edges.ChildOf)
}
if call, ok := isCall(id, obj, stack); ok {
callAnchor := e.writeRef(call, target, edges.RefCall)
// Paint an edge to the function blamed for the call, or if there is
// none then to the package initializer.
e.writeEdge(callAnchor, e.callContext(stack).vname, edges.ChildOf)
}
}
// visitFuncDecl handles function and method declarations and their parameters.
func (e *emitter) visitFuncDecl(decl *ast.FuncDecl, stack stackFunc) {
info := &funcInfo{vname: new(spb.VName)}
e.pi.function[decl] = info
// Get the type of this function, even if its name is blank.
obj, _ := e.pi.Info.Defs[decl.Name].(*types.Func)
if obj == nil {
return // a redefinition, for example
}
// Special case: There may be multiple package-level init functions, so
// override the normal signature generation to include a discriminator.
if decl.Recv == nil && obj.Name() == "init" {
e.pi.numInits++
e.pi.sigs[obj] = fmt.Sprintf("%s#%d", e.pi.Signature(obj), e.pi.numInits)
}
info.vname = e.mustWriteBinding(decl.Name, nodes.Function, nil)
e.writeDef(decl, info.vname)
e.writeDoc(decl.Doc, info.vname)
// For concrete methods: Emit the receiver if named, and connect the method
// to its declaring type.
sig := obj.Type().(*types.Signature)
if sig.Recv() != nil {
// The receiver is treated as parameter 0.
if names := decl.Recv.List[0].Names; names != nil {
if recv := e.writeBinding(names[0], nodes.Variable, info.vname); recv != nil {
e.writeEdge(info.vname, recv, edges.ParamIndex(0))
}
}
// The method should be a child of its (named) enclosing type.
if named, _ := deref(sig.Recv().Type()).(*types.Named); named != nil {
base := e.pi.ObjectVName(named.Obj())
e.writeEdge(info.vname, base, edges.ChildOf)
}
}
e.emitParameters(decl.Type, sig, info)
}
// emitTApp emits a tapp node and returns its VName. The new tapp is emitted
// with given constructor and parameters. The constructor's kind is also
// emitted if this is the first time seeing it.
func (e *emitter) emitTApp(ms *cpb.MarkedSource, ctorKind string, ctor *spb.VName, params ...*spb.VName) *spb.VName {
if e.pi.typeEmitted.Add(ctor.Signature) {
e.writeFact(ctor, facts.NodeKind, ctorKind)
if ctorKind == nodes.TBuiltin {
e.emitBuiltinMarkedSource(ctor)
}
}
components := []interface{}{ctor}
for _, p := range params {
components = append(components, p)
}
v := &spb.VName{Language: govname.Language, Signature: hashSignature(components)}
if e.pi.typeEmitted.Add(v.Signature) {
e.writeFact(v, facts.NodeKind, nodes.TApp)
e.writeEdge(v, ctor, edges.ParamIndex(0))
for i, p := range params {
e.writeEdge(v, p, edges.ParamIndex(i+1))
}
if ms != nil && e.opts.emitMarkedSource() {
e.emitCode(v, ms)
}
}
return v
}
// emitType emits the type as a node and returns its VName. VNames are cached
// so the type nodes are only emitted the first time they are seen.
func (e *emitter) emitType(typ types.Type) *spb.VName {
v, ok := e.pi.typeVName[typ]
if ok {
return v
}
switch typ := typ.(type) {
case *types.Named:
v = e.pi.ObjectVName(typ.Obj())
case *types.Basic:
v = govname.BasicType(typ)
if e.pi.typeEmitted.Add(v.Signature) {
e.writeFact(v, facts.NodeKind, nodes.TBuiltin)
e.emitBuiltinMarkedSource(v)
}
case *types.Array:
v = e.emitTApp(arrayTAppMS(typ.Len()), nodes.TBuiltin, govname.ArrayConstructorType(typ.Len()), e.emitType(typ.Elem()))
case *types.Slice:
v = e.emitTApp(sliceTAppMS, nodes.TBuiltin, govname.SliceConstructorType(), e.emitType(typ.Elem()))
case *types.Pointer:
v = e.emitTApp(pointerTAppMS, nodes.TBuiltin, govname.PointerConstructorType(), e.emitType(typ.Elem()))
case *types.Chan:
v = e.emitTApp(chanTAppMS(typ.Dir()), nodes.TBuiltin, govname.ChanConstructorType(typ.Dir()), e.emitType(typ.Elem()))
case *types.Map:
v = e.emitTApp(mapTAppMS, nodes.TBuiltin, govname.MapConstructorType(), e.emitType(typ.Key()), e.emitType(typ.Elem()))
case *types.Tuple: // function return types
v = e.emitTApp(tupleTAppMS, nodes.TBuiltin, govname.TupleConstructorType(), e.visitTuple(typ)...)
case *types.Signature: // function types
ms := &cpb.MarkedSource{
Kind: cpb.MarkedSource_TYPE,
Child: []*cpb.MarkedSource{{
Kind: cpb.MarkedSource_PARAMETER_LOOKUP_BY_PARAM,
LookupIndex: 3,
PreText: "func(",
PostChildText: ", ",
PostText: ")",
}},
}
params := e.visitTuple(typ.Params())
if typ.Variadic() && len(params) > 0 {
// Convert last parameter type from slice type to variadic type.
last := len(params) - 1
if slice, ok := typ.Params().At(last).Type().(*types.Slice); ok {
params[last] = e.emitTApp(variadicTAppMS, nodes.TBuiltin, govname.VariadicConstructorType(), e.emitType(slice.Elem()))
}
}
var ret *spb.VName
if typ.Results().Len() == 1 {
ret = e.emitType(typ.Results().At(0).Type())
} else {
ret = e.emitType(typ.Results())
}
if typ.Results().Len() != 0 {
ms.Child = append(ms.Child, &cpb.MarkedSource{
Kind: cpb.MarkedSource_BOX,
PreText: " ",
Child: []*cpb.MarkedSource{{
Kind: cpb.MarkedSource_LOOKUP_BY_PARAM,
LookupIndex: 1,
}},
})
}
var recv *spb.VName
if r := typ.Recv(); r != nil {
recv = e.emitType(r.Type())
ms.Child = append([]*cpb.MarkedSource{{
Kind: cpb.MarkedSource_BOX,
PreText: "(",
PostText: ") ",
Child: []*cpb.MarkedSource{{
Kind: cpb.MarkedSource_LOOKUP_BY_PARAM,
LookupIndex: 2,
}},
}}, ms.Child...)
} else {
recv = e.emitType(types.NewTuple())
}
v = e.emitTApp(ms, nodes.TBuiltin, govname.FunctionConstructorType(),
append([]*spb.VName{ret, recv}, params...)...)
case *types.Interface:
v = &spb.VName{Language: govname.Language, Signature: hashSignature(typ)}
if e.pi.typeEmitted.Add(v.Signature) {
e.writeFact(v, facts.NodeKind, nodes.Interface)
if e.opts.emitMarkedSource() {
e.emitCode(v, &cpb.MarkedSource{
Kind: cpb.MarkedSource_TYPE,
PreText: typ.String(),
})
}
}
case *types.Struct:
v = &spb.VName{Language: govname.Language, Signature: hashSignature(typ)}
if e.pi.typeEmitted.Add(v.Signature) {
e.writeFact(v, facts.NodeKind, nodes.Record)
if e.opts.emitMarkedSource() {
e.emitCode(v, &cpb.MarkedSource{
Kind: cpb.MarkedSource_TYPE,
PreText: typ.String(),
})
}
}
default:
log.Printf("WARNING: unknown type %T: %+v", typ, typ)
}
e.pi.typeVName[typ] = v
return v
}
func (e *emitter) emitTypeOf(expr ast.Expr) *spb.VName { return e.emitType(e.pi.Info.TypeOf(expr)) }
func (e *emitter) visitTuple(t *types.Tuple) []*spb.VName {
size := t.Len()
ts := make([]*spb.VName, size)
for i := 0; i < size; i++ {
ts[i] = e.emitType(t.At(i).Type())
}
return ts
}
// visitFuncLit handles function literals and their parameters. The signature
// for a function literal is named relative to the signature of its parent
// function, or the file scope if the literal is at the top level.
func (e *emitter) visitFuncLit(flit *ast.FuncLit, stack stackFunc) {
fi := e.callContext(stack)
if fi == nil {
log.Panic("Function literal without a context: ", flit)
}
fi.numAnons++
info := &funcInfo{vname: proto.Clone(fi.vname).(*spb.VName)}
info.vname.Language = govname.Language
info.vname.Signature += "$" + strconv.Itoa(fi.numAnons)
e.pi.function[flit] = info
e.writeDef(flit, info.vname)
e.writeFact(info.vname, facts.NodeKind, nodes.Function)
if sig, ok := e.pi.Info.Types[flit].Type.(*types.Signature); ok {
e.emitParameters(flit.Type, sig, info)
}
}
// visitValueSpec handles variable and constant bindings.
func (e *emitter) visitValueSpec(spec *ast.ValueSpec, stack stackFunc) {
kind := nodes.Variable
if stack(1).(*ast.GenDecl).Tok == token.CONST {
kind = nodes.Constant
}
doc := specComment(spec, stack)
for _, id := range spec.Names {
target := e.writeBinding(id, kind, e.nameContext(stack))
if target == nil {
continue // type error (reported elsewhere)
}
e.writeDoc(doc, target)
}
// Handle members of anonymous types declared in situ.
if spec.Type != nil {
e.emitAnonMembers(spec.Type)
}
for _, v := range spec.Values {
if lit, ok := v.(*ast.CompositeLit); ok {
e.emitAnonMembers(lit.Type)
}
}
}
// visitTypeSpec handles type declarations, including the bindings for fields
// of struct types and methods of interfaces.
func (e *emitter) visitTypeSpec(spec *ast.TypeSpec, stack stackFunc) {
obj := e.pi.Info.Defs[spec.Name]
if obj == nil {
return // type error
}
target := e.mustWriteBinding(spec.Name, "", e.nameContext(stack))
e.writeDef(spec, target)
e.writeDoc(specComment(spec, stack), target)
// Emit type-specific structure.
switch t := obj.Type().Underlying().(type) {
case *types.Struct:
e.writeFact(target, facts.NodeKind, nodes.Record)
e.writeFact(target, facts.Subkind, nodes.Struct)
// Add parent edges for all fields, including promoted ones.
for i, n := 0, t.NumFields(); i < n; i++ {
e.writeEdge(e.pi.ObjectVName(t.Field(i)), target, edges.ChildOf)
}
// Add bindings for the explicitly-named fields in this declaration.
// Parent edges were already added, so skip them here.
if st, ok := spec.Type.(*ast.StructType); ok {
mapFields(st.Fields, func(i int, id *ast.Ident) {
target := e.writeVarBinding(id, nodes.Field, nil)
f := st.Fields.List[i]
e.writeDoc(firstNonEmptyComment(f.Doc, f.Comment), target)
e.emitAnonMembers(f.Type)
})
// Handle anonymous fields. Such fields behave as if they were
// named by the base identifier of their type.
for _, field := range st.Fields.List {
if len(field.Names) != 0 {
continue // already handled above
}
id, ok := e.pi.findFieldName(field.Type)
obj := e.pi.Info.Defs[id]
if ok && obj != nil {
// Don't write a fresh anchor here; we already wrote one as
// part of the ref to the type, and we don't want duplicate
// outputs.
anchor := e.pi.AnchorVName(e.pi.Span(id))
target := e.pi.ObjectVName(obj)
e.writeEdge(anchor, target, edges.DefinesBinding)
e.writeFact(target, facts.NodeKind, nodes.Variable)
e.writeFact(target, facts.Subkind, nodes.Field)
e.writeDoc(firstNonEmptyComment(field.Doc, field.Comment), target)
}
}
}
case *types.Interface:
e.writeFact(target, facts.NodeKind, nodes.Interface)
// Add parent edges for all methods, including inherited ones.
for i, n := 0, t.NumMethods(); i < n; i++ {
e.writeEdge(e.pi.ObjectVName(t.Method(i)), target, edges.ChildOf)
}
// Mark the interface as an extension of any embedded interfaces.
for i, n := 0, t.NumEmbeddeds(); i < n; i++ {
if eobj := t.Embedded(i).Obj(); e.checkImplements(obj, eobj) {
e.writeEdge(target, e.pi.ObjectVName(eobj), edges.Extends)
}
}
// Add bindings for the explicitly-named methods in this declaration.
// Parent edges were already added, so skip them here.
if it, ok := spec.Type.(*ast.InterfaceType); ok {
mapFields(it.Methods, func(_ int, id *ast.Ident) {
e.writeBinding(id, nodes.Function, nil)
})
}
default:
// We model a newtype form whose underlying type is not already a
// struct (e.g., "type Foo int") as if it were a record with a single
// unexported field of the underlying type. That is not really what Go
// does, but it is close enough for the graph model to work. Since
// there is no actual field declaration, however, we don't emit that.
e.writeFact(target, facts.NodeKind, nodes.Record)
e.writeFact(target, facts.Subkind, nodes.Type)
}
}
// visitImportSpec handles references to imported packages.
func (e *emitter) visitImportSpec(spec *ast.ImportSpec, stack stackFunc) {
ipath, _ := strconv.Unquote(spec.Path.Value)
if vPath, ok := e.pi.Vendored[ipath]; ok {
ipath = vPath
}
pkg := e.pi.Dependencies[ipath]
target := e.pi.PackageVName[pkg]
if target == nil {
log.Printf("Unable to resolve import path %q", ipath)
return
}
e.writeRef(spec.Path, target, edges.RefImports)
if e.opts.shouldEmit(target) && !e.pi.standardLib.Contains(ipath) {
e.writeFact(target, facts.NodeKind, nodes.Package)
e.pi.standardLib.Add(ipath)
}
}
// visitAssignStmt handles bindings introduced by short-declaration syntax in
// assignment statements, e.g., "x, y := 1, 2".
func (e *emitter) visitAssignStmt(stmt *ast.AssignStmt, stack stackFunc) {
if stmt.Tok != token.DEFINE {
return // no new bindings in this statement
}
// Not all the names in a short declaration assignment may be defined here.
// We only add bindings for newly-defined ones, of which there must be at
// least one in a well-typed program.
up := e.nameContext(stack)
for _, expr := range stmt.Lhs {
if id, _ := expr.(*ast.Ident); id != nil {
// Add a binding only if this is the definition site for the name.
if obj := e.pi.Info.Defs[id]; obj != nil && obj.Pos() == id.Pos() {
e.mustWriteBinding(id, nodes.Variable, up)
}
}
}
// TODO(fromberger): Add information about initializers where available.
}
// visitRangeStmt handles the bindings introduced by a for ... range statement.
func (e *emitter) visitRangeStmt(stmt *ast.RangeStmt, stack stackFunc) {
if stmt.Tok != token.DEFINE {
return // no new bindings in this statement
}
// In a well-typed program, the key and value will always be identifiers.
up := e.nameContext(stack)
if key, _ := stmt.Key.(*ast.Ident); key != nil {
e.writeBinding(key, nodes.Variable, up)
}
if val, _ := stmt.Value.(*ast.Ident); val != nil {
e.writeBinding(val, nodes.Variable, up)
}
}
// visitCompositeLit handles references introduced by positional initializers
// in composite literals that construct (pointer to) struct values. Named
// initializers are handled separately.
func (e *emitter) visitCompositeLit(expr *ast.CompositeLit, stack stackFunc) {
if len(expr.Elts) == 0 {
return // no fields to initialize
}
tv, ok := e.pi.Info.Types[expr]
if !ok {
log.Printf("WARNING: Unable to determine composite literal type (%s)", e.pi.FileSet.Position(expr.Pos()))
return
}
sv, ok := deref(tv.Type.Underlying()).(*types.Struct)
if !ok {
return // non-struct type, e.g. a slice; nothing to do here
}
if n := sv.NumFields(); n < len(expr.Elts) {
// Embedded struct fields from an imported package may not appear in
// the list if the import did not succeed. To remain robust against
// such cases, don't try to read into the fields of a struct type if
// the counts don't line up. The information we emit will still be
// correct, we'll just miss some initializers.
log.Printf("ERROR: Struct has %d fields but %d initializers (skipping)", n, len(expr.Elts))
return
}
for i, elt := range expr.Elts {
// The keys for key-value initializers are handled upstream of us, so
// we need only handle the values. But note that key-value initializers
// may not be in order, so we have to take care to get the right field.
// Positional fields must be in order, in well-formed code.
switch t := elt.(type) {
case *ast.KeyValueExpr:
f, ok := fieldIndex(t.Key, sv)
if !ok {
log.Printf("ERROR: Found no field index for %v (skipping)", t.Key)
continue
}
e.emitPosRef(t.Value, sv.Field(f), edges.RefInit)
default:
e.emitPosRef(t, sv.Field(i), edges.RefInit)
}
}
}
// emitPosRef emits an anchor spanning loc, pointing to obj.
func (e *emitter) emitPosRef(loc ast.Node, obj types.Object, kind string) {
target := e.pi.ObjectVName(obj)
file, start, end := e.pi.Span(loc)
anchor := e.pi.AnchorVName(file, start, end)
e.writeAnchor(loc, anchor, start, end)
e.writeEdge(anchor, target, kind)
}
// emitParameters emits parameter edges for the parameters of a function type,
// given the type signature and info of the enclosing declaration or function
// literal.
func (e *emitter) emitParameters(ftype *ast.FuncType, sig *types.Signature, info *funcInfo) {
paramIndex := 0
// If there is a receiver, it is treated as param.0.
if sig.Recv() != nil {
paramIndex++
}
// Emit bindings and parameter edges for the parameters.
mapFields(ftype.Params, func(i int, id *ast.Ident) {
if sig.Params().At(i) != nil {
if param := e.writeBinding(id, nodes.Variable, info.vname); param != nil {
e.writeEdge(info.vname, param, edges.ParamIndex(paramIndex))
field := ftype.Params.List[i]
e.emitAnonMembers(field.Type)
// Field object does not associate any comments with the parameter; use CommentMap to find them
e.writeDoc(firstNonEmptyComment(e.cmap.Filter(field).Comments()...), param)
}
}
paramIndex++
})
// Emit bindings for any named result variables.
// Results are not considered parameters.
mapFields(ftype.Results, func(i int, id *ast.Ident) {
e.writeBinding(id, nodes.Variable, info.vname)
})
}
// emitAnonMembers checks whether expr denotes an anonymous struct or interface
// type, and if so emits bindings for its member fields/methods. The resulting
// members do not parent to the type, since it has no referential identity; but
// we do capture documentation in the unlikely event someone wrote any.
func (e *emitter) emitAnonMembers(expr ast.Expr) {
if st, ok := expr.(*ast.StructType); ok {
mapFields(st.Fields, func(i int, id *ast.Ident) {
target := e.writeVarBinding(id, nodes.Field, nil) // no parent
e.writeDoc(firstNonEmptyComment(st.Fields.List[i].Doc, st.Fields.List[i].Comment), target)
})
} else if it, ok := expr.(*ast.InterfaceType); ok {
mapFields(it.Methods, func(i int, id *ast.Ident) {
target := e.writeBinding(id, nodes.Function, nil) // no parent
e.writeDoc(firstNonEmptyComment(it.Methods.List[i].Doc, it.Methods.List[i].Comment), target)
})
}
}
// An override represents the relationship that x overrides y.
type override struct {
x, y types.Object
}
// overrides represents a set of override relationships we've already generated.
type overrides map[override]bool
// seen reports whether an x overrides y was already cached, and if not adds it
// to the set.
func (o overrides) seen(x, y types.Object) bool {
ov := override{x: x, y: y}
ok := o[ov]
if !ok {
o[ov] = true
}
return ok
}
// emitSatisfactions visits each named type known through the compilation being
// indexed, and emits edges connecting it to any known interfaces its method
// set satisfies.
func (e *emitter) emitSatisfactions() {
// Find the names of all defined types mentioned in this compilation.
var allNames []*types.TypeName
// For the current source package, use all names, even local ones.
for _, obj := range e.pi.Info.Defs {
if obj, ok := obj.(*types.TypeName); ok {
if _, ok := obj.Type().(*types.Named); ok {
allNames = append(allNames, obj)
}
}
}
// For dependencies, we only have access to package-level types, not those
// defined by inner scopes.
for _, pkg := range e.pi.Dependencies {
scope := pkg.Scope()
for _, name := range scope.Names() {
if obj, ok := scope.Lookup(name).(*types.TypeName); ok {
// Note that the names of some "named" types that are brought
// in from dependencies may not be known at this point -- the
// compiled package headers omit the names if they are not
// needed. Skip such cases, even though they would qualify if
// we had the source package.
if _, ok := obj.Type().(*types.Named); ok && obj.Name() != "" {
allNames = append(allNames, obj)
}
}
}
}
// Cache the method set of each named type in this package.
var msets typeutil.MethodSetCache
// Cache the overrides we've noticed to avoid duplicate entries.
cache := make(overrides)
for _, xobj := range allNames {
if xobj.Pkg() != e.pi.Package {
continue // not from this package
}
// Check whether x is a named type with methods; if not, skip it.
x := xobj.Type()
if len(typeutil.IntuitiveMethodSet(x, &msets)) == 0 {
continue // no methods to consider
}
// N.B. This implementation is quadratic in the number of visible
// interfaces, but that's probably OK since we are only considering a
// single compilation.
// Check the method sets of both x and pointer-to-x for overrides.
xmset := msets.MethodSet(x)
pxmset := msets.MethodSet(types.NewPointer(x))
for _, yobj := range allNames {
if xobj == yobj {
continue
}
y := yobj.Type()
ymset := msets.MethodSet(y)
ifx, ify := isInterface(x), isInterface(y)
switch {
case ifx && ify && ymset.Len() > 0:
// x and y are both interfaces. Note that extension is handled
// elsewhere as part of the type spec for the interface.
if types.AssignableTo(x, y) {
e.writeSatisfies(xobj, yobj)
}
if types.AssignableTo(y, x) {
e.writeSatisfies(yobj, xobj)
}
case ifx:
// y is a concrete type
pymset := msets.MethodSet(types.NewPointer(y))
if types.AssignableTo(y, x) {
e.writeSatisfies(yobj, xobj)
e.emitOverrides(ymset, pymset, xmset, cache)
} else if py := types.NewPointer(y); types.AssignableTo(py, x) {
e.writeSatisfies(yobj, xobj)
e.emitOverrides(ymset, pymset, xmset, cache)
}
case ify && ymset.Len() > 0:
// x is a concrete type
if types.AssignableTo(x, y) {
e.writeSatisfies(xobj, yobj)
e.emitOverrides(xmset, pxmset, ymset, cache)
} else if px := types.NewPointer(x); types.AssignableTo(px, y) {
e.writeSatisfies(xobj, yobj)
e.emitOverrides(xmset, pxmset, ymset, cache)
}
default:
// Both x and y are concrete.
}
}
}
}
// Add xm-(overrides)-ym for each concrete method xm with a corresponding
// abstract method ym.
func (e *emitter) emitOverrides(xmset, pxmset, ymset *types.MethodSet, cache overrides) {
for i, n := 0, ymset.Len(); i < n; i++ {
ym := ymset.At(i)
yobj := ym.Obj()
xm := xmset.Lookup(yobj.Pkg(), yobj.Name())
if xm == nil {
if pxmset != nil {
xm = pxmset.Lookup(yobj.Pkg(), yobj.Name())
}
if xm == nil {
continue // this method is not part of the interface we're probing
}
}
xobj := xm.Obj()
if cache.seen(xobj, yobj) {
continue
}
xvname := e.pi.ObjectVName(xobj)
yvname := e.pi.ObjectVName(yobj)
e.writeEdge(xvname, yvname, edges.Overrides)
xt := e.emitType(xobj.Type())
yt := e.emitType(yobj.Type())
if e.pi.typeEmitted.Add(xt.Signature + "+" + yt.Signature) {
e.writeEdge(xt, yt, edges.Satisfies)
}
}
}
func isInterface(typ types.Type) bool { _, ok := typ.Underlying().(*types.Interface); return ok }
func (e *emitter) check(err error) {
if err != nil && e.firstErr == nil {
e.firstErr = err
log.Printf("ERROR indexing %q: %v", e.pi.ImportPath, err)
}
}
func (e *emitter) checkImplements(src, tgt types.Object) bool {
i := impl{A: src, B: tgt}
if _, ok := e.impl[i]; ok {
return false
}
e.impl[i] = struct{}{}
return true
}
func (e *emitter) writeSatisfies(src, tgt types.Object) {
if e.checkImplements(src, tgt) {
e.writeEdge(e.pi.ObjectVName(src), e.pi.ObjectVName(tgt), edges.Satisfies)
}
}
func (e *emitter) writeFact(src *spb.VName, name, value string) {
e.check(e.sink.writeFact(e.ctx, src, name, value))
}
func (e *emitter) writeEdge(src, tgt *spb.VName, kind string) {
e.check(e.sink.writeEdge(e.ctx, src, tgt, kind))
}
func (e *emitter) writeAnchor(node ast.Node, src *spb.VName, start, end int) {
if _, ok := e.anchored[node]; ok {
return // this node already has an anchor
}
e.anchored[node] = struct{}{}
e.check(e.sink.writeAnchor(e.ctx, src, start, end))
}
func (e *emitter) writeDiagnostic(src *spb.VName, d diagnostic) {
e.check(e.sink.writeDiagnostic(e.ctx, src, d))
}
func (e *emitter) writeNodeDiagnostic(src ast.Node, d diagnostic) {
file, start, end := e.pi.Span(src)
anchor := e.pi.AnchorVName(file, start, end)
e.writeAnchor(src, anchor, start, end)
e.writeDiagnostic(anchor, d)
}
// writeRef emits an anchor spanning origin and referring to target with an
// edge of the given kind. The vname of the anchor is returned.
func (e *emitter) writeRef(origin ast.Node, target *spb.VName, kind string) *spb.VName {
file, start, end := e.pi.Span(origin)
anchor := e.pi.AnchorVName(file, start, end)
e.writeAnchor(origin, anchor, start, end)
e.writeEdge(anchor, target, kind)
// Check whether we are intended to emit metadata linkage edges, and if so,
// whether there are any to process.
e.applyRules(file, start, end, kind, func(rule metadata.Rule) {
if rule.Reverse {
e.writeEdge(rule.VName, target, rule.EdgeOut)
} else {
e.writeEdge(target, rule.VName, rule.EdgeOut)
}
})
return anchor
}
// mustWriteBinding is as writeBinding, but panics if id does not resolve. Use
// this in cases where the object is known already to exist.
func (e *emitter) mustWriteBinding(id *ast.Ident, kind string, parent *spb.VName) *spb.VName {
if target := e.writeBinding(id, kind, parent); target != nil {
return target
}
panic("unresolved definition") // logged in writeBinding
}
// writeVarBinding is as writeBinding, assuming the kind is "variable".
// If subkind != "", it is also emitted as a subkind.
func (e *emitter) writeVarBinding(id *ast.Ident, subkind string, parent *spb.VName) *spb.VName {
vname := e.writeBinding(id, nodes.Variable, parent)
if vname != nil && subkind != "" {
e.writeFact(vname, facts.Subkind, subkind)
}
return vname
}
// writeBinding emits a node of the specified kind for the target of id. If
// the identifier is not "_", an anchor for a binding definition of the target
// is also emitted at id. If parent != nil, the target is also recorded as its
// child. The target vname is returned.
func (e *emitter) writeBinding(id *ast.Ident, kind string, parent *spb.VName) *spb.VName {
obj := e.pi.Info.Defs[id]
if obj == nil {
loc := e.pi.FileSet.Position(id.Pos())
log.Printf("ERROR: Missing definition for id %q at %s", id.Name, loc)
return nil
}
target := e.pi.ObjectVName(obj)
if kind != "" {
e.writeFact(target, facts.NodeKind, kind)
}
if id.Name != "_" {
e.writeRef(id, target, edges.DefinesBinding)
}
if parent != nil {
e.writeEdge(target, parent, edges.ChildOf)
}
if e.opts.emitMarkedSource() {
e.emitCode(target, e.pi.MarkedSource(obj))
}
e.writeEdge(target, e.emitTypeOf(id), edges.Typed)
return target
}
// writeDef emits a spanning anchor and defines edge for the specified node.
// This function does not create the target node.
func (e *emitter) writeDef(node ast.Node, target *spb.VName) { e.writeRef(node, target, edges.Defines) }
// writeDoc adds associations between comment groups and a documented node.
// It also handles marking deprecated facts on the target.
func (e *emitter) writeDoc(comments *ast.CommentGroup, target *spb.VName) {
if comments == nil || len(comments.List) == 0 || target == nil {
return
}
var lines []string
for _, comment := range comments.List {
lines = append(lines, trimComment(comment.Text))
}
docNode := proto.Clone(target).(*spb.VName)
docNode.Signature += " doc"
e.writeFact(docNode, facts.NodeKind, nodes.Doc)
e.writeFact(docNode, facts.Text, escComment.Replace(strings.Join(lines, "\n")))
e.writeEdge(docNode, target, edges.Documents)
e.emitDeprecation(target, lines)
}
// emitDeprecation emits a deprecated fact for the specified target if the
// comment lines indicate it is deprecated per https://github.com/golang/go/wiki/Deprecated
func (e *emitter) emitDeprecation(target *spb.VName, lines []string) {
var deplines []string
for _, line := range lines {
if len(deplines) == 0 {
if msg := strings.TrimPrefix(line, "Deprecated:"); msg != line {
deplines = append(deplines, strings.TrimSpace(msg))
}
} else if line == "" {
break
} else {
deplines = append(deplines, strings.TrimSpace(line))
}
}
if len(deplines) > 0 {
e.writeFact(target, facts.Deprecated, strings.Join(deplines, " "))
}
}
// isCall reports whether id is a call to obj. This holds if id is in call
// position ("id(...") or is the RHS of a selector in call position
// ("x.id(...)"). If so, the nearest enclosing call expression is also
// returned.
//
// This will not match if there are redundant parentheses in the expression.
func isCall(id *ast.Ident, obj types.Object, stack stackFunc) (*ast.CallExpr, bool) {
if _, ok := obj.(*types.Func); ok {
if call, ok := stack(1).(*ast.CallExpr); ok && call.Fun == id {
return call, true // id(...)
}
if sel, ok := stack(1).(*ast.SelectorExpr); ok && sel.Sel == id {
if call, ok := stack(2).(*ast.CallExpr); ok && call.Fun == sel {
return call, true // x.id(...)
}
}
}
return nil, false
}
// callContext returns funcInfo for the nearest enclosing parent function, not
// including the node itself, or the enclosing package initializer if the node
// is at the top level.
func (e *emitter) callContext(stack stackFunc) *funcInfo {
for i := 1; ; i++ {
switch p := stack(i).(type) {
case *ast.FuncDecl, *ast.FuncLit:
return e.pi.function[p]
case *ast.File:
fi := e.pi.packageInit[p]
if fi == nil {
// Lazily emit a virtual node to represent the static
// initializer for top-level expressions in this file of the
// package. We only do this if there are expressions that need
// to be initialized.
vname := proto.Clone(e.pi.VName).(*spb.VName)
vname.Signature += fmt.Sprintf(".<init>@%d", p.Package)
fi = &funcInfo{vname: vname}
e.pi.packageInit[p] = fi
e.writeFact(vname, facts.NodeKind, nodes.Function)
e.writeEdge(vname, e.pi.VName, edges.ChildOf)
// The callgraph requires we provide the caller with a
// definition (http://www.kythe.io/docs/schema/callgraph.html).
// Since there is no location, attach it to the beginning of
// the file itself.
anchor := e.pi.AnchorVName(p, 0, 0)
e.check(e.sink.writeAnchor(e.ctx, anchor, 0, 0))
e.writeEdge(anchor, vname, edges.Defines)
}
return fi
}
}
}
// nameContext returns the vname for the nearest enclosing parent node, not
// including the node itself, or the enclosing package vname if the node is at
// the top level.
func (e *emitter) nameContext(stack stackFunc) *spb.VName {
if fi := e.callContext(stack); !e.pi.isPackageInit(fi) {
return fi.vname
}
return e.pi.VName
}
// applyRules calls apply for each metadata rule matching the given combination
// of location and kind.
func (e *emitter) applyRules(file *ast.File, start, end int, kind string, apply func(r metadata.Rule)) {
if e.opts == nil || !e.opts.EmitLinkages {
return // nothing to do
} else if e.rmap == nil {
e.rmap = make(map[*ast.File]map[int]metadata.Rules)
}
// Lazily populate a cache of file :: start :: rules mappings, so that we
// need only scan the rules coincident on the starting point of the range
// we care about. In almost all cases that will be just one, if any.
rules, ok := e.rmap[file]
if !ok {
rules = make(map[int]metadata.Rules)
for _, rule := range e.pi.Rules[file] {
rules[rule.Begin] = append(rules[rule.Begin], rule)
}
e.rmap[file] = rules
}
for _, rule := range rules[start] {
if rule.End == end && rule.EdgeIn == kind {
apply(rule)
}
}
}
// A visitFunc visits a node of the Go AST. The function can use stack to
// retrieve AST nodes on the path from the node up to the root. If the return
// value is true, the children of node are also visited; otherwise they are
// skipped.
type visitFunc func(node ast.Node, stack stackFunc) bool
// A stackFunc returns the ith stack entry above an AST node, where 0
// denotes the node itself. If the ith entry does not exist, the function
// returns nil.
type stackFunc func(i int) ast.Node
// astVisitor implements ast.Visitor, passing each visited node to a callback
// function.
type astVisitor struct {
stack []ast.Node
visit visitFunc
}
func newASTVisitor(f visitFunc) ast.Visitor { return &astVisitor{visit: f} }
// Visit implements the required method of the ast.Visitor interface.
func (w *astVisitor) Visit(node ast.Node) ast.Visitor {
if node == nil {
w.stack = w.stack[:len(w.stack)-1] // pop
return w
}
w.stack = append(w.stack, node) // push
if !w.visit(node, w.parent) {
return nil
}
return w
}
func (w *astVisitor) parent(i int) ast.Node {
if i >= len(w.stack) {
return nil
}
return w.stack[len(w.stack)-1-i]
}
// deref returns the base type of T if it is a pointer, otherwise T itself.
func deref(T types.Type) types.Type {
if U, ok := T.Underlying().(*types.Pointer); ok {
return U.Elem()
}
return T
}
// mapFields applies f to each identifier declared in fields. Each call to f
// is given the offset and the identifier.
func mapFields(fields *ast.FieldList, f func(i int, id *ast.Ident)) {
if fields == nil {
return
}
for i, field := range fields.List {
for _, id := range field.Names {
f(i, id)
}
}
}
// fieldIndex reports whether sv has a field named by expr, which must be of
// type *ast.Ident, and returns its positional index if so.
//
// N.B. This is a linear scan, but the count of fields should almost always be
// small enough not to worry about it.
func fieldIndex(expr ast.Expr, sv *types.Struct) (int, bool) {
want := expr.(*ast.Ident).Name
for i := 0; i < sv.NumFields(); i++ {
if sv.Field(i).Name() == want {
return i, true
}
}
return -1, false
}
var escComment = strings.NewReplacer("[", `\[`, "]", `\]`, `\`, `\\`)
// trimComment removes the comment delimiters from a comment. For single-line
// comments, it also removes a single leading space, if present; for multi-line
// comments it discards leading and trailing whitespace.
func trimComment(text string) string {
if single := strings.TrimPrefix(text, "//"); single != text {
return strings.TrimPrefix(single, " ")
}
return strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(text, "/*"), "*/"))
}
// specComment returns the innermost comment associated with spec, or nil.
func specComment(spec ast.Spec, stack stackFunc) *ast.CommentGroup {
var comment *ast.CommentGroup
switch t := spec.(type) {
case *ast.TypeSpec:
comment = firstNonEmptyComment(t.Doc, t.Comment)
case *ast.ValueSpec:
comment = firstNonEmptyComment(t.Doc, t.Comment)
case *ast.ImportSpec:
comment = firstNonEmptyComment(t.Doc, t.Comment)
}
if comment == nil {
if t, ok := stack(1).(*ast.GenDecl); ok {
return t.Doc
}
}
return comment
}
func firstNonEmptyComment(cs ...*ast.CommentGroup) *ast.CommentGroup {
for _, c := range cs {
if c != nil && len(c.List) > 0 {
return c
}
}
return nil
}
| 1 | 11,925 | `spb` is the standard package name we use for this proto. | kythe-kythe | go |
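What the reviewer is pointing at: spb is the repository-wide alias for the storage proto, so the diff's rename to stpb should be reverted rather than propagated. The import block as it already appears in the file above:

import (
	cpb "kythe.io/kythe/proto/common_go_proto"
	spb "kythe.io/kythe/proto/storage_go_proto" // conventional alias: spb
)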
@@ -533,13 +533,13 @@ nsCommandProcessor.prototype.execute = function(jsonCommandString,
* Changes the context of the caller to the specified window.
* @param {fxdriver.CommandResponse} response The response object to send the
* command response in.
- * @param {{name: string}} parameters The command parameters.
+ * @param {{handle: string}} parameters The command parameters.
* @param {number} opt_searchAttempt Which attempt this is at finding the
* window to switch to.
*/
nsCommandProcessor.prototype.switchToWindow = function(response, parameters,
opt_searchAttempt) {
- var lookFor = parameters.name;
+ var lookFor = parameters.handle;
var matches = function(win, lookFor) {
return !win.closed &&
(win.top && win.top.fxdriver) && | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Contains a Javascript implementation for
* nsICommandProcessor.idl. The implemented XPCOM component is exposed to
* the content page as a global property so that it can be used from
 * unprivileged code.
*/
goog.provide('fxdriver.CommandResponse');
goog.require('FirefoxDriver');
goog.require('Utils');
goog.require('WebElement');
goog.require('bot.ErrorCode');
goog.require('bot.locators');
goog.require('bot.userAgent');
goog.require('fxdriver.Timer');
goog.require('fxdriver.error');
goog.require('fxdriver.logging');
goog.require('fxdriver.modals');
goog.require('fxdriver.moz');
goog.require('fxdriver.profiler');
goog.require('goog.array');
goog.require('goog.log');
goog.require('wdSessionStoreService');
/**
* Encapsulates the result of a command to the {@code nsCommandProcessor}.
* @param {Object} command JSON object describing the command to execute.
* @param {nsIResponseHandler} responseHandler The handler to send the response
* to.
* @constructor
*/
fxdriver.CommandResponse = function(command, responseHandler) {
this.statusBarLabel_ = null;
this.responseHandler_ = responseHandler;
this.json_ = {
name: command ? command.name : 'Unknown command',
sessionId: command['sessionId'],
status: bot.ErrorCode.SUCCESS,
value: ''
};
if (goog.isObject(this.json_['sessionId'])) {
this.json_['sessionId'] = this.json_['sessionId']['value'];
}
this.session = null;
};
fxdriver.CommandResponse.prototype = {
/**
* Updates the extension status label to indicate we are about to execute a
* command.
* @param {window} win The content window that the command will be executed on.
*/
startCommand: function(win) {
this.statusBarLabel_ = win.document.getElementById('fxdriver-label');
if (this.statusBarLabel_) {
this.statusBarLabel_.style.color = 'red';
}
},
/**
* Sends the encapsulated response to the registered callback.
*/
send: function() {
if (this.responseSent_) {
// We shouldn't ever send the same response twice.
return;
}
// Indicate that we are no longer executing a command.
if (this.statusBarLabel_) {
this.statusBarLabel_.style.color = 'black';
}
this.responseHandler_.handleResponse(JSON.stringify(this.json_));
// Neuter ourselves
this.responseSent_ = true;
},
/**
* Sends a WebDriver error response.
* @param {WebDriverError} e The error to send.
*/
sendError: function(e) {
// if (e instanceof WebDriverError) won't work here since
// WebDriverError is defined in the utils.js subscript which is
// loaded independently in this component and in the main driver
// component.
this.status = e.isWebDriverError ? e.code : bot.ErrorCode.UNKNOWN_ERROR;
this.value = fxdriver.error.toJSON(e);
this.send();
},
set name(name) { this.json_.name = name; },
get name() { return this.json_.name; },
get sessionId() { return this.json_.sessionId; },
set sessionId(sessionId) { this.json_.sessionId = sessionId; },
set status(newStatus) { this.json_.status = newStatus; },
get status() { return this.json_.status; },
set value(val) { this.json_.value = val; },
get value() { return this.json_.value; }
};
/**
* Handles executing a command from the {@code CommandProcessor} once the window
* has fully loaded.
* @param {FirefoxDriver} driver The FirefoxDriver instance to execute the
* command with.
* @param {Object} command JSON object describing the command to execute.
* @param {fxdriver.CommandResponse} response The response object to send the
* command response in.
* @param {Number} opt_sleepDelay The amount of time to wait before attempting
* the command again if the window is not ready.
* @constructor
*/
var DelayedCommand = function(driver, command, response, opt_sleepDelay) {
this.driver_ = driver;
this.command_ = command;
this.response_ = response;
this.onBlank_ = false;
this.sleepDelay_ = opt_sleepDelay || DelayedCommand.DEFAULT_SLEEP_DELAY;
var activeWindow = response.session.getWindow();
try {
if (!activeWindow || activeWindow.closed) {
this.loadGroup_ = {
isPending: function() { return false; }
};
} else {
var webNav = activeWindow.
QueryInterface(Components.interfaces.nsIInterfaceRequestor).
getInterface(Components.interfaces.nsIWebNavigation);
this.loadGroup_ = webNav.
QueryInterface(Components.interfaces.nsIInterfaceRequestor).
getInterface(Components.interfaces.nsILoadGroup);
}
} catch (ex) {
// Well this sucks. This can happen if the DOM gets trashed or if the window
// is unexpectedly closed. We need to report this error to the user so they
// can let us (webdriver-eng) know that the FirefoxDriver is busted.
response.sendError(ex);
// Re-throw the error so the command will be aborted.
throw ex;
}
};
/**
* Default amount of time, in milliseconds, to wait before (re)attempting a
* {@code DelayedCommand}.
* @type {Number}
*/
DelayedCommand.DEFAULT_SLEEP_DELAY = 100;
/**
* @private {goog.log.Logger}
* @const
*/
DelayedCommand.LOG_ = fxdriver.logging.getLogger('fxdriver.DelayedCommand');
/**
* Executes the command after the specified delay.
* @param {Number} ms The delay in milliseconds.
*/
DelayedCommand.prototype.execute = function(ms) {
if (this.response_.session.getWaitForPageLoad() && !this.yieldedForBackgroundExecution_) {
this.yieldedForBackgroundExecution_ = true;
fxdriver.profiler.log(
{'event': 'YIELD_TO_PAGE_LOAD', 'startorend': 'start'});
}
var self = this;
this.driver_.window.setTimeout(function() {
self.executeInternal_();
}, ms);
};
/**
* @return {boolean} Whether this instance should delay execution of its
* command for a pending request in the current window's nsILoadGroup.
*/
DelayedCommand.prototype.shouldDelayExecutionForPendingRequest_ = function() {
if (!this.response_.session.getWaitForPageLoad()) {
return false;
}
if (this.loadGroup_.isPending()) {
var hasOnLoadBlocker = false;
var numPending = 0;
var requests = this.loadGroup_.requests;
while (requests.hasMoreElements()) {
var request = null;
var rawRequest = requests.getNext();
try {
request = rawRequest.QueryInterface(Components.interfaces.nsIRequest);
} catch (e) {
// This may happen for pages that use WebSockets.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=765618
goog.log.info(DelayedCommand.LOG_,
'Ignoring non-nsIRequest: ' + rawRequest);
continue;
}
var isPending = false;
try {
isPending = request.isPending();
} catch (e) {
// Normal during page load, which means we should just return "true"
return true;
}
if (isPending) {
numPending += 1;
hasOnLoadBlocker = hasOnLoadBlocker ||
(request.name == 'about:document-onload-blocker');
if (numPending > 1) {
// More than one pending request, need to wait.
return true;
}
}
}
if (numPending && !hasOnLoadBlocker) {
      goog.log.info(DelayedCommand.LOG_,
          'Waiting on a single pending request that is not an ' +
          'about:document-onload-blocker');
// If we only have one pending request and it is not a
// document-onload-blocker, we need to wait. We do not wait for
// document-onload-blocker requests since these are created when
// one of document.[open|write|writeln] is called. If document.close is
// never called, the document-onload-blocker request will not be
// completed.
return true;
}
}
fxdriver.profiler.log(
{'event': 'YIELD_TO_PAGE_LOAD', 'startorend': 'end'});
return false;
};
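/**
 * Runs each precondition check against the session document, throwing the
 * first error returned by a failing precondition.
 * @param {Array.<Function>} preconditions The precondition functions to run.
 * @param {fxdriver.CommandResponse} respond The response object for the
 *     command being executed.
 * @param {Object} parameters The command parameters.
 * @private
 */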
DelayedCommand.prototype.checkPreconditions_ = function(preconditions, respond, parameters) {
if (!preconditions) {
return;
}
var toThrow = null;
var length = preconditions.length;
for (var i = 0; i < length; i++) {
toThrow = preconditions[i](respond.session.getDocument(), parameters);
if (toThrow) {
throw toThrow;
}
}
};
/**
* Attempts to execute the command. If the window is not ready for the command
* to execute, will set a timeout to try again.
* @private
*/
DelayedCommand.prototype.executeInternal_ = function() {
if (this.shouldDelayExecutionForPendingRequest_()) {
return this.execute(this.sleepDelay_);
}
// Ugh! New windows open on "about:blank" before going to their
// destination URL. This check attempts to tell the difference between a
// newly opened window and someone actually wanting to do something on
// about:blank.
if (this.driver_.window.location == 'about:blank' && !this.onBlank_) {
this.onBlank_ = true;
return this.execute(this.sleepDelay_);
} else {
try {
this.response_.name = this.command_.name;
// TODO(simon): This is rampantly ugly, but allows an alert to kill the command
// TODO(simon): This is never cleared, but _should_ be okay, because send wipes itself
this.driver_.response_ = this.response_;
var response = this.response_;
DelayedCommand.execTimer = new fxdriver.Timer();
var startTime = new Date().getTime();
var endTime = startTime + this.response_.session.getImplicitWait();
var name = this.command_.name;
var driverFunction = this.driver_[name] || WebElement[name];
var parameters = this.command_.parameters;
var func = goog.bind(driverFunction, this.driver_,
this.response_, parameters);
var guards = goog.bind(this.checkPreconditions_, this,
driverFunction.preconditions, this.response_, parameters);
var toExecute = function() {
try {
guards();
func();
} catch (e) {
if (new Date().getTime() < endTime) {
DelayedCommand.execTimer.setTimeout(toExecute, 100);
} else {
if (!e.isWebDriverError) {
goog.log.error(
DelayedCommand.LOG_,
'Exception caught by driver: ' + name + '(' + parameters + ')',
e);
}
response.sendError(e);
}
}
};
toExecute();
} catch (e) {
if (!e.isWebDriverError) {
goog.log.error(DelayedCommand.LOG_,
'Exception caught by driver: ' + this.command_.name +
'(' + this.command_.parameters + ')', e);
}
this.response_.sendError(e);
}
}
};
/**
* Class for dispatching WebDriver requests. Handles window locating commands
* (e.g. switching, searching, etc.), all other commands are executed with the
* {@code FirefoxDriver} through reflection. Note this is a singleton class.
* @constructor
*/
var nsCommandProcessor = function() {
this.wrappedJSObject = this;
this.wm = Components.classes['@mozilla.org/appshell/window-mediator;1'].
getService(Components.interfaces.nsIWindowMediator);
};
/**
* @private {goog.log.Logger}
* @const
*/
nsCommandProcessor.LOG_ = fxdriver.logging.getLogger(
'fxdriver.nsCommandProcessor');
/**
* Flags for the {@code nsIClassInfo} interface.
* @type {Number}
*/
nsCommandProcessor.prototype.flags =
Components.interfaces.nsIClassInfo.DOM_OBJECT;
/**
 * Implementation language detail for the {@code nsIClassInfo} interface.
* @type {String}
*/
nsCommandProcessor.prototype.implementationLanguage =
Components.interfaces.nsIProgrammingLanguage.JAVASCRIPT;
/**
* Processes a command request for the {@code FirefoxDriver}.
* @param {string} jsonCommandString The command to execute, specified in a
* JSON string.
* @param {nsIResponseHandler} responseHandler The callback to send the response
* to.
*/
nsCommandProcessor.prototype.execute = function(jsonCommandString,
responseHandler) {
var command, response;
try {
command = JSON.parse(jsonCommandString);
} catch (ex) {
response = JSON.stringify({
'status': bot.ErrorCode.UNKNOWN_ERROR,
'value': 'Error parsing command: "' + jsonCommandString + '"'
});
responseHandler.handleResponse(response);
return;
}
response = new fxdriver.CommandResponse(command, responseHandler);
// These commands do not require a session.
if (command.name == 'newSession' ||
command.name == 'quit' ||
command.name == 'getStatus' ||
command.name == 'getWindowHandles') {
goog.log.info(nsCommandProcessor.LOG_,
'Received command: ' + command.name);
try {
this[command.name](response, command.parameters);
} catch (ex) {
response.sendError(ex);
}
return;
}
var sessionId = command.sessionId;
if (!sessionId) {
response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR,
'No session ID specified'));
return;
}
try {
response.session = Components.
classes['@googlecode.com/webdriver/wdsessionstoreservice;1'].
getService(Components.interfaces.nsISupports).
wrappedJSObject.
getSession(sessionId).
wrappedJSObject;
} catch (ex) {
response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR,
'Session not found: ' + sessionId));
return;
}
goog.log.info(nsCommandProcessor.LOG_, 'Received command: ' + command.name);
if (command.name == 'getSessionCapabilities' ||
command.name == 'switchToWindow' ||
command.name == 'getLog' ||
command.name == 'getAvailableLogTypes') {
return this[command.name](response, command.parameters);
}
var sessionWindow = response.session.getChromeWindow();
var driver = sessionWindow.fxdriver; // TODO(jmleyba): We only need to store an ID on the window!
if (!driver) {
response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR,
'Session [' + response.session.getId() + '] has no driver.' +
' The browser window may have been closed.'));
return;
}
try {
var contentWindow = sessionWindow.getBrowser().contentWindow;
if (!contentWindow) {
response.sendError(new WebDriverError(bot.ErrorCode.NO_SUCH_WINDOW,
'Window not found. The browser window may have been closed.'));
return;
}
} catch (ff45) {
response.sendError(new WebDriverError(bot.ErrorCode.NO_SUCH_WINDOW,
'Window not found. The browser window may have been closed.'));
return;
}
if (driver.modalOpen) {
if (command.name != 'getAlertText' &&
command.name != 'setAlertValue' &&
command.name != 'acceptAlert' &&
command.name != 'dismissAlert') {
var modalText = driver.modalOpen;
var unexpectedAlertBehaviour = fxdriver.modals.getUnexpectedAlertBehaviour();
switch (unexpectedAlertBehaviour) {
case 'accept':
fxdriver.modals.closeUnhandledAlert(response, driver, true);
break;
case 'ignore':
// do nothing, ignore the alert
response.sendError(new WebDriverError(bot.ErrorCode.UNEXPECTED_ALERT_OPEN,
'Modal dialog present', {alert: {text: modalText}}));
break;
// Dismiss is the default
case 'dismiss':
default:
fxdriver.modals.closeUnhandledAlert(response, driver, false);
break;
}
return;
}
}
if (typeof driver[command.name] != 'function' && typeof WebElement[command.name] != 'function') {
response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_COMMAND,
'Unrecognised command: ' + command.name));
goog.log.error(nsCommandProcessor.LOG_,
'Unknown command: ' + command.name);
return;
}
  if (command.name == 'get' || command.name == 'refresh') {
response.session.setWaitForPageLoad(false);
}
// TODO: should we delay commands if the page is reloaded on itself?
// var pageLoadTimeout = response.session.getPageLoadTimeout();
// var shouldWaitForPageLoad = response.session.getWaitForPageLoad();
// if (pageLoadTimeout != 0 && shouldWaitForPageLoad) {
// driver.window.setTimeout(function () {
// response.session.setWaitForPageLoad(false);
// }, pageLoadTimeout);
// }
response.startCommand(sessionWindow);
new DelayedCommand(driver, command, response).execute(0);
};
/**
* Changes the context of the caller to the specified window.
* @param {fxdriver.CommandResponse} response The response object to send the
* command response in.
* @param {{name: string}} parameters The command parameters.
* @param {number} opt_searchAttempt Which attempt this is at finding the
* window to switch to.
*/
nsCommandProcessor.prototype.switchToWindow = function(response, parameters,
opt_searchAttempt) {
var lookFor = parameters.name;
var matches = function(win, lookFor) {
return !win.closed &&
(win.top && win.top.fxdriver) &&
(win.content && win.content.name == lookFor) ||
(win.top && win.top.fxdriver && win.top.fxdriver.id == lookFor);
};
var windowFound = this.searchWindows_('navigator:browser', function(win) {
if (matches(win, lookFor)) {
win.focus();
if (win.top.fxdriver) {
response.session.setChromeWindow(win.top);
response.value = response.session.getId();
response.send();
} else {
response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR,
'No driver found attached to top window!'));
}
// Found the desired window, stop the search.
return true;
}
});
// It is possible that the window won't be found on the first attempt. This is
// typically true for anchors with a target attribute set. This search could
  // execute before the target window has finished loading, meaning the content
// window won't have a name or FirefoxDriver instance yet (see matches above).
// If we don't find the window, set a timeout and try again.
if (!windowFound) {
// TODO(jmleyba): We should be sniffing the current windows to detect if
// one is still loading vs. a brute force "try again"
var searchAttempt = opt_searchAttempt || 0;
if (searchAttempt > 3) {
response.sendError(new WebDriverError(bot.ErrorCode.NO_SUCH_WINDOW,
'Unable to locate window "' + lookFor + '"'));
} else {
var self = this;
this.wm.getMostRecentWindow('navigator:browser').
setTimeout(function() {
self.switchToWindow(response, parameters, (searchAttempt + 1));
}, 500);
}
}
};
/**
* Retrieves a list of all known FirefoxDriver windows.
* @param {fxdriver.CommandResponse} response The response object to send the
* command response in.
*/
nsCommandProcessor.prototype.getWindowHandles = function(response) {
var res = [];
this.searchWindows_('navigator:browser', function(win) {
if (win.top && win.top.fxdriver) {
res.push(win.top.fxdriver.id);
}
});
response.value = res;
response.send();
};
/**
* Retrieves the log for the given type.
*
* @param {!fxdriver.CommandResponse} response The response object to send the
* response in.
* @param {!Object.<string, *>} parameters The parameters for the call.
*/
nsCommandProcessor.prototype.getLog = function(response, parameters) {
var res = fxdriver.logging.getLog(parameters.type);
// Convert log level object to string
goog.array.forEach(res, function(entry) {
entry.level = entry.level.name;
});
response.value = res;
response.send();
};
/**
* Retrieves available log types.
*
* @param {!fxdriver.CommandResponse} response The response object to send the
* response in.
* @param {Object.<string, *>} parameters The parameters for the call.
*/
nsCommandProcessor.prototype.getAvailableLogTypes = function(response,
parameters) {
response.value = fxdriver.logging.getAvailableLogTypes();
response.send();
};
/**
* Searches over a selection of windows, calling a visitor function on each
* window found in the search.
* @param {?string} search_criteria The category of windows to search or
* {@code null} to search all windows.
* @param {function(!Window)} visitor_fn A visitor function to call with each
* window. The function may return true to indicate that the window search
* should abort early.
* @return {boolean} Whether the visitor function short circuited the search.
*/
nsCommandProcessor.prototype.searchWindows_ = function(search_criteria,
visitor_fn) {
var allWindows = this.wm.getEnumerator(search_criteria);
while (allWindows.hasMoreElements()) {
var win = allWindows.getNext();
if (visitor_fn(win)) {
return true;
}
}
return false;
};
/**
* Responds with general status information about this process.
* @param {fxdriver.CommandResponse} response The object to send the command
* response in.
*/
nsCommandProcessor.prototype.getStatus = function(response) {
var xulRuntime = Components.classes['@mozilla.org/xre/app-info;1'].
getService(Components.interfaces.nsIXULRuntime);
response.value = {
'os': {
'arch': (function() {
try {
// See https://developer.mozilla.org/en/XPCOM_ABI
return (xulRuntime.XPCOMABI || 'unknown').split('-')[0];
} catch (ignored) {
return 'unknown';
}
})(),
// See https://developer.mozilla.org/en/OS_TARGET
'name': xulRuntime.OS,
'version': 'unknown'
},
// TODO: load these values from build.properties
'build': {
'revision': 'unknown',
'time': 'unknown',
'version': 'unknown'
}
};
response.send();
};
/**
 * Creates a new session bound to the most recently used FirefoxDriver
 * window.
 * @param {fxdriver.CommandResponse} response The object to send the command
 *     response in.
 * @param {Object} parameters The desired and required capabilities for the
 *     new session.
 */
nsCommandProcessor.prototype.newSession = function(response, parameters) {
var win = this.wm.getMostRecentWindow('navigator:browser');
var driver = win.fxdriver;
if (!driver) {
response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR,
'No drivers associated with the window'));
} else {
var sessionStore = Components.
classes['@googlecode.com/webdriver/wdsessionstoreservice;1'].
getService(Components.interfaces.nsISupports);
var desiredCapabilities = parameters['desiredCapabilities'];
var requiredCapabilities = parameters['requiredCapabilities'];
var session = sessionStore.wrappedJSObject.createSession(response,
desiredCapabilities, requiredCapabilities, driver);
session = session.wrappedJSObject; // XPConnect...
session.setChromeWindow(win);
if ('elementScrollBehavior' in desiredCapabilities) {
session.elementScrollBehavior = desiredCapabilities['elementScrollBehavior'];
}
response.session = session;
response.sessionId = session.getId();
goog.log.info(nsCommandProcessor.LOG_,
'Created a new session with id: ' + session.getId());
this.getSessionCapabilities(response);
}
response.send();
};
/**
* Describes a session.
* @param {fxdriver.CommandResponse} response The object to send the command
* response in.
*/
nsCommandProcessor.prototype.getSessionCapabilities = function(response) {
var appInfo = Components.classes['@mozilla.org/xre/app-info;1'].
getService(Components.interfaces.nsIXULAppInfo);
var xulRuntime = Components.classes['@mozilla.org/xre/app-info;1'].
getService(Components.interfaces.nsIXULRuntime);
response.value = {
'cssSelectorsEnabled': true,
'browserName': 'firefox',
'handlesAlerts': true,
'javascriptEnabled': true,
'nativeEvents': false,
// See https://developer.mozilla.org/en/OS_TARGET
'platform': (xulRuntime.OS == 'WINNT' ? 'WINDOWS' : xulRuntime.OS),
'rotatable': false,
'takesScreenshot': true,
'version': appInfo.version
};
var prefStore = fxdriver.moz.getService('@mozilla.org/preferences-service;1',
'nsIPrefService');
for (var cap in wdSessionStoreService.CAPABILITY_PREFERENCE_MAPPING) {
var pref = wdSessionStoreService.CAPABILITY_PREFERENCE_MAPPING[cap];
try {
response.value[cap] = prefStore.getBoolPref(pref);
} catch (e) {
try {
response.value[cap] = prefStore.getIntPref(pref);
} catch (e) {
try {
response.value[cap] = prefStore.getCharPref(pref);
} catch (e) {
}
}
}
}
response.send();
};
/**
* Forcefully shuts down the Firefox application.
* @param {fxdriver.CommandResponse} response The object to send the command
* response in.
*/
nsCommandProcessor.prototype.quit = function(response) {
// Go ahead and respond to the command request to acknowledge that we are
// shutting down. We do this because once we force a quit, there's no way
// to respond. Clients will just have to trust that this shutdown didn't
// fail. Or they could monitor the PID. Either way, not much we can do about
// it in here.
response.send();
wdSession.quitBrowser(500);
};
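/**
 * Part of the {@code nsIClassInfo} interface: returns the XPCOM interfaces
 * implemented by this component.
 * @param {Object} count Out parameter that receives the number of interfaces.
 * @return {Array} The implemented interfaces.
 */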
nsCommandProcessor.prototype.getInterfaces = function(count) {
var ifaces = [
Components.interfaces.nsICommandProcessor,
Components.interfaces.nsISupports
];
count.value = ifaces.length;
return ifaces;
};
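/**
 * Implements {@code nsISupports.QueryInterface}; only nsICommandProcessor
 * and nsISupports are supported.
 * @param {nsIIDRef} aIID The ID of the requested interface.
 * @return {!nsCommandProcessor} This instance.
 */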
nsCommandProcessor.prototype.QueryInterface = function(aIID) {
if (!aIID.equals(Components.interfaces.nsICommandProcessor) &&
!aIID.equals(Components.interfaces.nsISupports)) {
throw Components.results.NS_ERROR_NO_INTERFACE;
}
return this;
};
nsCommandProcessor.CLASS_ID =
Components.ID('{692e5117-a4a2-4b00-99f7-0685285b4db5}');
nsCommandProcessor.CLASS_NAME = 'Firefox WebDriver CommandProcessor';
nsCommandProcessor.CONTRACT_ID =
'@googlecode.com/webdriver/command-processor;1';
/**
* Factory object for obtaining a reference to the singleton instance of
* {@code CommandProcessor}.
*/
nsCommandProcessor.Factory = {
instance_: null,
createInstance: function(aOuter, aIID) {
if (aOuter != null) {
throw Components.results.NS_ERROR_NO_AGGREGATION;
}
if (!this.instance_) {
this.instance_ = new nsCommandProcessor();
}
return this.instance_.QueryInterface(aIID);
}
};
/**
* Module definition for registering this XPCOM component.
*/
nsCommandProcessor.Module = {
firstTime_: true,
registerSelf: function(aCompMgr, aFileSpec, aLocation, aType) {
if (this.firstTime_) {
this.firstTime_ = false;
throw Components.results.NS_ERROR_FACTORY_REGISTER_AGAIN;
}
aCompMgr.QueryInterface(Components.interfaces.nsIComponentRegistrar).
registerFactoryLocation(
nsCommandProcessor.CLASS_ID,
nsCommandProcessor.CLASS_NAME,
nsCommandProcessor.CONTRACT_ID,
aFileSpec, aLocation, aType);
},
unregisterSelf: function(aCompMgr, aLocation) {
aCompMgr.QueryInterface(Components.interfaces.nsIComponentRegistrar).
unregisterFactoryLocation(nsCommandProcessor.CLASS_ID, aLocation);
},
getClassObject: function(aCompMgr, aCID, aIID) {
if (!aIID.equals(Components.interfaces.nsIFactory)) {
throw Components.results.NS_ERROR_NOT_IMPLEMENTED;
} else if (!aCID.equals(nsCommandProcessor.CLASS_ID)) {
throw Components.results.NS_ERROR_NO_INTERFACE;
}
return nsCommandProcessor.Factory;
},
canUnload: function() {
return true;
}
};
/**
* Module initialization.
*/
NSGetModule = function() {
return nsCommandProcessor.Module;
};
nsCommandProcessor.prototype.classID = nsCommandProcessor.CLASS_ID;
fxdriver.moz.load('resource://gre/modules/XPCOMUtils.jsm');
if (XPCOMUtils.generateNSGetFactory) {
/** @const */ NSGetFactory = XPCOMUtils.generateNSGetFactory([nsCommandProcessor]);
}
| 1 | 13,586 | /javascript/firefox-driver is the Selenium implementation of a WebDriver for Firefox. Since it generally isn't W3C compatible, it shouldn't change. We can just drop this change. | SeleniumHQ-selenium | py |
@@ -369,8 +369,8 @@ func msgToApplication(msg model.Message) (*Application, error) {
return app, nil
}
-// TODO: upgrade to parallel process
 // Process translates the msg to an Application, processes it, and sends the response to the edge
+// TODO: upgrade to parallel process
func (c *Center) Process(msg model.Message) {
app, err := msgToApplication(msg)
if err != nil { | 1 | package application
import (
"context"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/klog/v2"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/beehive/pkg/core/model"
"github.com/kubeedge/kubeedge/cloud/pkg/common/client"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
"github.com/kubeedge/kubeedge/cloud/pkg/dynamiccontroller/messagelayer"
"github.com/kubeedge/kubeedge/edge/pkg/common/message"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub"
"github.com/kubeedge/kubeedge/pkg/metaserver"
)
// used to set Message.Route
const (
MetaServerSource = "metaserver"
ApplicationResource = "Application"
ApplicationResp = "applicationResponse"
Ignore = "ignore"
)
type applicationStatus string
const (
// set by agent
PreApplying applicationStatus = "PreApplying" // application is waiting to be sent to cloud
InApplying applicationStatus = "InApplying" // application is sending to cloud
// set by center
	InProcessing applicationStatus = "InProcessing" // application is being processed by the cloud
Approved applicationStatus = "Approved" // application is approved by cloud
Rejected applicationStatus = "Rejected" // application is rejected by cloud
// both
Failed applicationStatus = "Failed" // failed to get application resp from cloud
Completed applicationStatus = "Completed" // application is completed and waiting to be recycled
)
type applicationVerb string
const (
Get applicationVerb = "get"
List applicationVerb = "list"
Watch applicationVerb = "watch"
Create applicationVerb = "create"
Delete applicationVerb = "delete"
Update applicationVerb = "update"
UpdateStatus applicationVerb = "updatestatus"
Patch applicationVerb = "patch"
)
type PatchInfo struct {
Name string
PatchType types.PatchType
Data []byte
Options metav1.PatchOptions
Subresources []string
}
// Application records a resource request that is waiting to be served by the cloud. Usage:
// 0. use Agent.Generate to generate the application
// 1. use Agent.Apply to apply the application (generate a msg and send it to the cloud dynamiccontroller)
type Application struct {
ID string
Key string // group version resource namespaces name
Verb applicationVerb
Nodename string
Status applicationStatus
Reason string // why in this status
Option []byte //
ReqBody []byte // better a k8s api instance
RespBody []byte
ctx context.Context // to end app.Wait
cancel context.CancelFunc
	count uint64 // reference count of current users of this application
countLock sync.Mutex
//TODO: add lock
}
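// newApplication builds an Application in the PreApplying state, converting
// internal-version list options to metav1.ListOptions before serializing them.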
func newApplication(ctx context.Context, key string, verb applicationVerb, nodename string, option interface{}, reqBody interface{}) *Application {
var v1 metav1.ListOptions
if internal, ok := option.(metainternalversion.ListOptions); ok {
err := metainternalversion.Convert_internalversion_ListOptions_To_v1_ListOptions(&internal, &v1, nil)
if err != nil {
		// This error should never happen; log it just in case.
		klog.Errorf("failed to convert internal ListOptions to v1 ListOptions, forcing empty options")
}
option = v1
}
ctx2, cancel := context.WithCancel(ctx)
app := &Application{
Key: key,
Verb: verb,
Nodename: nodename,
Status: PreApplying,
Option: toBytes(option),
ReqBody: toBytes(reqBody),
ctx: ctx2,
cancel: cancel,
count: 0,
countLock: sync.Mutex{},
}
app.add()
return app
}
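// Identifier derives a stable md5-based ID from the application's node name,
// key, verb, option and request body, caching it on first use.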
func (a *Application) Identifier() string {
if a.ID != "" {
return a.ID
}
b := []byte(a.Nodename)
b = append(b, []byte(a.Key)...)
b = append(b, []byte(a.Verb)...)
b = append(b, a.Option...)
b = append(b, a.ReqBody...)
a.ID = fmt.Sprintf("%x", md5.Sum(b))
return a.ID
}
func (a *Application) String() string {
return fmt.Sprintf("(NodeName=%v;Key=%v;Verb=%v;Status=%v;Reason=%v)", a.Nodename, a.Key, a.Verb, a.Status, a.Reason)
}
func (a *Application) ReqContent() interface{} {
return a.ReqBody
}
func (a *Application) RespContent() interface{} {
return a.RespBody
}
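// ToListener converts the application's list options into a SelectorListener,
// restricting the field selector to the application's namespace when one is set.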
func (a *Application) ToListener(option metav1.ListOptions) *SelectorListener {
gvr, namespace, _ := metaserver.ParseKey(a.Key)
selector := NewSelector(option.LabelSelector, option.FieldSelector)
if namespace != "" {
selector.Field = fields.AndSelectors(selector.Field, fields.OneTermEqualSelector("metadata.namespace", namespace))
}
l := NewSelectorListener(a.Nodename, gvr, selector)
return l
}
// OptionTo unmarshals Option into i. Remember: i must be a pointer to an initialized variable.
func (a *Application) OptionTo(i interface{}) error {
err := json.Unmarshal(a.Option, i)
if err != nil {
return fmt.Errorf("failed to prase Option bytes, %v", err)
}
return nil
}
func (a *Application) ReqBodyTo(i interface{}) error {
err := json.Unmarshal(a.ReqBody, i)
if err != nil {
return fmt.Errorf("failed to parse ReqBody bytes, %v", err)
}
return nil
}
func (a *Application) RespBodyTo(i interface{}) error {
err := json.Unmarshal(a.RespBody, i)
if err != nil {
return fmt.Errorf("failed to parse RespBody bytes, %v", err)
}
return nil
}
// GVR returns the application's GroupVersionResource parsed from its Key.
func (a *Application) GVR() schema.GroupVersionResource {
gvr, _, _ := metaserver.ParseKey(a.Key)
return gvr
}
func (a *Application) Namespace() string {
_, ns, _ := metaserver.ParseKey(a.Key)
return ns
}
func (a *Application) Call() {
if a.cancel != nil {
a.cancel()
}
}
func (a *Application) getStatus() applicationStatus {
return a.Status
}
// Wait blocks until the application result arrives after it is applied by the application agent.
func (a *Application) Wait() {
if a.ctx != nil {
<-a.ctx.Done()
}
}
func (a *Application) Reset() {
if a.ctx != nil && a.cancel != nil {
a.cancel()
}
a.ctx, a.cancel = context.WithCancel(beehiveContext.GetContext())
a.Reason = ""
a.RespBody = []byte{}
}
func (a *Application) add() {
a.countLock.Lock()
a.count++
a.countLock.Unlock()
}
func (a *Application) getCount() uint64 {
a.countLock.Lock()
c := a.count
a.countLock.Unlock()
return c
}
// Close must be called when the applicant is no longer using the application.
func (a *Application) Close() {
a.countLock.Lock()
a.count--
if a.count == 0 {
a.Status = Completed
}
a.countLock.Unlock()
}
// Agent is used to generate applications and apply them to the cloud.
type Agent struct {
Applications sync.Map //store struct application
nodeName string
}
// NewApplicationAgent creates an application Agent; nodeName is edged's config.Config.HostnameOverride.
func NewApplicationAgent(nodeName string) *Agent {
return &Agent{nodeName: nodeName}
}
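// Generate builds an Application from the request context and registers it
// with the agent; an identical in-flight Application is reused (and its
// reference count increased) instead of creating a duplicate.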
func (a *Agent) Generate(ctx context.Context, verb applicationVerb, option interface{}, obj runtime.Object) *Application {
key, err := metaserver.KeyFuncReq(ctx, "")
if err != nil {
klog.Errorf("%v", err)
return &Application{}
}
app := newApplication(ctx, key, verb, a.nodeName, option, obj)
store, ok := a.Applications.LoadOrStore(app.Identifier(), app)
if ok {
app = store.(*Application)
app.add()
return app
}
return app
}
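// Apply sends the registered Application to the cloud according to its
// current status and blocks until it is approved, rejected or failed.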
func (a *Agent) Apply(app *Application) error {
store, ok := a.Applications.Load(app.Identifier())
if !ok {
return fmt.Errorf("Application %v has not been registered to agent", app.String())
}
app = store.(*Application)
switch app.getStatus() {
case PreApplying:
go a.doApply(app)
case Completed:
app.Reset()
go a.doApply(app)
case Rejected, Failed:
return errors.New(app.Reason)
case Approved:
return nil
case InApplying:
//continue
}
app.Wait()
if app.getStatus() != Approved {
return errors.New(app.Reason)
}
return nil
}
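// doApply wraps the Application in a beehive message, sends it synchronously
// to the cloud dynamiccontroller through edgehub, and merges the returned
// application into the local one.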
func (a *Agent) doApply(app *Application) {
defer app.Call()
// encapsulate as a message
app.Status = InApplying
msg := model.NewMessage("").SetRoute(MetaServerSource, modules.DynamicControllerModuleGroup).FillBody(app)
msg.SetResourceOperation("null", "null")
resp, err := beehiveContext.SendSync(edgehub.ModuleNameEdgeHub, *msg, 10*time.Second)
if err != nil {
app.Status = Failed
app.Reason = fmt.Sprintf("failed to access cloud Application center: %v", err)
return
}
retApp, err := msgToApplication(resp)
if err != nil {
app.Status = Failed
app.Reason = fmt.Sprintf("failed to get Application from resp msg: %v", err)
return
}
	// merge the returned application into the local application
app.Status = retApp.Status
app.Reason = retApp.Reason
app.RespBody = retApp.RespBody
}
func (a *Agent) GC() {
}
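// Center is the cloud-side counterpart of Agent: it receives Application
// messages from edge nodes, runs them against the Kubernetes API through the
// dynamic client, and sends the results back to the edge.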
type Center struct {
Applications sync.Map
HandlerCenter
messageLayer messagelayer.MessageLayer
kubeclient dynamic.Interface
}
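// NewApplicationCenter creates a Center wired to the shared dynamic informer
// factory, the dynamic Kubernetes client and the context-based message layer.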
func NewApplicationCenter(dynamicSharedInformerFactory dynamicinformer.DynamicSharedInformerFactory) *Center {
a := &Center{
HandlerCenter: NewHandlerCenter(dynamicSharedInformerFactory),
kubeclient: client.GetDynamicClient(),
messageLayer: messagelayer.NewContextMessageLayer(),
}
return a
}
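// toBytes marshals the given value to JSON bytes; []byte values are returned
// unchanged and nil yields an empty slice.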
func toBytes(i interface{}) []byte {
if i == nil {
return []byte{}
}
var bytes []byte
var err error
switch i := i.(type) {
case []byte:
bytes = i
default:
bytes, err = json.Marshal(i)
if err != nil {
klog.Fatalf("marshal content to []byte failed, err: %v", err)
}
}
return bytes
}
// msgToApplication extracts the Application from the message's Content.
func msgToApplication(msg model.Message) (*Application, error) {
var app = new(Application)
err := json.Unmarshal(toBytes(msg.Content), app)
if err != nil {
return nil, err
}
return app, nil
}
// TODO: upgrade to parallel process
// Process translates the msg to an Application, processes it, and sends the response to the edge
func (c *Center) Process(msg model.Message) {
app, err := msgToApplication(msg)
if err != nil {
klog.Errorf("failed to translate msg to Application: %v", err)
return
}
klog.Infof("[metaserver/ApplicationCenter] get a Application %v", app.String())
gvr, ns, name := metaserver.ParseKey(app.Key)
err = func() error {
app.Status = InProcessing
switch app.Verb {
case List:
var option = new(metav1.ListOptions)
if err := app.OptionTo(option); err != nil {
return err
}
err := c.HandlerCenter.AddListener(app.ToListener(*option))
if err != nil {
return fmt.Errorf("failed to add listener, %v", err)
}
list, err := c.kubeclient.Resource(app.GVR()).Namespace(app.Namespace()).List(context.TODO(), *option)
if err != nil {
return fmt.Errorf("successfully to add listener but failed to get current list, %v", err)
}
c.Response(app, msg.GetID(), Approved, "", list)
case Watch:
var option = new(metav1.ListOptions)
if err := app.OptionTo(option); err != nil {
return err
}
err := c.HandlerCenter.AddListener(app.ToListener(*option))
if err != nil {
return fmt.Errorf("failed to add listener, %v", err)
}
c.Response(app, msg.GetID(), Approved, "", nil)
case Get:
var option = new(metav1.GetOptions)
if err := app.OptionTo(option); err != nil {
return err
}
retObj, err := c.kubeclient.Resource(gvr).Namespace(ns).Get(context.TODO(), name, *option)
if err != nil {
return err
}
c.Response(app, msg.GetID(), Approved, "", retObj)
case Create:
var option = new(metav1.CreateOptions)
if err := app.OptionTo(option); err != nil {
return err
}
var obj = new(unstructured.Unstructured)
if err := app.ReqBodyTo(obj); err != nil {
return err
}
retObj, err := c.kubeclient.Resource(gvr).Namespace(ns).Create(context.TODO(), obj, *option)
if err != nil {
return err
}
c.Response(app, msg.GetID(), Approved, "", retObj)
case Delete:
var option = new(metav1.DeleteOptions)
if err := app.OptionTo(&option); err != nil {
return err
}
err := c.kubeclient.Resource(gvr).Namespace(ns).Delete(context.TODO(), name, *option)
if err != nil {
return err
}
c.Response(app, msg.GetID(), Approved, "", nil)
case Update:
var option = new(metav1.UpdateOptions)
if err := app.OptionTo(option); err != nil {
return err
}
var obj = new(unstructured.Unstructured)
if err := app.ReqBodyTo(obj); err != nil {
return err
}
retObj, err := c.kubeclient.Resource(gvr).Namespace(ns).Update(context.TODO(), obj, *option)
if err != nil {
return err
}
c.Response(app, msg.GetID(), Approved, "", retObj)
case UpdateStatus:
var option = new(metav1.UpdateOptions)
if err := app.OptionTo(option); err != nil {
return err
}
var obj = new(unstructured.Unstructured)
if err := app.ReqBodyTo(obj); err != nil {
return err
}
retObj, err := c.kubeclient.Resource(gvr).Namespace(ns).UpdateStatus(context.TODO(), obj, *option)
if err != nil {
return err
}
c.Response(app, msg.GetID(), Approved, "", retObj)
case Patch:
var pi = new(PatchInfo)
if err := app.OptionTo(pi); err != nil {
return err
}
retObj, err := c.kubeclient.Resource(gvr).Namespace(ns).Patch(context.TODO(), pi.Name, pi.PatchType, pi.Data, pi.Options, pi.Subresources...)
if err != nil {
return err
}
c.Response(app, msg.GetID(), Approved, "", retObj)
default:
return fmt.Errorf("unsupported Application Verb type :%v", app.Verb)
}
return nil
}()
	if err != nil {
		c.Response(app, msg.GetID(), Rejected, err.Error(), nil)
		klog.Errorf("[metaserver/applicationCenter] failed to process Application(%+v), %v", app, err)
		return
	}
	klog.Infof("[metaserver/applicationCenter] successfully processed Application(%+v)", app)
}
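// NOTE: a minimal sketch of the "upgrade to parallel process" TODO above,
// assuming Process is currently invoked serially from a single receive
// loop; processParallel, msgCh and workers are illustrative names, not
// part of the existing code:
//
//	func (c *Center) processParallel(msgCh <-chan model.Message, workers int) {
//		for i := 0; i < workers; i++ {
//			go func() {
//				// Each worker drains messages until msgCh is closed.
//				for msg := range msgCh {
//					c.Process(msg)
//				}
//			}()
//		}
//	}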
// Response updates the application, then generates and sends the response message to the edge.
func (c *Center) Response(app *Application, parentID string, status applicationStatus, reason string, respContent interface{}) {
app.Status = status
app.Reason = reason
if respContent != nil {
app.RespBody = toBytes(respContent)
}
msg := model.NewMessage(parentID)
msg.Content = app
resource, err := messagelayer.BuildResource(app.Nodename, Ignore, ApplicationResource, Ignore)
if err != nil {
klog.Warningf("built message resource failed with error: %s", err)
return
}
msg.BuildRouter(modules.DynamicControllerModuleName, message.ResourceGroupName, resource, ApplicationResp)
if err := c.messageLayer.Response(*msg); err != nil {
klog.Warningf("send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource())
} else {
klog.V(4).Infof("send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource())
}
}
func (c *Center) GC() {
}
| 1 | 21,131 | is this pr support parallelly process application? | kubeedge-kubeedge | go |
@@ -48,7 +48,9 @@ func MustEnableIssues(ctx *middleware.Context) {
}
func MustEnablePulls(ctx *middleware.Context) {
- if !ctx.Repo.Repository.EnablePulls {
+ if !ctx.Repo.Repository.CanEnablePulls() {
+ ctx.Handle(404, "Unsupported", nil)
+ } else if !ctx.Repo.Repository.EnablePulls {
ctx.Handle(404, "MustEnablePulls", nil)
}
| 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/Unknwon/com"
"github.com/Unknwon/paginater"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/mailer"
"github.com/gogits/gogs/modules/middleware"
"github.com/gogits/gogs/modules/setting"
)
const (
ISSUES base.TplName = "repo/issue/list"
ISSUE_NEW base.TplName = "repo/issue/new"
ISSUE_VIEW base.TplName = "repo/issue/view"
LABELS base.TplName = "repo/issue/labels"
MILESTONE base.TplName = "repo/issue/milestones"
MILESTONE_NEW base.TplName = "repo/issue/milestone_new"
MILESTONE_EDIT base.TplName = "repo/issue/milestone_edit"
)
var (
ErrFileTypeForbidden = errors.New("File type is not allowed")
ErrTooManyFiles = errors.New("Maximum number of files to upload exceeded")
)
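// MustEnableIssues responds with a 404 when the repository has issues disabled.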
func MustEnableIssues(ctx *middleware.Context) {
if !ctx.Repo.Repository.EnableIssues {
ctx.Handle(404, "MustEnableIssues", nil)
}
}
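// MustEnablePulls responds with a 404 when the repository has pull requests
// disabled, and records whether the viewer has forked the repository.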
func MustEnablePulls(ctx *middleware.Context) {
if !ctx.Repo.Repository.EnablePulls {
ctx.Handle(404, "MustEnablePulls", nil)
}
ctx.Data["HasForkedRepo"] = ctx.IsSigned && ctx.User.HasForkedRepo(ctx.Repo.Repository.ID)
}
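// NOTE: the patch above introduces a CanEnablePulls check on
// models.Repository; a minimal sketch of such a guard, assuming pull
// requests are simply unsupported for mirror repositories (the exact
// condition is an assumption, not taken from this file):
//
//	func (repo *Repository) CanEnablePulls() bool {
//		return !repo.IsMirror
//	}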
func RetrieveLabels(ctx *middleware.Context) {
labels, err := models.GetLabelsByRepoID(ctx.Repo.Repository.ID)
if err != nil {
ctx.Handle(500, "RetrieveLabels.GetLabels: %v", err)
return
}
for _, l := range labels {
l.CalOpenIssues()
}
ctx.Data["Labels"] = labels
ctx.Data["NumLabels"] = len(labels)
}
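// Issues renders the issue or pull request list page, applying the filters
// given in the query string.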
func Issues(ctx *middleware.Context) {
isPullList := ctx.Params(":type") == "pulls"
if isPullList {
MustEnablePulls(ctx)
if ctx.Written() {
return
}
ctx.Data["Title"] = ctx.Tr("repo.pulls")
ctx.Data["PageIsPullList"] = true
} else {
MustEnableIssues(ctx)
if ctx.Written() {
return
}
ctx.Data["Title"] = ctx.Tr("repo.issues")
ctx.Data["PageIsIssueList"] = true
}
viewType := ctx.Query("type")
sortType := ctx.Query("sort")
types := []string{"assigned", "created_by", "mentioned"}
if !com.IsSliceContainsStr(types, viewType) {
viewType = "all"
}
// Must sign in to see issues about you.
if viewType != "all" && !ctx.IsSigned {
ctx.SetCookie("redirect_to", "/"+url.QueryEscape(setting.AppSubUrl+ctx.Req.RequestURI), 0, setting.AppSubUrl)
ctx.Redirect(setting.AppSubUrl + "/user/login")
return
}
var (
assigneeID = ctx.QueryInt64("assignee")
posterID int64
)
filterMode := models.FM_ALL
switch viewType {
case "assigned":
filterMode = models.FM_ASSIGN
assigneeID = ctx.User.Id
case "created_by":
filterMode = models.FM_CREATE
posterID = ctx.User.Id
case "mentioned":
filterMode = models.FM_MENTION
}
var uid int64 = -1
if ctx.IsSigned {
uid = ctx.User.Id
}
repo := ctx.Repo.Repository
selectLabels := ctx.Query("labels")
milestoneID := ctx.QueryInt64("milestone")
isShowClosed := ctx.Query("state") == "closed"
issueStats := models.GetIssueStats(&models.IssueStatsOptions{
RepoID: repo.ID,
UserID: uid,
LabelID: com.StrTo(selectLabels).MustInt64(),
MilestoneID: milestoneID,
AssigneeID: assigneeID,
FilterMode: filterMode,
IsPull: isPullList,
})
page := ctx.QueryInt("page")
if page <= 1 {
page = 1
}
var total int
if !isShowClosed {
total = int(issueStats.OpenCount)
} else {
total = int(issueStats.ClosedCount)
}
pager := paginater.New(total, setting.IssuePagingNum, page, 5)
ctx.Data["Page"] = pager
// Get issues.
issues, err := models.Issues(&models.IssuesOptions{
UserID: uid,
AssigneeID: assigneeID,
RepoID: repo.ID,
PosterID: posterID,
MilestoneID: milestoneID,
Page: pager.Current(),
IsClosed: isShowClosed,
IsMention: filterMode == models.FM_MENTION,
IsPull: isPullList,
Labels: selectLabels,
SortType: sortType,
})
if err != nil {
ctx.Handle(500, "Issues: %v", err)
return
}
// Get issue-user relations.
pairs, err := models.GetIssueUsers(repo.ID, posterID, isShowClosed)
if err != nil {
ctx.Handle(500, "GetIssueUsers: %v", err)
return
}
// Get posters.
for i := range issues {
if err = issues[i].GetPoster(); err != nil {
ctx.Handle(500, "GetPoster", fmt.Errorf("[#%d]%v", issues[i].ID, err))
return
}
if err = issues[i].GetLabels(); err != nil {
ctx.Handle(500, "GetLabels", fmt.Errorf("[#%d]%v", issues[i].ID, err))
return
}
if !ctx.IsSigned {
issues[i].IsRead = true
continue
}
// Check read status.
idx := models.PairsContains(pairs, issues[i].ID, ctx.User.Id)
if idx > -1 {
issues[i].IsRead = pairs[idx].IsRead
} else {
issues[i].IsRead = true
}
}
ctx.Data["Issues"] = issues
// Get milestones.
ctx.Data["Milestones"], err = models.GetAllRepoMilestones(repo.ID)
if err != nil {
ctx.Handle(500, "GetAllRepoMilestones: %v", err)
return
}
// Get assignees.
ctx.Data["Assignees"], err = repo.GetAssignees()
if err != nil {
ctx.Handle(500, "GetAssignees: %v", err)
return
}
ctx.Data["IssueStats"] = issueStats
ctx.Data["SelectLabels"] = com.StrTo(selectLabels).MustInt64()
ctx.Data["ViewType"] = viewType
ctx.Data["SortType"] = sortType
ctx.Data["MilestoneID"] = milestoneID
ctx.Data["AssigneeID"] = assigneeID
ctx.Data["IsShowClosed"] = isShowClosed
if isShowClosed {
ctx.Data["State"] = "closed"
} else {
ctx.Data["State"] = "open"
}
ctx.HTML(200, ISSUES)
}
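// renderAttachmentSettings exposes the attachment upload settings to the
// template context.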
func renderAttachmentSettings(ctx *middleware.Context) {
ctx.Data["RequireDropzone"] = true
ctx.Data["IsAttachmentEnabled"] = setting.AttachmentEnabled
ctx.Data["AttachmentAllowedTypes"] = setting.AttachmentAllowedTypes
ctx.Data["AttachmentMaxSize"] = setting.AttachmentMaxSize
ctx.Data["AttachmentMaxFiles"] = setting.AttachmentMaxFiles
}
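// RetrieveRepoMilestonesAndAssignees loads the repository's open and closed
// milestones and its assignees into the template context.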
func RetrieveRepoMilestonesAndAssignees(ctx *middleware.Context, repo *models.Repository) {
var err error
ctx.Data["OpenMilestones"], err = models.GetMilestones(repo.ID, -1, false)
if err != nil {
ctx.Handle(500, "GetMilestones: %v", err)
return
}
ctx.Data["ClosedMilestones"], err = models.GetMilestones(repo.ID, -1, true)
if err != nil {
ctx.Handle(500, "GetMilestones: %v", err)
return
}
ctx.Data["Assignees"], err = repo.GetAssignees()
if err != nil {
ctx.Handle(500, "GetAssignees: %v", err)
return
}
}
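// RetrieveRepoMetas loads the labels, milestones and assignees an admin can
// attach to an issue; it returns nil for non-admins.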
func RetrieveRepoMetas(ctx *middleware.Context, repo *models.Repository) []*models.Label {
if !ctx.Repo.IsAdmin() {
return nil
}
labels, err := models.GetLabelsByRepoID(repo.ID)
if err != nil {
ctx.Handle(500, "GetLabelsByRepoID: %v", err)
return nil
}
ctx.Data["Labels"] = labels
RetrieveRepoMilestonesAndAssignees(ctx, repo)
if ctx.Written() {
return nil
}
return labels
}
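// NewIssue renders the new issue form.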
func NewIssue(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("repo.issues.new")
ctx.Data["PageIsIssueList"] = true
renderAttachmentSettings(ctx)
RetrieveRepoMetas(ctx, ctx.Repo.Repository)
if ctx.Written() {
return
}
ctx.Data["RequireHighlightJS"] = true
ctx.HTML(200, ISSUE_NEW)
}
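// ValidateRepoMetas validates the labels, milestone and assignee submitted
// with an issue form and returns their IDs; non-admins get zero values.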
func ValidateRepoMetas(ctx *middleware.Context, form auth.CreateIssueForm) ([]int64, int64, int64) {
var (
repo = ctx.Repo.Repository
err error
)
labels := RetrieveRepoMetas(ctx, ctx.Repo.Repository)
if ctx.Written() {
return nil, 0, 0
}
if !ctx.Repo.IsAdmin() {
return nil, 0, 0
}
// Check labels.
labelIDs := base.StringsToInt64s(strings.Split(form.LabelIDs, ","))
labelIDMark := base.Int64sToMap(labelIDs)
hasSelected := false
for i := range labels {
if labelIDMark[labels[i].ID] {
labels[i].IsChecked = true
hasSelected = true
}
}
ctx.Data["HasSelectedLabel"] = hasSelected
ctx.Data["label_ids"] = form.LabelIDs
ctx.Data["Labels"] = labels
// Check milestone.
milestoneID := form.MilestoneID
if milestoneID > 0 {
ctx.Data["Milestone"], err = repo.GetMilestoneByID(milestoneID)
if err != nil {
ctx.Handle(500, "GetMilestoneByID: %v", err)
return nil, 0, 0
}
ctx.Data["milestone_id"] = milestoneID
}
// Check assignee.
assigneeID := form.AssigneeID
if assigneeID > 0 {
ctx.Data["Assignee"], err = repo.GetAssigneeByID(assigneeID)
if err != nil {
ctx.Handle(500, "GetAssigneeByID: %v", err)
return nil, 0, 0
}
ctx.Data["assignee_id"] = assigneeID
}
return labelIDs, milestoneID, assigneeID
}
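// notifyWatchersAndMentions updates the issue's mention records and, when
// mail notification is enabled, mails watchers and mentioned users.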
func notifyWatchersAndMentions(ctx *middleware.Context, issue *models.Issue) {
// Update mentions
mentions := base.MentionPattern.FindAllString(issue.Content, -1)
if len(mentions) > 0 {
for i := range mentions {
mentions[i] = strings.TrimSpace(mentions[i])[1:]
}
if err := models.UpdateMentions(mentions, issue.ID); err != nil {
ctx.Handle(500, "UpdateMentions", err)
return
}
}
repo := ctx.Repo.Repository
// Mail watchers and mentions.
if setting.Service.EnableNotifyMail {
tos, err := mailer.SendIssueNotifyMail(ctx.User, ctx.Repo.Owner, repo, issue)
if err != nil {
ctx.Handle(500, "SendIssueNotifyMail", err)
return
}
tos = append(tos, ctx.User.LowerName)
newTos := make([]string, 0, len(mentions))
for _, m := range mentions {
if com.IsSliceContainsStr(tos, m) {
continue
}
newTos = append(newTos, m)
}
if err = mailer.SendIssueMentionMail(ctx.Render, ctx.User, ctx.Repo.Owner,
repo, issue, models.GetUserEmailsByNames(newTos)); err != nil {
ctx.Handle(500, "SendIssueMentionMail", err)
return
}
}
}
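// NewIssuePost handles the form submission that creates a new issue.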
func NewIssuePost(ctx *middleware.Context, form auth.CreateIssueForm) {
ctx.Data["Title"] = ctx.Tr("repo.issues.new")
ctx.Data["PageIsIssueList"] = true
renderAttachmentSettings(ctx)
var (
repo = ctx.Repo.Repository
attachments []string
)
labelIDs, milestoneID, assigneeID := ValidateRepoMetas(ctx, form)
if ctx.Written() {
return
}
if setting.AttachmentEnabled {
attachments = form.Attachments
}
if ctx.HasError() {
ctx.HTML(200, ISSUE_NEW)
return
}
issue := &models.Issue{
RepoID: ctx.Repo.Repository.ID,
Index: repo.NextIssueIndex(),
Name: strings.TrimSpace(form.Title),
PosterID: ctx.User.Id,
Poster: ctx.User,
MilestoneID: milestoneID,
AssigneeID: assigneeID,
Content: form.Content,
}
if err := models.NewIssue(repo, issue, labelIDs, attachments); err != nil {
ctx.Handle(500, "NewIssue", err)
return
}
notifyWatchersAndMentions(ctx, issue)
if ctx.Written() {
return
}
log.Trace("Issue created: %d/%d", repo.ID, issue.ID)
ctx.Redirect(ctx.Repo.RepoLink + "/issues/" + com.ToStr(issue.Index))
}
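// UploadIssueAttachment validates an uploaded file against the allowed
// attachment types and stores it as a new attachment.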
func UploadIssueAttachment(ctx *middleware.Context) {
if !setting.AttachmentEnabled {
ctx.Error(404, "attachment is not enabled")
return
}
allowedTypes := strings.Split(setting.AttachmentAllowedTypes, ",")
file, header, err := ctx.Req.FormFile("file")
if err != nil {
ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
return
}
defer file.Close()
buf := make([]byte, 1024)
n, _ := file.Read(buf)
if n > 0 {
buf = buf[:n]
}
fileType := http.DetectContentType(buf)
allowed := false
for _, t := range allowedTypes {
t := strings.Trim(t, " ")
if t == "*/*" || t == fileType {
allowed = true
break
}
}
if !allowed {
ctx.Error(400, ErrFileTypeForbidden.Error())
return
}
attach, err := models.NewAttachment(header.Filename, buf, file)
if err != nil {
ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
return
}
log.Trace("New attachment uploaded: %s", attach.UUID)
ctx.JSON(200, map[string]string{
"uuid": attach.UUID,
})
}
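// ViewIssue renders the detail page of a single issue or pull request.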
func ViewIssue(ctx *middleware.Context) {
ctx.Data["RequireDropzone"] = true
renderAttachmentSettings(ctx)
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.Handle(404, "GetIssueByIndex", err)
} else {
ctx.Handle(500, "GetIssueByIndex", err)
}
return
}
ctx.Data["Title"] = issue.Name
	// Make sure the issue type and the URL match.
if ctx.Params(":type") == "issues" && issue.IsPull {
ctx.Redirect(ctx.Repo.RepoLink + "/pulls/" + com.ToStr(issue.Index))
return
} else if ctx.Params(":type") == "pulls" && !issue.IsPull {
ctx.Redirect(ctx.Repo.RepoLink + "/issues/" + com.ToStr(issue.Index))
return
}
if issue.IsPull {
if err = issue.GetPullRequest(); err != nil {
ctx.Handle(500, "GetPullRequest", err)
return
}
ctx.Data["PageIsPullList"] = true
ctx.Data["PageIsPullConversation"] = true
ctx.Data["HasForkedRepo"] = ctx.IsSigned && ctx.User.HasForkedRepo(ctx.Repo.Repository.ID)
} else {
MustEnableIssues(ctx)
if ctx.Written() {
return
}
ctx.Data["PageIsIssueList"] = true
}
if err = issue.GetPoster(); err != nil {
ctx.Handle(500, "GetPoster", err)
return
}
issue.RenderedContent = string(base.RenderMarkdown([]byte(issue.Content), ctx.Repo.RepoLink,
ctx.Repo.Repository.ComposeMetas()))
repo := ctx.Repo.Repository
// Get more information if it's a pull request.
if issue.IsPull {
if issue.HasMerged {
ctx.Data["DisableStatusChange"] = issue.HasMerged
PrepareMergedViewPullInfo(ctx, issue)
} else {
PrepareViewPullInfo(ctx, issue)
}
if ctx.Written() {
return
}
}
// Metas.
// Check labels.
if err = issue.GetLabels(); err != nil {
ctx.Handle(500, "GetLabels", err)
return
}
labelIDMark := make(map[int64]bool)
for i := range issue.Labels {
labelIDMark[issue.Labels[i].ID] = true
}
labels, err := models.GetLabelsByRepoID(repo.ID)
if err != nil {
ctx.Handle(500, "GetLabelsByRepoID: %v", err)
return
}
hasSelected := false
for i := range labels {
if labelIDMark[labels[i].ID] {
labels[i].IsChecked = true
hasSelected = true
}
}
ctx.Data["HasSelectedLabel"] = hasSelected
ctx.Data["Labels"] = labels
// Check milestone and assignee.
if ctx.Repo.IsAdmin() {
RetrieveRepoMilestonesAndAssignees(ctx, repo)
if ctx.Written() {
return
}
}
if ctx.IsSigned {
// Update issue-user.
if err = issue.ReadBy(ctx.User.Id); err != nil {
ctx.Handle(500, "ReadBy", err)
return
}
}
var (
tag models.CommentTag
ok bool
marked = make(map[int64]models.CommentTag)
comment *models.Comment
participants = make([]*models.User, 1, 10)
)
	// Render comments and fetch participants.
participants[0] = issue.Poster
for _, comment = range issue.Comments {
if comment.Type == models.COMMENT_TYPE_COMMENT {
comment.RenderedContent = string(base.RenderMarkdown([]byte(comment.Content), ctx.Repo.RepoLink,
ctx.Repo.Repository.ComposeMetas()))
// Check tag.
tag, ok = marked[comment.PosterID]
if ok {
comment.ShowTag = tag
continue
}
if repo.IsOwnedBy(comment.PosterID) ||
(repo.Owner.IsOrganization() && repo.Owner.IsOwnedBy(comment.PosterID)) {
comment.ShowTag = models.COMMENT_TAG_OWNER
} else if comment.Poster.IsAdminOfRepo(repo) {
comment.ShowTag = models.COMMENT_TAG_ADMIN
} else if comment.PosterID == issue.PosterID {
comment.ShowTag = models.COMMENT_TAG_POSTER
}
marked[comment.PosterID] = comment.ShowTag
isAdded := false
for j := range participants {
if comment.Poster == participants[j] {
isAdded = true
break
}
}
if !isAdded && !issue.IsPoster(comment.Poster.Id) {
participants = append(participants, comment.Poster)
}
}
}
ctx.Data["Participants"] = participants
ctx.Data["NumParticipants"] = len(participants)
ctx.Data["Issue"] = issue
ctx.Data["IsIssueOwner"] = ctx.Repo.IsAdmin() || (ctx.IsSigned && issue.IsPoster(ctx.User.Id))
ctx.Data["SignInLink"] = setting.AppSubUrl + "/user/login"
ctx.Data["RequireHighlightJS"] = true
ctx.HTML(200, ISSUE_VIEW)
}
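// getActionIssue loads the issue referenced by the ":index" URL parameter,
// responding with a 404 or 500 when it cannot be loaded.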
func getActionIssue(ctx *middleware.Context) *models.Issue {
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.Error(404, "GetIssueByIndex")
} else {
ctx.Handle(500, "GetIssueByIndex", err)
}
return nil
}
return issue
}
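// UpdateIssueTitle updates an issue's title on behalf of its poster or a
// repository admin.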
func UpdateIssueTitle(ctx *middleware.Context) {
issue := getActionIssue(ctx)
if ctx.Written() {
return
}
if !ctx.IsSigned || (ctx.User.Id != issue.PosterID && !ctx.Repo.IsAdmin()) {
ctx.Error(403)
return
}
issue.Name = ctx.QueryTrim("title")
if len(issue.Name) == 0 {
ctx.Error(204)
return
}
if err := models.UpdateIssue(issue); err != nil {
ctx.Handle(500, "UpdateIssue", err)
return
}
ctx.JSON(200, map[string]interface{}{
"title": issue.Name,
})
}
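// UpdateIssueContent updates an issue's body on behalf of its poster or a
// repository admin.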
func UpdateIssueContent(ctx *middleware.Context) {
issue := getActionIssue(ctx)
if ctx.Written() {
return
}
if !ctx.IsSigned || (ctx.User.Id != issue.PosterID && !ctx.Repo.IsAdmin()) {
ctx.Error(403)
return
}
issue.Content = ctx.Query("content")
if err := models.UpdateIssue(issue); err != nil {
ctx.Handle(500, "UpdateIssue", err)
return
}
ctx.JSON(200, map[string]interface{}{
"content": string(base.RenderMarkdown([]byte(issue.Content), ctx.Query("context"), ctx.Repo.Repository.ComposeMetas())),
})
}
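// UpdateIssueLabel attaches or detaches a single label, or clears all
// labels, on an issue.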
func UpdateIssueLabel(ctx *middleware.Context) {
issue := getActionIssue(ctx)
if ctx.Written() {
return
}
if ctx.Query("action") == "clear" {
if err := issue.ClearLabels(); err != nil {
ctx.Handle(500, "ClearLabels", err)
return
}
} else {
isAttach := ctx.Query("action") == "attach"
label, err := models.GetLabelByID(ctx.QueryInt64("id"))
if err != nil {
if models.IsErrLabelNotExist(err) {
ctx.Error(404, "GetLabelByID")
} else {
ctx.Handle(500, "GetLabelByID", err)
}
return
}
if isAttach && !issue.HasLabel(label.ID) {
if err = issue.AddLabel(label); err != nil {
ctx.Handle(500, "AddLabel", err)
return
}
} else if !isAttach && issue.HasLabel(label.ID) {
if err = issue.RemoveLabel(label); err != nil {
ctx.Handle(500, "RemoveLabel", err)
return
}
}
}
ctx.JSON(200, map[string]interface{}{
"ok": true,
})
}
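// UpdateIssueMilestone changes the milestone an issue belongs to.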
func UpdateIssueMilestone(ctx *middleware.Context) {
issue := getActionIssue(ctx)
if ctx.Written() {
return
}
oldMid := issue.MilestoneID
mid := ctx.QueryInt64("id")
if oldMid == mid {
ctx.JSON(200, map[string]interface{}{
"ok": true,
})
return
}
	// Do not check for an invalid milestone ID; that responsibility is left to the owners.
issue.MilestoneID = mid
if err := models.ChangeMilestoneAssign(oldMid, issue); err != nil {
ctx.Handle(500, "ChangeMilestoneAssign", err)
return
}
ctx.JSON(200, map[string]interface{}{
"ok": true,
})
}
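// UpdateIssueAssignee changes the user an issue is assigned to.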
func UpdateIssueAssignee(ctx *middleware.Context) {
issue := getActionIssue(ctx)
if ctx.Written() {
return
}
aid := ctx.QueryInt64("id")
if issue.AssigneeID == aid {
ctx.JSON(200, map[string]interface{}{
"ok": true,
})
return
}
	// Do not check for an invalid assignee ID; that responsibility is left to the owners.
issue.AssigneeID = aid
if err := models.UpdateIssueUserByAssignee(issue); err != nil {
ctx.Handle(500, "UpdateIssueUserByAssignee: %v", err)
return
}
ctx.JSON(200, map[string]interface{}{
"ok": true,
})
}
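// NewComment creates a comment on an issue or pull request; admins and the
// poster may also reopen or close the issue through the same form.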
func NewComment(ctx *middleware.Context, form auth.CreateCommentForm) {
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.Handle(404, "GetIssueByIndex", err)
} else {
ctx.Handle(500, "GetIssueByIndex", err)
}
return
}
if issue.IsPull {
if err = issue.GetPullRequest(); err != nil {
ctx.Handle(500, "GetPullRequest", err)
return
}
}
var attachments []string
if setting.AttachmentEnabled {
attachments = form.Attachments
}
if ctx.HasError() {
ctx.Flash.Error(ctx.Data["ErrorMsg"].(string))
ctx.Redirect(fmt.Sprintf("%s/issues/%d", ctx.Repo.RepoLink, issue.Index))
return
}
var comment *models.Comment
defer func() {
		// Check if the issue admin/poster is changing the status of the issue.
if (ctx.Repo.IsAdmin() || (ctx.IsSigned && issue.IsPoster(ctx.User.Id))) &&
(form.Status == "reopen" || form.Status == "close") &&
!(issue.IsPull && issue.HasMerged) {
			// Duplication and conflict checks should apply when reopening a pull request.
var pr *models.PullRequest
if form.Status == "reopen" && issue.IsPull {
pull := issue.PullRequest
pr, err = models.GetUnmergedPullRequest(pull.HeadRepoID, pull.BaseRepoID, pull.HeadBranch, pull.BaseBranch)
if err != nil {
if !models.IsErrPullRequestNotExist(err) {
ctx.Handle(500, "GetUnmergedPullRequest", err)
return
}
}
// Regenerate patch and test conflict.
if pr == nil {
if err = issue.UpdatePatch(); err != nil {
ctx.Handle(500, "UpdatePatch", err)
return
}
issue.AddToTaskQueue()
}
}
if pr != nil {
ctx.Flash.Info(ctx.Tr("repo.pulls.open_unmerged_pull_exists", pr.Index))
} else {
issue.Repo = ctx.Repo.Repository
if err = issue.ChangeStatus(ctx.User, form.Status == "close"); err != nil {
log.Error(4, "ChangeStatus: %v", err)
} else {
log.Trace("Issue[%d] status changed to closed: %v", issue.ID, issue.IsClosed)
}
}
}
// Redirect to comment hashtag if there is any actual content.
typeName := "issues"
if issue.IsPull {
typeName = "pulls"
}
if comment != nil {
ctx.Redirect(fmt.Sprintf("%s/%s/%d#%s", ctx.Repo.RepoLink, typeName, issue.Index, comment.HashTag()))
} else {
ctx.Redirect(fmt.Sprintf("%s/%s/%d", ctx.Repo.RepoLink, typeName, issue.Index))
}
}()
// Fix #321: Allow empty comments, as long as we have attachments.
if len(form.Content) == 0 && len(attachments) == 0 {
return
}
comment, err = models.CreateIssueComment(ctx.User, ctx.Repo.Repository, issue, form.Content, attachments)
if err != nil {
ctx.Handle(500, "CreateIssueComment", err)
return
}
notifyWatchersAndMentions(ctx, &models.Issue{
ID: issue.ID,
Index: issue.Index,
Name: issue.Name,
Content: form.Content,
})
if ctx.Written() {
return
}
log.Trace("Comment created: %d/%d/%d", ctx.Repo.Repository.ID, issue.ID, comment.ID)
}
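// UpdateCommentContent edits a comment's content on behalf of its poster or
// a repository admin.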
func UpdateCommentContent(ctx *middleware.Context) {
comment, err := models.GetCommentByID(ctx.ParamsInt64(":id"))
if err != nil {
if models.IsErrCommentNotExist(err) {
ctx.Error(404, "GetCommentByID")
} else {
ctx.Handle(500, "GetCommentByID", err)
}
return
}
if !ctx.IsSigned || (ctx.User.Id != comment.PosterID && !ctx.Repo.IsAdmin()) {
ctx.Error(403)
return
} else if comment.Type != models.COMMENT_TYPE_COMMENT {
ctx.Error(204)
return
}
comment.Content = ctx.Query("content")
if len(comment.Content) == 0 {
ctx.JSON(200, map[string]interface{}{
"content": "",
})
return
}
if err := models.UpdateComment(comment); err != nil {
ctx.Handle(500, "UpdateComment", err)
return
}
ctx.JSON(200, map[string]interface{}{
"content": string(base.RenderMarkdown([]byte(comment.Content), ctx.Query("context"), ctx.Repo.Repository.ComposeMetas())),
})
}
func Labels(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("repo.labels")
ctx.Data["PageIsIssueList"] = true
ctx.Data["PageIsLabels"] = true
ctx.Data["RequireMinicolors"] = true
ctx.HTML(200, LABELS)
}
func NewLabel(ctx *middleware.Context, form auth.CreateLabelForm) {
ctx.Data["Title"] = ctx.Tr("repo.labels")
ctx.Data["PageIsLabels"] = true
if ctx.HasError() {
ctx.Flash.Error(ctx.Data["ErrorMsg"].(string))
ctx.Redirect(ctx.Repo.RepoLink + "/labels")
return
}
l := &models.Label{
RepoID: ctx.Repo.Repository.ID,
Name: form.Title,
Color: form.Color,
}
if err := models.NewLabel(l); err != nil {
ctx.Handle(500, "NewLabel", err)
return
}
ctx.Redirect(ctx.Repo.RepoLink + "/labels")
}
func UpdateLabel(ctx *middleware.Context, form auth.CreateLabelForm) {
l, err := models.GetLabelByID(form.ID)
if err != nil {
switch {
case models.IsErrLabelNotExist(err):
ctx.Error(404)
default:
ctx.Handle(500, "UpdateLabel", err)
}
return
}
l.Name = form.Title
l.Color = form.Color
if err := models.UpdateLabel(l); err != nil {
ctx.Handle(500, "UpdateLabel", err)
return
}
ctx.Redirect(ctx.Repo.RepoLink + "/labels")
}
func DeleteLabel(ctx *middleware.Context) {
if err := models.DeleteLabel(ctx.Repo.Repository.ID, ctx.QueryInt64("id")); err != nil {
ctx.Flash.Error("DeleteLabel: " + err.Error())
} else {
ctx.Flash.Success(ctx.Tr("repo.issues.label_deletion_success"))
}
ctx.JSON(200, map[string]interface{}{
"redirect": ctx.Repo.RepoLink + "/labels",
})
return
}
func Milestones(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("repo.milestones")
ctx.Data["PageIsIssueList"] = true
ctx.Data["PageIsMilestones"] = true
isShowClosed := ctx.Query("state") == "closed"
openCount, closedCount := models.MilestoneStats(ctx.Repo.Repository.ID)
ctx.Data["OpenCount"] = openCount
ctx.Data["ClosedCount"] = closedCount
page := ctx.QueryInt("page")
if page <= 1 {
page = 1
}
var total int
if !isShowClosed {
total = int(openCount)
} else {
total = int(closedCount)
}
ctx.Data["Page"] = paginater.New(total, setting.IssuePagingNum, page, 5)
miles, err := models.GetMilestones(ctx.Repo.Repository.ID, page, isShowClosed)
if err != nil {
ctx.Handle(500, "GetMilestones", err)
return
}
for _, m := range miles {
m.RenderedContent = string(base.RenderMarkdown([]byte(m.Content), ctx.Repo.RepoLink, ctx.Repo.Repository.ComposeMetas()))
m.CalOpenIssues()
}
ctx.Data["Milestones"] = miles
if isShowClosed {
ctx.Data["State"] = "closed"
} else {
ctx.Data["State"] = "open"
}
ctx.Data["IsShowClosed"] = isShowClosed
ctx.HTML(200, MILESTONE)
}
func NewMilestone(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("repo.milestones.new")
ctx.Data["PageIsIssueList"] = true
ctx.Data["PageIsMilestones"] = true
ctx.Data["RequireDatetimepicker"] = true
ctx.Data["DateLang"] = setting.DateLang(ctx.Locale.Language())
ctx.HTML(200, MILESTONE_NEW)
}
func NewMilestonePost(ctx *middleware.Context, form auth.CreateMilestoneForm) {
ctx.Data["Title"] = ctx.Tr("repo.milestones.new")
ctx.Data["PageIsIssueList"] = true
ctx.Data["PageIsMilestones"] = true
ctx.Data["RequireDatetimepicker"] = true
ctx.Data["DateLang"] = setting.DateLang(ctx.Locale.Language())
if ctx.HasError() {
ctx.HTML(200, MILESTONE_NEW)
return
}
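// An empty deadline means "no deadline"; a far-future sentinel date is stored instead.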
if len(form.Deadline) == 0 {
form.Deadline = "9999-12-31"
}
deadline, err := time.ParseInLocation("2006-01-02", form.Deadline, time.Local)
if err != nil {
ctx.Data["Err_Deadline"] = true
ctx.RenderWithErr(ctx.Tr("repo.milestones.invalid_due_date_format"), MILESTONE_NEW, &form)
return
}
if err = models.NewMilestone(&models.Milestone{
RepoID: ctx.Repo.Repository.ID,
Name: form.Title,
Content: form.Content,
Deadline: deadline,
}); err != nil {
ctx.Handle(500, "NewMilestone", err)
return
}
ctx.Flash.Success(ctx.Tr("repo.milestones.create_success", form.Title))
ctx.Redirect(ctx.Repo.RepoLink + "/milestones")
}
func EditMilestone(ctx *middleware.Context) {
ctx.Data["Title"] = ctx.Tr("repo.milestones.edit")
ctx.Data["PageIsMilestones"] = true
ctx.Data["PageIsEditMilestone"] = true
ctx.Data["RequireDatetimepicker"] = true
ctx.Data["DateLang"] = setting.DateLang(ctx.Locale.Language())
m, err := models.GetMilestoneByID(ctx.ParamsInt64(":id"))
if err != nil {
if models.IsErrMilestoneNotExist(err) {
ctx.Handle(404, "GetMilestoneByID", nil)
} else {
ctx.Handle(500, "GetMilestoneByID", err)
}
return
}
ctx.Data["title"] = m.Name
ctx.Data["content"] = m.Content
if len(m.DeadlineString) > 0 {
ctx.Data["deadline"] = m.DeadlineString
}
ctx.HTML(200, MILESTONE_NEW)
}
func EditMilestonePost(ctx *middleware.Context, form auth.CreateMilestoneForm) {
ctx.Data["Title"] = ctx.Tr("repo.milestones.edit")
ctx.Data["PageIsMilestones"] = true
ctx.Data["PageIsEditMilestone"] = true
ctx.Data["RequireDatetimepicker"] = true
ctx.Data["DateLang"] = setting.DateLang(ctx.Locale.Language())
if ctx.HasError() {
ctx.HTML(200, MILESTONE_NEW)
return
}
if len(form.Deadline) == 0 {
form.Deadline = "9999-12-31"
}
deadline, err := time.ParseInLocation("2006-01-02", form.Deadline, time.Local)
if err != nil {
ctx.Data["Err_Deadline"] = true
ctx.RenderWithErr(ctx.Tr("repo.milestones.invalid_due_date_format"), MILESTONE_NEW, &form)
return
}
m, err := models.GetMilestoneByID(ctx.ParamsInt64(":id"))
if err != nil {
if models.IsErrMilestoneNotExist(err) {
ctx.Handle(404, "GetMilestoneByID", nil)
} else {
ctx.Handle(500, "GetMilestoneByID", err)
}
return
}
m.Name = form.Title
m.Content = form.Content
m.Deadline = deadline
if err = models.UpdateMilestone(m); err != nil {
ctx.Handle(500, "UpdateMilestone", err)
return
}
ctx.Flash.Success(ctx.Tr("repo.milestones.edit_success", m.Name))
ctx.Redirect(ctx.Repo.RepoLink + "/milestones")
}
func ChangeMilestoneStatus(ctx *middleware.Context) {
m, err := models.GetMilestoneByID(ctx.ParamsInt64(":id"))
if err != nil {
if models.IsErrMilestoneNotExist(err) {
ctx.Handle(404, "GetMilestoneByID", err)
} else {
ctx.Handle(500, "GetMilestoneByID", err)
}
return
}
switch ctx.Params(":action") {
case "open":
if m.IsClosed {
if err = models.ChangeMilestoneStatus(m, false); err != nil {
ctx.Handle(500, "ChangeMilestoneStatus", err)
return
}
}
ctx.Redirect(ctx.Repo.RepoLink + "/milestones?state=open")
case "close":
if !m.IsClosed {
m.ClosedDate = time.Now()
if err = models.ChangeMilestoneStatus(m, true); err != nil {
ctx.Handle(500, "ChangeMilestoneStatus", err)
return
}
}
ctx.Redirect(ctx.Repo.RepoLink + "/milestones?state=closed")
default:
ctx.Redirect(ctx.Repo.RepoLink + "/milestones")
}
}
func DeleteMilestone(ctx *middleware.Context) {
if err := models.DeleteMilestoneByID(ctx.QueryInt64("id")); err != nil {
ctx.Flash.Error("DeleteMilestoneByID: " + err.Error())
} else {
ctx.Flash.Success(ctx.Tr("repo.milestones.deletion_success"))
}
ctx.JSON(200, map[string]interface{}{
"redirect": ctx.Repo.RepoLink + "/milestones",
})
}
| 1 | 10,396 | Assuming that when `EnablePulls` is `true`, `CanEnablePulls` must be `true` as well, this `if` check is redundant. Actually... we have `AllowsPulls` now... why not use that? | gogs-gogs | go
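A minimal sketch of the consolidation the reviewer suggests, assuming a hypothetical `AllowsPulls` helper on the repository model (the receiver, field, and method names are illustrative, not necessarily the actual Gogs API):

// AllowsPulls reports whether pull requests may be used on this repository:
// the feature must be supported by the repository and explicitly enabled.
func (repo *Repository) AllowsPulls() bool {
return repo.CanEnablePulls() && repo.EnablePulls
}

A call site can then use a single `if !repo.AllowsPulls()` guard instead of testing the two flags separately.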
@@ -637,7 +637,7 @@ public class ScheduleServlet extends LoginAbstractAzkabanServlet {
if (flow == null) {
ret.put("status", "error");
ret.put("message", "Flow " + flowName + " cannot be found in project "
- + project);
+ + projectName);
return;
}
| 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.webapp.servlet;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDateTime;
import org.joda.time.Minutes;
import org.joda.time.ReadablePeriod;
import org.joda.time.format.DateTimeFormat;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.executor.ExecutorManagerException;
import azkaban.flow.Flow;
import azkaban.flow.Node;
import azkaban.project.Project;
import azkaban.project.ProjectLogEvent.EventType;
import azkaban.project.ProjectManager;
import azkaban.scheduler.Schedule;
import azkaban.scheduler.ScheduleManager;
import azkaban.scheduler.ScheduleManagerException;
import azkaban.server.session.Session;
import azkaban.server.HttpRequestUtils;
import azkaban.sla.SlaOption;
import azkaban.user.Permission;
import azkaban.user.Permission.Type;
import azkaban.user.User;
import azkaban.user.UserManager;
import azkaban.utils.JSONUtils;
import azkaban.utils.SplitterOutputStream;
import azkaban.utils.Utils;
import azkaban.webapp.AzkabanWebServer;
import azkaban.webapp.SchedulerStatistics;
public class ScheduleServlet extends LoginAbstractAzkabanServlet {
private static final long serialVersionUID = 1L;
private static final Logger logger = Logger.getLogger(ScheduleServlet.class);
private ProjectManager projectManager;
private ScheduleManager scheduleManager;
private UserManager userManager;
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
AzkabanWebServer server = (AzkabanWebServer) getApplication();
userManager = server.getUserManager();
projectManager = server.getProjectManager();
scheduleManager = server.getScheduleManager();
}
@Override
protected void handleGet(HttpServletRequest req, HttpServletResponse resp,
Session session) throws ServletException, IOException {
if (hasParam(req, "ajax")) {
handleAJAXAction(req, resp, session);
} else if (hasParam(req, "calendar")) {
handleGetScheduleCalendar(req, resp, session);
} else {
handleGetAllSchedules(req, resp, session);
}
}
private void handleAJAXAction(HttpServletRequest req,
HttpServletResponse resp, Session session) throws ServletException,
IOException {
HashMap<String, Object> ret = new HashMap<String, Object>();
String ajaxName = getParam(req, "ajax");
if (ajaxName.equals("slaInfo")) {
ajaxSlaInfo(req, ret, session.getUser());
} else if (ajaxName.equals("setSla")) {
ajaxSetSla(req, ret, session.getUser());
} else if (ajaxName.equals("loadFlow")) {
ajaxLoadFlows(req, ret, session.getUser());
} else if (ajaxName.equals("loadHistory")) {
ajaxLoadHistory(req, resp, session.getUser());
ret = null;
} else if (ajaxName.equals("scheduleFlow")) {
ajaxScheduleFlow(req, ret, session.getUser());
} else if (ajaxName.equals("scheduleCronFlow")) {
ajaxScheduleCronFlow(req, ret, session.getUser());
} else if (ajaxName.equals("fetchSchedule")) {
ajaxFetchSchedule(req, ret, session.getUser());
}
if (ret != null) {
this.writeJSON(resp, ret);
}
}
private void ajaxSetSla(HttpServletRequest req, HashMap<String, Object> ret,
User user) {
try {
int scheduleId = getIntParam(req, "scheduleId");
Schedule sched = scheduleManager.getSchedule(scheduleId);
Project project = projectManager.getProject(sched.getProjectId());
if (!hasPermission(project, user, Permission.Type.SCHEDULE)) {
ret.put("error", "User " + user
+ " does not have permission to set SLA for this flow.");
return;
}
String emailStr = getParam(req, "slaEmails");
String[] emailSplit = emailStr.split("\\s*,\\s*|\\s*;\\s*|\\s+");
List<String> slaEmails = Arrays.asList(emailSplit);
Map<String, String> settings = getParamGroup(req, "settings");
List<SlaOption> slaOptions = new ArrayList<SlaOption>();
for (String set : settings.keySet()) {
SlaOption sla;
try {
sla = parseSlaSetting(settings.get(set));
} catch (Exception e) {
throw new ServletException(e);
}
if (sla != null) {
sla.getInfo().put(SlaOption.INFO_FLOW_NAME, sched.getFlowName());
sla.getInfo().put(SlaOption.INFO_EMAIL_LIST, slaEmails);
slaOptions.add(sla);
}
}
sched.setSlaOptions(slaOptions);
scheduleManager.insertSchedule(sched);
if (slaOptions != null) {
projectManager.postProjectEvent(project, EventType.SLA,
user.getUserId(), "SLA for flow " + sched.getFlowName()
+ " has been added/changed.");
}
} catch (ServletException e) {
ret.put("error", e.getMessage());
} catch (ScheduleManagerException e) {
ret.put("error", e.getMessage());
}
}
private SlaOption parseSlaSetting(String set) throws ScheduleManagerException {
logger.info("Tryint to set sla with the following set: " + set);
String slaType;
List<String> slaActions = new ArrayList<String>();
Map<String, Object> slaInfo = new HashMap<String, Object>();
String[] parts = set.split(",", -1);
String id = parts[0];
String rule = parts[1];
String duration = parts[2];
String emailAction = parts[3];
String killAction = parts[4];
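// For example, a setting string of "myjob,SUCCESS,2:30,true,false" means:
// job "myjob" must succeed within 2 hours 30 minutes, alert by email, do not kill.
// An empty id applies the rule to the whole flow rather than a single job.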
if (emailAction.equals("true") || killAction.equals("true")) {
if (emailAction.equals("true")) {
slaActions.add(SlaOption.ACTION_ALERT);
slaInfo.put(SlaOption.ALERT_TYPE, "email");
}
if (killAction.equals("true")) {
slaActions.add(SlaOption.ACTION_CANCEL_FLOW);
}
if (id.equals("")) {
if (rule.equals("SUCCESS")) {
slaType = SlaOption.TYPE_FLOW_SUCCEED;
} else {
slaType = SlaOption.TYPE_FLOW_FINISH;
}
} else {
slaInfo.put(SlaOption.INFO_JOB_NAME, id);
if (rule.equals("SUCCESS")) {
slaType = SlaOption.TYPE_JOB_SUCCEED;
} else {
slaType = SlaOption.TYPE_JOB_FINISH;
}
}
ReadablePeriod dur;
try {
dur = parseDuration(duration);
} catch (Exception e) {
throw new ScheduleManagerException(
"Unable to parse duration for a SLA that needs to take actions!", e);
}
slaInfo.put(SlaOption.INFO_DURATION, Utils.createPeriodString(dur));
SlaOption r = new SlaOption(slaType, slaActions, slaInfo);
logger.info("Parsing sla as id:" + id + " type:" + slaType + " rule:"
+ rule + " Duration:" + duration + " actions:" + slaActions);
return r;
}
return null;
}
private ReadablePeriod parseDuration(String duration) {
int hour = Integer.parseInt(duration.split(":")[0]);
int min = Integer.parseInt(duration.split(":")[1]);
return Minutes.minutes(min + hour * 60).toPeriod();
}
private void ajaxFetchSchedule(HttpServletRequest req,
HashMap<String, Object> ret, User user) throws ServletException {
int projectId = getIntParam(req, "projectId");
String flowId = getParam(req, "flowId");
try {
Schedule schedule = scheduleManager.getSchedule(projectId, flowId);
if (schedule != null) {
Map<String, Object> jsonObj = new HashMap<String, Object>();
jsonObj.put("scheduleId", Integer.toString(schedule.getScheduleId()));
jsonObj.put("submitUser", schedule.getSubmitUser());
jsonObj.put("firstSchedTime",
utils.formatDateTime(schedule.getFirstSchedTime()));
jsonObj.put("nextExecTime",
utils.formatDateTime(schedule.getNextExecTime()));
jsonObj.put("period", utils.formatPeriod(schedule.getPeriod()));
jsonObj.put("cronExpression", schedule.getCronExpression());
jsonObj.put("executionOptions", schedule.getExecutionOptions());
ret.put("schedule", jsonObj);
}
} catch (ScheduleManagerException e) {
ret.put("error", e);
}
}
private void ajaxSlaInfo(HttpServletRequest req, HashMap<String, Object> ret,
User user) {
int scheduleId;
try {
scheduleId = getIntParam(req, "scheduleId");
Schedule sched = scheduleManager.getSchedule(scheduleId);
Project project =
getProjectAjaxByPermission(ret, sched.getProjectId(), user, Type.READ);
if (project == null) {
ret.put("error",
"Error loading project. Project " + sched.getProjectId()
+ " doesn't exist");
return;
}
Flow flow = project.getFlow(sched.getFlowName());
if (flow == null) {
ret.put("error", "Error loading flow. Flow " + sched.getFlowName()
+ " doesn't exist in " + sched.getProjectId());
return;
}
List<SlaOption> slaOptions = sched.getSlaOptions();
ExecutionOptions flowOptions = sched.getExecutionOptions();
if (slaOptions != null && slaOptions.size() > 0) {
ret.put("slaEmails",
slaOptions.get(0).getInfo().get(SlaOption.INFO_EMAIL_LIST));
List<Object> setObj = new ArrayList<Object>();
for (SlaOption sla : slaOptions) {
setObj.add(sla.toWebObject());
}
ret.put("settings", setObj);
} else if (flowOptions != null) {
if (flowOptions.getFailureEmails() != null) {
List<String> emails = flowOptions.getFailureEmails();
if (emails.size() > 0) {
ret.put("slaEmails", emails);
}
}
} else {
if (flow.getFailureEmails() != null) {
List<String> emails = flow.getFailureEmails();
if (emails.size() > 0) {
ret.put("slaEmails", emails);
}
}
}
List<String> allJobs = new ArrayList<String>();
for (Node n : flow.getNodes()) {
allJobs.add(n.getId());
}
ret.put("allJobNames", allJobs);
} catch (ServletException e) {
ret.put("error", e);
} catch (ScheduleManagerException e) {
ret.put("error", e);
}
}
protected Project getProjectAjaxByPermission(Map<String, Object> ret,
int projectId, User user, Permission.Type type) {
Project project = projectManager.getProject(projectId);
if (project == null) {
ret.put("error", "Project '" + project + "' not found.");
} else if (!hasPermission(project, user, type)) {
ret.put("error",
"User '" + user.getUserId() + "' doesn't have " + type.name()
+ " permissions on " + project.getName());
} else {
return project;
}
return null;
}
private void handleGetAllSchedules(HttpServletRequest req,
HttpServletResponse resp, Session session) throws ServletException,
IOException {
Page page =
newPage(req, resp, session,
"azkaban/webapp/servlet/velocity/scheduledflowpage.vm");
List<Schedule> schedules;
try {
schedules = scheduleManager.getSchedules();
} catch (ScheduleManagerException e) {
throw new ServletException(e);
}
page.add("schedules", schedules);
page.render();
}
private void handleGetScheduleCalendar(HttpServletRequest req,
HttpServletResponse resp, Session session) throws ServletException,
IOException {
Page page =
newPage(req, resp, session,
"azkaban/webapp/servlet/velocity/scheduledflowcalendarpage.vm");
List<Schedule> schedules;
try {
schedules = scheduleManager.getSchedules();
} catch (ScheduleManagerException e) {
throw new ServletException(e);
}
page.add("schedules", schedules);
page.render();
}
@Override
protected void handlePost(HttpServletRequest req, HttpServletResponse resp,
Session session) throws ServletException, IOException {
if (hasParam(req, "ajax")) {
handleAJAXAction(req, resp, session);
} else {
HashMap<String, Object> ret = new HashMap<String, Object>();
if (hasParam(req, "action")) {
String action = getParam(req, "action");
if (action.equals("scheduleFlow")) {
ajaxScheduleFlow(req, ret, session.getUser());
} else if (action.equals("scheduleCronFlow")) {
ajaxScheduleCronFlow(req, ret, session.getUser());
} else if (action.equals("removeSched")) {
ajaxRemoveSched(req, ret, session.getUser());
}
}
if (ret.get("status") == ("success"))
setSuccessMessageInCookie(resp, (String) ret.get("message"));
else
setErrorMessageInCookie(resp, (String) ret.get("message"));
this.writeJSON(resp, ret);
}
}
private void ajaxLoadFlows(HttpServletRequest req,
HashMap<String, Object> ret, User user) throws ServletException {
List<Schedule> schedules;
try {
schedules = scheduleManager.getSchedules();
} catch (ScheduleManagerException e) {
throw new ServletException(e);
}
// See if anything is scheduled
if (schedules.size() <= 0)
return;
List<HashMap<String, Object>> output =
new ArrayList<HashMap<String, Object>>();
ret.put("items", output);
for (Schedule schedule : schedules) {
try {
writeScheduleData(output, schedule);
} catch (ScheduleManagerException e) {
throw new ServletException(e);
}
}
}
private void writeScheduleData(List<HashMap<String, Object>> output,
Schedule schedule) throws ScheduleManagerException {
Map<String, Object> stats =
SchedulerStatistics.getStatistics(schedule.getScheduleId(),
(AzkabanWebServer) getApplication());
HashMap<String, Object> data = new HashMap<String, Object>();
data.put("scheduleid", schedule.getScheduleId());
data.put("flowname", schedule.getFlowName());
data.put("projectname", schedule.getProjectName());
data.put("time", schedule.getFirstSchedTime());
DateTime time = DateTime.now();
long period = 0;
if (schedule.getPeriod() != null) {
period = time.plus(schedule.getPeriod()).getMillis() - time.getMillis();
}
data.put("period", period);
int length = 3600 * 1000;
if (stats.get("average") != null && stats.get("average") instanceof Integer) {
length = (int) (Integer) stats.get("average");
if (length == 0) {
length = 3600 * 1000;
}
}
data.put("length", length);
data.put("history", false);
data.put("stats", stats);
output.add(data);
}
private void ajaxLoadHistory(HttpServletRequest req,
HttpServletResponse resp, User user) throws ServletException, IOException {
resp.setContentType(JSON_MIME_TYPE);
long today = DateTime.now().withTime(0, 0, 0, 0).getMillis();
long startTime = getLongParam(req, "startTime");
DateTime start = new DateTime(startTime);
// Ensure start time is 12:00 AM
startTime = start.withTime(0, 0, 0, 0).getMillis();
boolean useCache = false;
if (startTime < today) {
useCache = true;
}
long endTime = startTime + 24 * 3600 * 1000;
int loadAll = getIntParam(req, "loadAll");
// Cache file
String cacheDir =
getApplication().getServerProps().getString("cache.directory", "cache");
File cacheDirFile = new File(cacheDir, "schedule-history");
File cache = new File(cacheDirFile, startTime + ".cache");
cache.getParentFile().mkdirs();
if (useCache) {
// Determine if cache exists
boolean cacheExists = false;
synchronized (this) {
cacheExists = cache.exists() && cache.isFile();
}
if (cacheExists) {
// Send the cache instead
InputStream cacheInput =
new BufferedInputStream(new FileInputStream(cache));
try {
IOUtils.copy(cacheInput, resp.getOutputStream());
return;
} finally {
IOUtils.closeQuietly(cacheInput);
}
}
}
// Load data if not cached
List<ExecutableFlow> history = null;
try {
AzkabanWebServer server = (AzkabanWebServer) getApplication();
ExecutorManagerAdapter executorManager = server.getExecutorManager();
history =
executorManager.getExecutableFlows(null, null, null, 0, startTime,
endTime, -1, -1);
} catch (ExecutorManagerException e) {
logger.error(e);
}
HashMap<String, Object> ret = new HashMap<String, Object>();
List<HashMap<String, Object>> output =
new ArrayList<HashMap<String, Object>>();
ret.put("items", output);
for (ExecutableFlow historyItem : history) {
// Check if it is a scheduled execution
if (historyItem.getScheduleId() >= 0 || loadAll != 0) {
writeHistoryData(output, historyItem);
}
}
// Make sure we're ready to cache it, otherwise output and return
synchronized (this) {
if (!useCache || cache.exists()) {
JSONUtils.toJSON(ret, resp.getOutputStream(), false);
return;
}
}
// Create cache file
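// Write to a temporary file first and rename it into place afterwards, so a
// concurrent reader never observes a partially written cache file.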
File cacheTemp = new File(cacheDirFile, startTime + ".tmp");
cacheTemp.createNewFile();
OutputStream cacheOutput =
new BufferedOutputStream(new FileOutputStream(cacheTemp));
try {
OutputStream outputStream =
new SplitterOutputStream(cacheOutput, resp.getOutputStream());
// Write to both the cache file and web output
JSONUtils.toJSON(ret, outputStream, false);
} finally {
IOUtils.closeQuietly(cacheOutput);
}
// Move cache file
synchronized (this) {
cacheTemp.renameTo(cache);
}
}
private void writeHistoryData(List<HashMap<String, Object>> output,
ExecutableFlow history) {
HashMap<String, Object> data = new HashMap<String, Object>();
data.put("scheduleid", history.getScheduleId());
Project project = projectManager.getProject(history.getProjectId());
data.put("flowname", history.getFlowId());
data.put("projectname", project.getName());
data.put("time", history.getStartTime());
data.put("period", "0");
long endTime = history.getEndTime();
if (endTime == -1) {
endTime = System.currentTimeMillis();
}
data.put("length", endTime - history.getStartTime());
data.put("history", true);
data.put("status", history.getStatus().getNumVal());
output.add(data);
}
private void ajaxRemoveSched(HttpServletRequest req, Map<String, Object> ret,
User user) throws ServletException {
int scheduleId = getIntParam(req, "scheduleId");
Schedule sched;
try {
sched = scheduleManager.getSchedule(scheduleId);
} catch (ScheduleManagerException e) {
throw new ServletException(e);
}
if (sched == null) {
ret.put("message", "Schedule with ID " + scheduleId + " does not exist");
ret.put("status", "error");
return;
}
Project project = projectManager.getProject(sched.getProjectId());
if (project == null) {
ret.put("message", "Project " + sched.getProjectId() + " does not exist");
ret.put("status", "error");
return;
}
if (!hasPermission(project, user, Type.SCHEDULE)) {
ret.put("status", "error");
ret.put("message", "Permission denied. Cannot remove schedule with id "
+ scheduleId);
return;
}
scheduleManager.removeSchedule(sched);
logger.info("User '" + user.getUserId() + " has removed schedule "
+ sched.getScheduleName());
projectManager
.postProjectEvent(project, EventType.SCHEDULE, user.getUserId(),
"Schedule " + sched.toString() + " has been removed.");
ret.put("status", "success");
ret.put("message", "flow " + sched.getFlowName()
+ " removed from Schedules.");
return;
}
private void ajaxScheduleFlow(HttpServletRequest req,
HashMap<String, Object> ret, User user) throws ServletException {
String projectName = getParam(req, "projectName");
String flowName = getParam(req, "flow");
int projectId = getIntParam(req, "projectId");
Project project = projectManager.getProject(projectId);
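// Note: projectName comes straight from the request while the project itself is
// looked up by projectId, so the two request parameters can disagree; the name is
// only used for messages and is stored with the schedule.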
if (project == null) {
ret.put("message", "Project " + projectName + " does not exist");
ret.put("status", "error");
return;
}
if (!hasPermission(project, user, Type.SCHEDULE)) {
ret.put("status", "error");
ret.put("message", "Permission denied. Cannot execute " + flowName);
return;
}
Flow flow = project.getFlow(flowName);
if (flow == null) {
ret.put("status", "error");
ret.put("message", "Flow " + flowName + " cannot be found in project "
+ project);
return;
}
String scheduleTime = getParam(req, "scheduleTime");
String scheduleDate = getParam(req, "scheduleDate");
DateTime firstSchedTime;
try {
firstSchedTime = parseDateTime(scheduleDate, scheduleTime);
} catch (Exception e) {
ret.put("error", "Invalid date and/or time '" + scheduleDate + " "
+ scheduleTime);
return;
}
ReadablePeriod thePeriod = null;
try {
if (hasParam(req, "is_recurring")
&& getParam(req, "is_recurring").equals("on")) {
thePeriod = Schedule.parsePeriodString(getParam(req, "period"));
}
} catch (Exception e) {
ret.put("error", e.getMessage());
}
ExecutionOptions flowOptions = null;
try {
flowOptions = HttpRequestUtils.parseFlowOptions(req);
HttpRequestUtils.filterAdminOnlyFlowParams(userManager, flowOptions, user);
} catch (Exception e) {
ret.put("error", e.getMessage());
}
List<SlaOption> slaOptions = null;
Schedule schedule =
scheduleManager.scheduleFlow(-1, projectId, projectName, flowName,
"ready", firstSchedTime.getMillis(), firstSchedTime.getZone(),
thePeriod, DateTime.now().getMillis(), firstSchedTime.getMillis(),
firstSchedTime.getMillis(), user.getUserId(), flowOptions,
slaOptions);
logger.info("User '" + user.getUserId() + "' has scheduled " + "["
+ projectName + flowName + " (" + projectId + ")" + "].");
projectManager.postProjectEvent(project, EventType.SCHEDULE,
user.getUserId(), "Schedule " + schedule.toString()
+ " has been added.");
ret.put("status", "success");
ret.put("scheduleId", schedule.getScheduleId());
ret.put("message", projectName + "." + flowName + " scheduled.");
}
/**
* Handles cron-based scheduling of a flow.
*
* @throws ServletException
*/
private void ajaxScheduleCronFlow(HttpServletRequest req,
HashMap<String, Object> ret, User user) throws ServletException {
String projectName = getParam(req, "projectName");
String flowName = getParam(req, "flow");
Project project = projectManager.getProject(projectName);
if (project == null) {
ret.put("message", "Project " + projectName + " does not exist");
ret.put("status", "error");
return;
}
int projectId = project.getId();
if (!hasPermission(project, user, Type.SCHEDULE)) {
ret.put("status", "error");
ret.put("message", "Permission denied. Cannot execute " + flowName);
return;
}
Flow flow = project.getFlow(flowName);
if (flow == null) {
ret.put("status", "error");
ret.put("message", "Flow " + flowName + " cannot be found in project "
+ project);
return;
}
DateTimeZone timezone = DateTimeZone.getDefault();
DateTime firstSchedTime = getPresentTimeByTimezone(timezone);
String cronExpression = null;
try {
if (hasParam(req, "cronExpression")) {
// Everything in Azkaban runs at minute granularity, so a 0 is added here
// to make the cron expression complete.
cronExpression = getParam(req, "cronExpression");
if (!azkaban.utils.Utils.isCronExpressionValid(cronExpression, timezone)) {
ret.put("error", "This expression <" + cronExpression + "> cannot be parsed as a valid Quartz cron expression.");
return;
}
}
if (cronExpression == null) {
throw new Exception("Cron expression must exist.");
}
} catch (Exception e) {
ret.put("error", e.getMessage());
}
ExecutionOptions flowOptions = null;
try {
flowOptions = HttpRequestUtils.parseFlowOptions(req);
HttpRequestUtils.filterAdminOnlyFlowParams(userManager, flowOptions, user);
} catch (Exception e) {
ret.put("error", e.getMessage());
}
List<SlaOption> slaOptions = null;
// Because either a cron expression or a recurrence exists, the schedule is built as follows.
Schedule schedule = scheduleManager.cronScheduleFlow(-1, projectId, projectName, flowName,
"ready", firstSchedTime.getMillis(), firstSchedTime.getZone(),
DateTime.now().getMillis(), firstSchedTime.getMillis(),
firstSchedTime.getMillis(), user.getUserId(), flowOptions,
slaOptions, cronExpression);
logger.info("User '" + user.getUserId() + "' has scheduled " + "["
+ projectName + flowName + " (" + projectId + ")" + "].");
projectManager.postProjectEvent(project, EventType.SCHEDULE,
user.getUserId(), "Schedule " + schedule.toString()
+ " has been added.");
ret.put("status", "success");
ret.put("scheduleId", schedule.getScheduleId());
ret.put("message", projectName + "." + flowName + " scheduled.");
}
private DateTime parseDateTime(String scheduleDate, String scheduleTime) {
// scheduleTime: 12,00,pm,PDT
String[] parts = scheduleTime.split(",", -1);
int hour = Integer.parseInt(parts[0]);
int minutes = Integer.parseInt(parts[1]);
boolean isPm = parts[2].equalsIgnoreCase("pm");
DateTimeZone timezone =
parts[3].equals("UTC") ? DateTimeZone.UTC : DateTimeZone.getDefault();
// scheduleDate: 02/10/2013
DateTime day = null;
if (scheduleDate == null || scheduleDate.trim().length() == 0) {
day = new LocalDateTime().toDateTime();
} else {
day = DateTimeFormat.forPattern("MM/dd/yyyy")
.withZone(timezone).parseDateTime(scheduleDate);
}
hour %= 12;
if (isPm)
hour += 12;
DateTime firstSchedTime =
day.withHourOfDay(hour).withMinuteOfHour(minutes).withSecondOfMinute(0);
return firstSchedTime;
}
/**
* @param cronTimezone represents the timezone from remote API call
* @return DateTimeZone.UTC if the string equals "UTC"; otherwise the default timezone.
*/
private DateTimeZone parseTimeZone(String cronTimezone) {
if (cronTimezone != null && cronTimezone.equals("UTC")) {
return DateTimeZone.UTC;
}
return DateTimeZone.getDefault();
}
private DateTime getPresentTimeByTimezone(DateTimeZone timezone) {
return new DateTime(timezone);
}
}
| 1 | 10,913 | I am not sure why the API requires both project name and project ID as inputs. Wouldn't it introduce more opportunities for clients to make mistakes? | azkaban-azkaban | java |
@@ -15,7 +15,7 @@ module Ncr
def edit
if self.proposal.approved?
- flash[:warning] = "You are about to modify a fully approved request. Changes will be logged and sent to approvers, and this request may require re-approval, depending on the change."
+ flash.now[:warning] = "You are about to modify a fully approved request. Changes will be logged and sent to approvers, and this request may require re-approval, depending on the change."
end
super | 1 | module Ncr
class WorkOrdersController < UseCaseController
# arbitrary number...number of upload fields that "ought to be enough for anybody"
MAX_UPLOADS_ON_NEW = 10
def new
work_order.approving_official_email = self.suggested_approver_email
super
end
def create
Ncr::WorkOrderValueNormalizer.new(work_order).run
super
end
def edit
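# NOTE: flash[] persists the message into the next request, so a warning set
# here would show up again on the following page; flash.now[] scopes the
# message to the current render only.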
if self.proposal.approved?
flash[:warning] = "You are about to modify a fully approved request. Changes will be logged and sent to approvers, and this request may require re-approval, depending on the change."
end
super
end
def update
work_order.assign_attributes(permitted_params)
Ncr::WorkOrderValueNormalizer.new(work_order).run
work_order.modifier = current_user
super
end
protected
def work_order
@model_instance
end
def record_changes
ProposalUpdateRecorder.new(work_order).run
end
def setup_and_email_approvers
updater = Ncr::WorkOrderUpdater.new(
work_order: work_order,
flash: flash
)
updater.after_update
end
def attribute_changes?
super || work_order.approver_changed?
end
def model_class
Ncr::WorkOrder
end
def suggested_approver_email
last_proposal = current_user.last_requested_proposal
last_proposal.try(:approvers).try(:first).try(:email_address) || ''
end
def permitted_params
fields = Ncr::WorkOrder.relevant_fields(
params[:ncr_work_order][:expense_type])
if work_order
fields.delete(:emergency) # emergency field cannot be edited
end
params.require(:ncr_work_order).permit(:project_title, :approving_official_email, *fields)
end
# @pre: work_order.approving_official_email is set
def add_steps
super
if self.errors.empty?
work_order.setup_approvals_and_observers
end
end
end
end
| 1 | 15,498 | I believe this was happening for several different flash messages - should we add `now` to all flash messages? (there might be a downside to doing that, but I am not sure what it would be) | 18F-C2 | rb |
@@ -46,6 +46,9 @@ class ExceptionListener extends BaseExceptionListener
parent::__construct($controller, $logger);
}
+ /**
+ * {@inheritdoc}
+ */
public function onKernelException(GetResponseForExceptionEvent $event)
{
$exception = $event->getException(); | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\EventListener;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\BaseException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\FlattenException;
use Psr\Log\LoggerInterface;
use Symfony\Bundle\FrameworkBundle\Templating\EngineInterface;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\Event\GetResponseForExceptionEvent;
use Symfony\Component\HttpKernel\EventListener\ExceptionListener as BaseExceptionListener;
use Symfony\Component\HttpKernel\HttpKernelInterface;
use Symfony\Component\HttpKernel\Kernel;
use Symfony\Component\HttpKernel\Log\DebugLoggerInterface;
/**
* This listener makes it possible to display customized error pages in the production
* environment.
*
* @author Javier Eguiluz <[email protected]>
* @author Maxime Steinhausser <[email protected]>
*/
class ExceptionListener extends BaseExceptionListener
{
/** @var EngineInterface */
private $templating;
/** @var bool */
private $debug;
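/**
* @param EngineInterface $templating Templating engine used to render the error page
* @param bool $debug Whether the kernel runs in debug mode
* @param mixed $controller Controller reference forwarded to the parent listener
* @param LoggerInterface|null $logger
*/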
public function __construct($templating, $debug, $controller, LoggerInterface $logger = null)
{
$this->templating = $templating;
$this->debug = $debug;
parent::__construct($controller, $logger);
}
public function onKernelException(GetResponseForExceptionEvent $event)
{
$exception = $event->getException();
if (!$exception instanceof BaseException || true === $this->debug) {
return;
}
if (3 !== Kernel::RELEASE_VERSION) {
parent::onKernelException($event);
} else {
/* For BC reasons with 2.3, we need to duplicate this entirely
from Symfony\Component\HttpKernel\EventListener\ExceptionListener.
Once sf 2.3 support is dropped, we can remove this else block and condition */
$request = $event->getRequest();
$this->logException($exception, sprintf('Uncaught PHP Exception %s: "%s" at %s line %s', get_class($exception), $exception->getMessage(), $exception->getFile(), $exception->getLine()));
$request = $this->duplicateRequest($exception, $request);
try {
$response = $event->getKernel()->handle($request, HttpKernelInterface::SUB_REQUEST, false);
} catch (\Exception $e) {
$this->logException($e, sprintf('Exception thrown when handling an exception (%s: %s at %s line %s)', get_class($e), $e->getMessage(), $e->getFile(), $e->getLine()));
$wrapper = $e;
while ($prev = $wrapper->getPrevious()) {
if ($exception === $wrapper = $prev) {
throw $e;
}
}
$prev = new \ReflectionProperty('Exception', 'previous');
$prev->setAccessible(true);
$prev->setValue($wrapper, $exception);
throw $e;
}
$event->setResponse($response);
}
}
public function showExceptionPageAction(FlattenException $exception)
{
return $this->templating->renderResponse(
$exception->getTemplatePath(),
array_merge($exception->getParameters(), array('message' => $exception->getMessage())),
Response::create()->setStatusCode($exception->getStatusCode())
);
}
/**
* {@inheritdoc}
*/
protected function logException(\Exception $exception, $message, $original = true)
{
if (null !== $this->logger) {
/** @var BaseException $exception */
if ($exception->getHttpStatusCode() >= 500) {
$this->logger->critical($message, array('exception' => $exception));
} else {
$this->logger->error($message, array('exception' => $exception));
}
}
}
/**
* {@inheritdoc}
*/
protected function duplicateRequest(\Exception $exception, Request $request)
{
if (3 !== Kernel::RELEASE_VERSION) {
$request = parent::duplicateRequest($exception, $request);
} else {
/* For BC reasons with 2.3, we need to duplicate this entirely
from Symfony\Component\HttpKernel\EventListener\ExceptionListener.
Once sf 2.3 support is dropped, we can remove this else block and condition */
$attributes = array(
'_controller' => $this->controller,
'logger' => $this->logger instanceof DebugLoggerInterface ? $this->logger : null,
'format' => $request->getRequestFormat(),
);
$request = $request->duplicate(null, null, $attributes);
$request->setMethod('GET');
}
$request->attributes->set('exception', FlattenException::create($exception));
return $request;
}
}
| 1 | 9,278 | There is no docblock on the parent method. Also, not related, but I forgot to typehint the `$templating` constructor argument above. | EasyCorp-EasyAdminBundle | php |
@@ -669,6 +669,12 @@ func TestDdevXdebugEnabled(t *testing.T) {
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
phpVersions := nodeps.ValidPHPVersions
+
+ // arm64 builds from deb.sury.org do not have xdebug in php5.6
+ if runtime.GOARCH == "arm64" {
+ t.Log("Skipping php5.6 test on arm64 because deb.sury.org packages do not include it currently")
+ delete(phpVersions, "5.6")
+ }
phpKeys := make([]string, 0, len(phpVersions))
for k := range phpVersions {
phpKeys = append(phpKeys, k) | 1 | package ddevapp_test
import (
"bufio"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/drud/ddev/pkg/nodeps"
"github.com/drud/ddev/pkg/version"
"github.com/drud/ddev/pkg/globalconfig"
"github.com/stretchr/testify/require"
"github.com/drud/ddev/pkg/archive"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/exec"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/util"
docker "github.com/fsouza/go-dockerclient"
"github.com/google/uuid"
"github.com/lunixbochs/vtclean"
log "github.com/sirupsen/logrus"
asrt "github.com/stretchr/testify/assert"
)
var (
DdevBin = "ddev"
TestSites = []testcommon.TestSite{
{
Name: "TestPkgWordpress",
SourceURL: "https://github.com/drud/wordpress/archive/v0.4.0.tar.gz",
ArchiveInternalExtractionPath: "wordpress-0.4.0/",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.0/wordpress_files.tar.gz",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.0/wordpress_db.tar.gz",
Docroot: "htdocs",
Type: nodeps.AppTypeWordPress,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/readme.html", Expect: "Welcome. WordPress is a very special project to me."},
DynamicURI: testcommon.URIWithExpect{URI: "/", Expect: "this post has a photo"},
FilesImageURI: "/wp-content/uploads/2017/04/pexels-photo-265186-1024x683.jpeg",
},
{
Name: "TestPkgDrupal8",
SourceURL: "https://ftp.drupal.org/files/projects/drupal-8.8.4.tar.gz",
ArchiveInternalExtractionPath: "drupal-8.8.4/",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d8_umami.files.tar.gz",
FilesZipballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d8_umami.files.zip",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d8_umami.sql.tar.gz",
DBZipURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d8_umami.sql.zip",
FullSiteTarballURL: "",
Type: nodeps.AppTypeDrupal8,
Docroot: "",
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "Drupal is an open source content management platform"},
DynamicURI: testcommon.URIWithExpect{URI: "/node/2", Expect: "Vegan chocolate and nut brownies"},
FilesImageURI: "/sites/default/files/vegan-chocolate-nut-brownies.jpg",
},
{
Name: "TestPkgDrupal7", // Drupal D7
SourceURL: "https://ftp.drupal.org/files/projects/drupal-7.61.tar.gz",
ArchiveInternalExtractionPath: "drupal-7.61/",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d7test-7.59.files.tar.gz",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d7test-7.59-db.tar.gz",
FullSiteTarballURL: "",
Docroot: "",
Type: nodeps.AppTypeDrupal7,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "Drupal is an open source content management platform"},
DynamicURI: testcommon.URIWithExpect{URI: "/node/1", Expect: "D7 test project, kittens edition"},
FilesImageURI: "/sites/default/files/field/image/kittens-large.jpg",
FullSiteArchiveExtPath: "docroot/sites/default/files",
},
{
Name: "TestPkgDrupal6",
SourceURL: "https://ftp.drupal.org/files/projects/drupal-6.38.tar.gz",
ArchiveInternalExtractionPath: "drupal-6.38/",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/drupal6.38_db.tar.gz",
FullSiteTarballURL: "",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/drupal6_files.tar.gz",
Docroot: "",
Type: nodeps.AppTypeDrupal6,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/CHANGELOG.txt", Expect: "Drupal 6.38, 2016-02-24"},
DynamicURI: testcommon.URIWithExpect{URI: "/node/2", Expect: "This is a story. The story is somewhat shaky"},
FilesImageURI: "/sites/default/files/garland_logo.jpg",
},
{
Name: "TestPkgBackdrop",
SourceURL: "https://github.com/backdrop/backdrop/archive/1.11.0.tar.gz",
ArchiveInternalExtractionPath: "backdrop-1.11.0/",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/backdrop_db.11.0.tar.gz",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/backdrop_files.11.0.tar.gz",
FullSiteTarballURL: "",
Docroot: "",
Type: nodeps.AppTypeBackdrop,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.md", Expect: "Backdrop is a full-featured content management system"},
DynamicURI: testcommon.URIWithExpect{URI: "/posts/first-post-all-about-kittens", Expect: "Lots of kittens are a good thing"},
FilesImageURI: "/files/styles/large/public/field/image/kittens-large.jpg",
},
{
Name: "TestPkgTypo3",
SourceURL: "https://github.com/drud/typo3-v9-test/archive/v0.2.2.tar.gz",
ArchiveInternalExtractionPath: "typo3-v9-test-0.2.2/",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/typo3_v9.5_introduction_db.tar.gz",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/typo3_v9.5_introduction_files.tar.gz",
FullSiteTarballURL: "",
Docroot: "public",
Type: nodeps.AppTypeTYPO3,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "junk readme simply for reading"},
DynamicURI: testcommon.URIWithExpect{URI: "/index.php?id=65", Expect: "Boxed Content"},
FilesImageURI: "/fileadmin/introduction/images/streets/nikita-maru-70928.jpg",
},
{
Name: "testpkgmagento",
SourceURL: "https://github.com/OpenMage/magento-mirror/archive/1.9.4.3.tar.gz",
ArchiveInternalExtractionPath: "magento-mirror-1.9.4.3/",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/TestPkgMagento_db_secure_url.tar.gz",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/magento_upload_files.tgz",
FullSiteTarballURL: "",
Docroot: "",
Type: nodeps.AppTypeMagento,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/LICENSE.txt", Expect: `Open Software License ("OSL")`},
DynamicURI: testcommon.URIWithExpect{URI: "/", Expect: "This is a demo store"},
FilesImageURI: "/media/wrapping/Chrysanthemum.jpg",
},
// Note that testpkgmagento2 code is enormous and makes this really, really slow.
{
Name: "testpkgmagento2",
SourceURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/magento2_code_no_dev_with_media.tgz",
ArchiveInternalExtractionPath: "",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/magento2_db.tgz",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/magento2_files.tgz",
FullSiteTarballURL: "",
Docroot: "pub",
Type: nodeps.AppTypeMagento2,
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/junk.txt", Expect: `This is a junk`},
DynamicURI: testcommon.URIWithExpect{URI: "/index.php/junk-product.html", Expect: "junk product"},
FilesImageURI: "/media/catalog/product/r/a/randy_4th_of_july_unicycle.jpg",
},
{
Name: "TestPkgDrupal9",
SourceURL: "https://ftp.drupal.org/files/projects/drupal-9.0.0-beta1.tar.gz",
ArchiveInternalExtractionPath: "drupal-9.0.0-beta1/",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d9_umami_files.tgz",
FilesZipballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d9_umami_files.zip",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d9_umami_sql.tar.gz",
DBZipURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/d9_umami.sql.zip",
FullSiteTarballURL: "",
Type: nodeps.AppTypeDrupal9,
Docroot: "",
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/README.txt", Expect: "Drupal is an open source content management platform"},
DynamicURI: testcommon.URIWithExpect{URI: "/node/1", Expect: "Deep mediterranean quiche"},
FilesImageURI: "/sites/default/files/mediterranean-quiche-umami.jpg",
},
{
Name: "TestPkgLaravel",
SourceURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/ddev-lumen-testapp.tar.gz",
ArchiveInternalExtractionPath: "ddev-lumen-testapp/",
FilesTarballURL: "",
FilesZipballURL: "",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/ddev-lumen-testapp_sql.tar.gz",
DBZipURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/ddev-lumen-testapp_sql.zip",
FullSiteTarballURL: "",
Type: nodeps.AppTypeLaravel,
Docroot: "public",
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/", Expect: "Laravel Components"},
DynamicURI: testcommon.URIWithExpect{URI: "/api/status-code/200", Expect: "indicates that the request has succeeded."},
FilesImageURI: "/images/200.jpg",
},
{
Name: "testpkgshopware6",
SourceURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/shopware6_code.tgz",
ArchiveInternalExtractionPath: "",
FilesTarballURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/shopware6_files.tgz",
DBTarURL: "https://github.com/drud/ddev_test_tarballs/releases/download/v1.1/shopware6_db.tgz",
FullSiteTarballURL: "",
Type: nodeps.AppTypeShopware6,
Docroot: "public",
Safe200URIWithExpectation: testcommon.URIWithExpect{URI: "/maintenance.html", Expect: "Our website is currently undergoing maintenance"},
DynamicURI: testcommon.URIWithExpect{URI: "/Main-product-with-properties/SWDEMO10007.1", Expect: "Main product with properties"},
FilesImageURI: "/media/2f/b0/e2/1603218072/hemd_600x600.jpg",
},
}
FullTestSites = TestSites
)
func init() {
// Make sets DDEV_BINARY_FULLPATH when building the executable
if os.Getenv("DDEV_BINARY_FULLPATH") != "" {
DdevBin = os.Getenv("DDEV_BINARY_FULLPATH")
}
}
func TestMain(m *testing.M) {
output.LogSetUp()
// Since this may be the first time ddev has been used, we need the
// ddev_default network available.
dockerutil.EnsureDdevNetwork()
// Avoid having sudo try to add to /etc/hosts.
// This is normally done by Testsite.Prepare()
_ = os.Setenv("DRUD_NONINTERACTIVE", "true")
// If GOTEST_SHORT is an integer, use it as the index of the single site in
// the array to test. Any other value (or an out-of-range index) defaults to
// the first site in the array.
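// For example, GOTEST_SHORT=2 runs only TestSites[2] (TestPkgDrupal7 above).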
gotestShort := os.Getenv("GOTEST_SHORT")
if gotestShort != "" {
useSite := 0
if site, err := strconv.Atoi(gotestShort); err == nil && site >= 0 && site < len(TestSites) {
useSite = site
}
TestSites = []testcommon.TestSite{TestSites[useSite]}
}
// testRun is the exit result we'll provide.
// Start with a clean exit result, it will be changed if we have trouble.
testRun := 0
err := globalconfig.ReadGlobalConfig()
if err != nil {
log.Fatalf("could not read globalconfig: %v", err)
}
token := os.Getenv("DDEV_PANTHEON_API_TOKEN")
if token != "" {
out, err := exec.RunCommand(DdevBin, []string{"auth", "pantheon", token})
if err != nil {
log.Fatalf("Unable to ddev auth pantheon: %v (%v)", err, out)
}
} else {
log.Info("No DDEV_PANTHEON_API_TOKEN env var has been set. Skipping Pantheon specific tests.")
}
token = os.Getenv("DDEV_DDEVLIVE_API_TOKEN")
if token != "" {
// ddev auth ddev-live can create a .ddev folder, which we don't need right now,
// so drop it in /tmp
out, err := exec.RunCommand("bash", []string{"-c", "cd /tmp && ddev auth ddev-live " + token})
if err != nil {
log.Fatalf("Unable to ddev auth ddev-live: %v (%v)", err, out)
}
} else {
log.Info("No DDEV_DDEVLIVE_API_TOKEN env var has been set. Skipping ddev-live specific tests.")
}
for i, site := range TestSites {
app := &ddevapp.DdevApp{Name: site.Name}
_ = app.Stop(true, false)
_ = globalconfig.RemoveProjectInfo(site.Name)
err := TestSites[i].Prepare()
if err != nil {
log.Fatalf("Prepare() failed on TestSite.Prepare() site=%s, err=%v", TestSites[i].Name, err)
}
switchDir := TestSites[i].Chdir()
testcommon.ClearDockerEnv()
app = &ddevapp.DdevApp{}
err = app.Init(TestSites[i].Dir)
if err != nil {
testRun = -1
log.Errorf("TestMain startup: app.Init() failed on site %s in dir %s, err=%v", TestSites[i].Name, TestSites[i].Dir, err)
continue
}
err = app.WriteConfig()
if err != nil {
testRun = -1
log.Errorf("TestMain startup: app.WriteConfig() failed on site %s in dir %s, err=%v", TestSites[i].Name, TestSites[i].Dir, err)
continue
}
for _, volume := range []string{app.Name + "-mariadb"} {
err = dockerutil.RemoveVolume(volume)
if err != nil {
log.Errorf("TestMain startup: Failed to delete volume %s: %v", volume, err)
}
}
switchDir()
}
if testRun == 0 {
log.Debugln("Running tests.")
testRun = m.Run()
}
for i, site := range TestSites {
testcommon.ClearDockerEnv()
app := &ddevapp.DdevApp{}
err := app.Init(site.Dir)
if err != nil {
log.Fatalf("TestMain shutdown: app.Init() failed on site %s in dir %s, err=%v", TestSites[i].Name, TestSites[i].Dir, err)
}
if app.SiteStatus() != ddevapp.SiteStopped {
err = app.Stop(true, false)
if err != nil {
log.Fatalf("TestMain shutdown: app.Stop() failed on site %s, err=%v", TestSites[i].Name, err)
}
}
site.Cleanup()
}
os.Exit(testRun)
}
// TestDdevStart tests the functionality that is called when "ddev start" is executed
func TestDdevStart(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
// Make sure this leaves us in the original test directory
testDir, _ := os.Getwd()
//nolint: errcheck
defer os.Chdir(testDir)
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevStart", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
// Make sure the -built docker image exists before stop
webBuilt := version.GetWebImage() + "-" + site.Name + "-built"
dbBuilt := version.GetWebImage() + "-" + site.Name + "-built"
exists, err := dockerutil.ImageExistsLocally(webBuilt)
assert.NoError(err)
assert.True(exists)
//nolint: errcheck
defer app.Stop(true, false)
// ensure .ddev/.ddev-docker-compose* exists inside .ddev site folder
composeFile := fileutil.FileExists(app.DockerComposeYAMLPath())
assert.True(composeFile)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "running")
assert.NoError(err)
assert.True(check, "Container check on %s failed", containerType)
}
if util.IsCommandAvailable("mysql") {
dbPort, err := app.GetPublishedPort("db")
assert.NoError(err)
dockerIP, _ := dockerutil.GetDockerIP()
out, err := exec.RunCommand("mysql", []string{"--user=db", "--password=db", "--port=" + strconv.Itoa(dbPort), "--database=db", "--host=" + dockerIP, "-e", "SELECT 1;"})
assert.NoError(err)
assert.Contains(out, "1")
} else {
fmt.Print("TestDddevStart skipping check for local mysql connection because mysql command not in path")
}
err = app.Stop(true, false)
assert.NoError(err)
// Make sure the -built docker images do not exist after stop with removeData
for _, imageName := range []string{webBuilt, dbBuilt} {
exists, err = dockerutil.ImageExistsLocally(imageName)
assert.NoError(err)
assert.False(exists, "image %s should not have existed but still exists (while testing %s)", app.Name)
}
runTime()
switchDir()
// Start up TestSites[0] again with a post-start hook
// When run the first time, it should execute the hook, second time it should not
err = os.Chdir(site.Dir)
assert.NoError(err)
err = app.Init(site.Dir)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-start": {{"exec": "echo hello"}}}
assert.NoError(err)
stdoutFunc, err := util.CaptureOutputToFile()
assert.NoError(err)
promptOutFunc := util.CaptureUserOut()
err = app.Start()
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
out := stdoutFunc()
UOut := promptOutFunc()
assert.Contains(UOut, "Running task: Exec command 'echo hello' in container/service 'web'")
assert.Contains(out, "hello\n")
// try to start a site of same name at different path
another := site
tmpDir := testcommon.CreateTmpDir("another")
copyDir := filepath.Join(tmpDir, "copy")
err = fileutil.CopyDir(site.Dir, copyDir)
assert.NoError(err)
another.Dir = copyDir
//nolint: errcheck
defer os.RemoveAll(copyDir)
badapp := &ddevapp.DdevApp{}
err = badapp.Init(copyDir)
//nolint: errcheck
defer badapp.Stop(true, false)
if err == nil {
logs, logErr := app.CaptureLogs("web", false, "")
require.Error(t, err, "did not receive err from badapp.Init, logErr=%v, logs:\n======================= logs from app webserver =================\n%s\n============ end logs =========\n", logErr, logs)
}
if err != nil {
assert.Contains(err.Error(), fmt.Sprintf("a project (web container) in running state already exists for %s that was created at %s", TestSites[0].Name, TestSites[0].Dir))
}
// Try to start a site of same name at an equivalent but different path. It should work.
tmpDir, err = testcommon.OsTempDir()
assert.NoError(err)
symlink := filepath.Join(tmpDir, fileutil.RandomFilenameBase())
err = os.Symlink(app.AppRoot, symlink)
assert.NoError(err)
//nolint: errcheck
defer os.Remove(symlink)
symlinkApp := &ddevapp.DdevApp{}
err = symlinkApp.Init(symlink)
assert.NoError(err)
//nolint: errcheck
defer symlinkApp.Stop(true, false)
// Make sure that GetActiveApp() also fails when trying to start app of duplicate name in current directory.
switchDir = another.Chdir()
defer switchDir()
_, err = ddevapp.GetActiveApp("")
assert.Error(err)
if err != nil {
assert.Contains(err.Error(), fmt.Sprintf("a project (web container) in running state already exists for %s that was created at %s", TestSites[0].Name, TestSites[0].Dir))
}
testcommon.CleanupDir(another.Dir)
}
// TestDdevStartMultipleHostnames tests start with multiple hostnames
func TestDdevStartMultipleHostnames(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
for _, site := range TestSites {
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevStartMultipleHostnames", site.Name))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// site.Name is deliberately repeated in AdditionalHostnames; if GetHostNames() did not
// remove the duplicates, they would cause a ddev-router failure. A user error of this
// type should not cause a failure, so GetHostNames() must de-duplicate the list.
app.AdditionalHostnames = []string{"sub1." + site.Name, "sub2." + site.Name, "subname.sub3." + site.Name, site.Name, site.Name, site.Name}
// sub1.<sitename>.ddev.site and <sitename>.ddev.site are deliberately included to prove
// they don't cause ddev-router failures.
// Note that these AdditionalFQDNs require sudo privileges, which the test runners
// don't typically have.
app.AdditionalFQDNs = []string{"one.example.com", "two.example.com", "a.one.example.com", site.Name + "." + app.ProjectTLD, "sub1." + site.Name + "." + app.ProjectTLD}
err = app.WriteConfig()
assert.NoError(err)
err = app.StartAndWait(5)
assert.NoError(err)
if err != nil && strings.Contains(err.Error(), "db container failed") {
container, err := app.FindContainerByType("db")
assert.NoError(err)
out, err := exec.RunCommand("docker", []string{"logs", container.Names[0]})
assert.NoError(err)
t.Logf("DB Logs after app.Start: \n%s\n=== END DB LOGS ===", out)
}
// ensure .ddev/docker-compose*.yaml exists inside .ddev site folder
composeFile := fileutil.FileExists(app.DockerComposeYAMLPath())
assert.True(composeFile)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "running")
assert.NoError(err)
assert.True(check, "Container check on %s failed", containerType)
}
_, _, urls := app.GetAllURLs()
t.Logf("Testing these URLs: %v", urls)
for _, url := range urls {
_, _ = testcommon.EnsureLocalHTTPContent(t, url+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect)
}
out, err := exec.RunCommand(DdevBin, []string{"list"})
assert.NoError(err)
t.Logf("=========== output of ddev list ==========\n%s\n============", out)
// Multiple projects can't run at the same time with the fqdns, so we need to clean
// up these for tests that run later.
app.AdditionalFQDNs = []string{}
app.AdditionalHostnames = []string{}
err = app.WriteConfig()
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
runTime()
}
}
// TestDdevStartUnmanagedSettings tests start and config with disable_settings_management
func TestDdevStartUnmanagedSettings(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
// Make sure this leaves us in the original test directory
testDir, _ := os.Getwd()
//nolint: errcheck
defer os.Chdir(testDir)
// Use Drupal8 only, mostly for the composer example
site := FullTestSites[1]
// If running this with GOTEST_SHORT we have to create the directory, tarball etc.
if site.Dir == "" || !fileutil.FileExists(site.Dir) {
app := &ddevapp.DdevApp{Name: site.Name}
_ = app.Stop(true, false)
_ = globalconfig.RemoveProjectInfo(site.Name)
err := site.Prepare()
require.NoError(t, err)
// nolint: errcheck
defer os.RemoveAll(site.Dir)
}
switchDir := site.Chdir()
defer switchDir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevStart", site.Name))
defer runTime()
err := app.Init(site.Dir)
assert.NoError(err)
// Previous tests may have left settings files
_ = os.Remove(app.SiteSettingsPath)
_ = os.Remove(app.SiteDdevSettingsFile)
// On initial init, settings files should not exist
assert.False(fileutil.FileExists(app.SiteSettingsPath))
assert.False(fileutil.FileExists(app.SiteDdevSettingsFile))
app.DisableSettingsManagement = true
err = app.WriteConfig()
assert.NoError(err)
// After config, they should still not exist, because we had DisableSettingsManagement
assert.False(fileutil.FileExists(app.SiteSettingsPath))
assert.False(fileutil.FileExists(app.SiteDdevSettingsFile))
err = app.Start()
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
// After start, they should still not exist, because we had DisableSettingsManagement
assert.False(fileutil.FileExists(app.SiteSettingsPath))
assert.False(fileutil.FileExists(app.SiteDdevSettingsFile))
app.DisableSettingsManagement = false
err = app.WriteConfig()
assert.NoError(err)
_, err = app.CreateSettingsFile()
assert.NoError(err)
// Now with DisableSettingsManagement=false, both should exist after config/settings creation
assert.FileExists(app.SiteSettingsPath)
assert.FileExists(app.SiteDdevSettingsFile)
_ = os.Remove(filepath.Join(app.SiteSettingsPath))
_ = os.Remove(filepath.Join(app.SiteDdevSettingsFile))
assert.False(fileutil.FileExists(app.SiteSettingsPath))
assert.False(fileutil.FileExists(app.SiteDdevSettingsFile))
err = app.Start()
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
// Now with DisableSettingsManagement=false, start should have created both
assert.FileExists(app.SiteSettingsPath)
assert.FileExists(app.SiteDdevSettingsFile)
}
// TestDdevNoProjectMount tests running without the app file mount.
func TestDdevNoProjectMount(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
// Make sure this leaves us in the original test directory
testDir, _ := os.Getwd()
//nolint: errcheck
defer os.Chdir(testDir)
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", t.Name(), site.Name))
defer runTime()
err := app.Init(site.Dir)
assert.NoError(err)
app.NoProjectMount = true
err = app.WriteConfig()
assert.NoError(err)
defer func() {
err = app.Stop(true, false)
assert.NoError(err)
app.NoProjectMount = false
err = app.WriteConfig()
assert.NoError(err)
}()
err = app.Start()
assert.NoError(err)
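// findmnt -T reports the mount containing /var/www/html, and the awk filter keeps the
// TARGET column while dropping its header line. With no_project_mount set, the docroot
// should not be its own bind mount, so the reported target must differ from /var/www/html.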
stdout, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/var/www/html",
Cmd: `findmnt -T /var/www/html | awk '$1 != "TARGET" {printf $1}'`,
})
assert.NoError(err)
assert.NotEqual("/var/www/html", stdout)
}
// TestDdevXdebugEnabled tests running with xdebug_enabled = true, etc.
func TestDdevXdebugEnabled(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
testcommon.ClearDockerEnv()
site := TestSites[0]
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
phpVersions := nodeps.ValidPHPVersions
phpKeys := make([]string, 0, len(phpVersions))
for k := range phpVersions {
phpKeys = append(phpKeys, k)
}
sort.Strings(phpKeys)
err := app.Init(site.Dir)
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
for _, v := range phpKeys {
app.PHPVersion = v
t.Logf("Beginning XDebug checks with XDebug php%s\n", v)
fmt.Printf("Attempting XDebug checks with XDebug %s\n", v)
app.XdebugEnabled = false
assert.NoError(err)
err = app.Start()
require.NoError(t, err)
opts := &ddevapp.ExecOpts{
Service: "web",
Cmd: "php --ri xdebug",
}
stdout, _, err := app.Exec(opts)
assert.Error(err)
assert.Contains(stdout, "Extension 'xdebug' not present")
// Run with xdebug_enabled: true
testcommon.ClearDockerEnv()
app.XdebugEnabled = true
err = app.Start()
require.NoError(t, err)
stdout, _, err = app.Exec(opts)
assert.NoError(err)
if err != nil {
t.Errorf("Aborting xdebug check for php%s: %v", v, err)
continue
}
if app.PHPVersion == nodeps.PHP80 {
assert.Contains(stdout, "xdebug.mode => debug => debug", "xdebug is not enabled for %s", v)
} else {
assert.Contains(stdout, "xdebug support => enabled", "xdebug is not enabled for %s", v)
}
if app.PHPVersion == nodeps.PHP80 {
assert.Contains(stdout, "xdebug.client_host => host.docker.internal => host.docker.internal")
} else {
assert.Contains(stdout, "xdebug.remote_host => host.docker.internal => host.docker.internal")
}
// Start a listener on port 9000 of localhost (where PHPStorm or whatever would listen)
listener, err := net.Listen("tcp", ":9000")
assert.NoError(err)
if err != nil || listener == nil {
continue
}
// Curl to the project's index.php or anything else
_, _, _ = testcommon.GetLocalHTTPResponse(t, app.GetHTTPURL(), 1)
fmt.Printf("Attempting accept of port 9000 with xdebug enabled, XDebug version=%s\n", v)
// Accept is blocking, no way to timeout, so use
// goroutine instead.
acceptListenDone := make(chan bool, 1)
defer close(acceptListenDone)
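// The channel is buffered so the goroutine's send can complete even if the select
// below has already timed out and nobody is receiving.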
go func() {
conn, err := listener.Accept()
if err != nil {
// Accept returns an error if the listener was closed after a timeout below; nothing to read in that case.
return
}
t.Logf("Completed accept of port 9000 with xdebug enabled, XDebug version=%s, time=%v\n", v, time.Now())
// Grab the Xdebug connection start and look in it for "Xdebug"
b := make([]byte, 650)
_, err = bufio.NewReader(conn).Read(b)
assert.NoError(err)
lineString := string(b)
assert.Contains(lineString, "Xdebug")
assert.Contains(lineString, `xdebug:language_version="`+v)
acceptListenDone <- true
}()
select {
case <-acceptListenDone:
fmt.Printf("Read from acceptListenDone at %v\n", time.Now())
case <-time.After(10 * time.Second):
fmt.Printf("Timed out waiting for accept/listen at %v\n", time.Now())
}
// Close the listener so the next PHP version iteration can bind port 9000 again.
_ = listener.Close()
}
runTime()
}
// TestDdevMysqlWorks tests that the mysql client can be run in both containers.
func TestDdevMysqlWorks(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
site := TestSites[0]
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevMysqlWorks", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
testcommon.ClearDockerEnv()
err = app.StartAndWait(0)
//nolint: errcheck
defer app.Stop(true, false)
require.NoError(t, err)
// Test that mysql + .my.cnf works on web container
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "mysql -e 'SELECT USER();' | grep 'db@'",
})
assert.NoError(err)
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "mysql -e 'SELECT DATABASE();' | grep 'db'",
})
assert.NoError(err)
// Test that mysql + .my.cnf works on db container
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "mysql -e 'SELECT USER();' | grep 'root@localhost'",
})
assert.NoError(err)
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "mysql -e 'SELECT DATABASE();' | grep 'db'",
})
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
runTime()
}
// TestStartWithoutDdevConfig makes sure we don't have a regression where lack of .ddev
// causes a panic.
func TestStartWithoutDdevConfig(t *testing.T) {
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
testDir := testcommon.CreateTmpDir(t.Name())
// testcommon.Chdir()() and CleanupDir() check their own errors (and exit)
defer testcommon.CleanupDir(testDir)
defer testcommon.Chdir(testDir)()
err := os.MkdirAll(testDir+"/sites/default", 0777)
assert.NoError(err)
err = os.Chdir(testDir)
assert.NoError(err)
_, err = ddevapp.GetActiveApp("")
assert.Error(err)
if err != nil {
assert.Contains(err.Error(), "Could not find a project")
}
}
// TestGetApps tests the GetActiveProjects function to ensure it accurately returns a list of running applications.
func TestGetApps(t *testing.T) {
assert := asrt.New(t)
// Start the apps.
for _, site := range TestSites {
testcommon.ClearDockerEnv()
app := &ddevapp.DdevApp{}
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
}
apps := ddevapp.GetActiveProjects()
for _, testSite := range TestSites {
var found bool
for _, app := range apps {
if testSite.Name == app.GetName() {
found = true
break
}
}
assert.True(found, "Did not find testSite %s in list of running projects", testSite.Name)
}
// Now shut down all sites as we expect them to be shut down.
for _, site := range TestSites {
testcommon.ClearDockerEnv()
app := &ddevapp.DdevApp{}
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
}
}
// TestDdevImportDB tests the functionality that is called when "ddev import-db" is executed
func TestDdevImportDB(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
testDir, _ := os.Getwd()
site := TestSites[0]
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
defer func() {
app.Hooks = nil
_ = app.WriteConfig()
_ = app.Stop(true, false)
}()
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "mysql -N -e 'DROP DATABASE IF EXISTS test;'",
})
assert.NoError(err)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-import-db": {{"exec-host": "touch hello-post-import-db-" + app.Name}}, "pre-import-db": {{"exec-host": "touch hello-pre-import-db-" + app.Name}}}
// Test simple db loads.
for _, file := range []string{"users.sql", "users.mysql", "users.sql.gz", "users.mysql.gz", "users.sql.tar", "users.mysql.tar", "users.sql.tar.gz", "users.mysql.tar.gz", "users.sql.tgz", "users.mysql.tgz", "users.sql.zip", "users.mysql.zip", "users_with_USE_statement.sql"} {
path := filepath.Join(testDir, "testdata", t.Name(), file)
err = app.ImportDB(path, "", false, false, "db")
assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", path, err)
if err != nil {
continue
}
// There should be exactly the one users table for each of these files
out, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "mysql -N -e 'SHOW TABLES;' | cat",
})
assert.NoError(err)
assert.Equal("users\n", out)
// Verify that no extra database was created
out, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: `mysql -N -e 'SHOW DATABASES;' | egrep -v "^(information_schema|performance_schema|mysql)$"`,
})
assert.NoError(err)
assert.Equal("db\n", out)
// Test that a settings file has correct hash_salt format
switch app.Type {
case nodeps.AppTypeDrupal7:
drupalHashSalt, err := fileutil.FgrepStringInFile(app.SiteDdevSettingsFile, "$drupal_hash_salt")
assert.NoError(err)
assert.True(drupalHashSalt)
case nodeps.AppTypeDrupal8:
settingsHashSalt, err := fileutil.FgrepStringInFile(app.SiteDdevSettingsFile, "settings['hash_salt']")
assert.NoError(err)
assert.True(settingsHashSalt)
case nodeps.AppTypeWordPress:
hasAuthSalt, err := fileutil.FgrepStringInFile(app.SiteSettingsPath, "SECURE_AUTH_SALT")
assert.NoError(err)
assert.True(hasAuthSalt)
}
}
if site.DBTarURL != "" {
_, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_siteTarArchive", "", site.DBTarURL)
assert.NoError(err)
err = app.ImportDB(cachedArchive, "", false, false, "db")
assert.NoError(err)
assert.FileExists("hello-pre-import-db-" + app.Name)
assert.FileExists("hello-post-import-db-" + app.Name)
err = os.Remove("hello-pre-import-db-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-import-db-" + app.Name)
assert.NoError(err)
}
if site.DBZipURL != "" {
_, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_siteZipArchive", "", site.DBZipURL)
assert.NoError(err)
err = app.ImportDB(cachedArchive, "", false, false, "db")
assert.NoError(err)
assert.FileExists("hello-pre-import-db-" + app.Name)
assert.FileExists("hello-post-import-db-" + app.Name)
_ = os.RemoveAll("hello-pre-import-db-" + app.Name)
_ = os.RemoveAll("hello-post-import-db-" + app.Name)
}
if site.FullSiteTarballURL != "" {
_, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_FullSiteTarballURL", "", site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportDB(cachedArchive, "data.sql", false, false, "db")
assert.NoError(err, "Failed to find data.sql at root of tarball %s", cachedArchive)
assert.FileExists("hello-pre-import-db-" + app.Name)
assert.FileExists("hello-post-import-db-" + app.Name)
_ = os.RemoveAll("hello-pre-import-db-" + app.Name)
_ = os.RemoveAll("hello-post-import-db-" + app.Name)
}
app.Hooks = nil
for _, db := range []string{"db", "extradb"} {
// Import from stdin, make sure that works
inputFile := filepath.Join(testDir, "testdata", t.Name(), "stdintable.sql")
f, err := os.Open(inputFile)
require.NoError(t, err)
// nolint: errcheck
defer f.Close()
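// Temporarily point os.Stdin at the SQL file so that ImportDB with an empty path
// reads the dump from stdin, then restore the real stdin.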
savedStdin := os.Stdin
os.Stdin = f
err = app.ImportDB("", "", false, false, db)
os.Stdin = savedStdin
assert.NoError(err)
out, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: fmt.Sprintf(`echo "SHOW DATABASES LIKE '%s'; SELECT COUNT(*) FROM stdintable;" | mysql -N %s`, db, db),
})
assert.NoError(err)
assert.Equal(out, fmt.Sprintf("%s\n2\n", db))
// Import 2-user users.sql into users table
path := filepath.Join(testDir, "testdata", t.Name(), "users.sql")
err = app.ImportDB(path, "", false, false, db)
assert.NoError(err)
out, stderr, err := app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: fmt.Sprintf(`echo "SELECT COUNT(*) AS TOTALNUMBEROFTABLES FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%s';" | mysql -N %s`, db, db),
})
assert.NoError(err, "exec failed: %v", stderr)
assert.Equal("1\n", out)
// Import 1-user sql and make sure only one row is left there
path = filepath.Join(testDir, "testdata", t.Name(), "oneuser.sql")
err = app.ImportDB(path, "", false, false, db)
assert.NoError(err)
out, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: fmt.Sprintf(`echo "SELECT COUNT(*) AS TOTALNUMBEROFTABLES FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%s';" | mysql -N %s`, db, db),
})
assert.NoError(err)
assert.Equal("1\n", out)
// Import 2-user users.sql again, but with nodrop=true
// We should end up with 2 tables now
path = filepath.Join(testDir, "testdata", t.Name(), "users.sql")
err = app.ImportDB(path, "", false, true, db)
assert.NoError(err)
out, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: fmt.Sprintf(`echo "SELECT COUNT(*) AS TOTALNUMBEROFTABLES FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%s';" | mysql -N %s`, db, db),
})
assert.NoError(err)
assert.Equal("2\n", out)
}
runTime()
switchDir()
}
// TestDdevAllDatabases tests db import/export/start with all MariaDB versions
func TestDdevAllDatabases(t *testing.T) {
assert := asrt.New(t)
dbVersions := map[string]map[string]bool{
"mariadb": nodeps.ValidMariaDBVersions,
"mysql": nodeps.ValidMySQLVersions,
}
// Use a smaller list if GOTEST_SHORT
if os.Getenv("GOTEST_SHORT") != "" {
dbVersions = map[string]map[string]bool{
"mariadb": {"10.2": true, "10.1": true},
"mysql": {"8.0": true, "5.5": true},
}
}
app := &ddevapp.DdevApp{}
testDir, _ := os.Getwd()
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// Make sure there isn't an old db laying around
_ = dockerutil.RemoveVolume(app.Name + "-mariadb")
//nolint: errcheck
defer func() {
_ = app.Stop(true, false)
// Make sure we leave the config.yaml in expected state
app.MariaDBVersion = ""
app.MySQLVersion = ""
app.DBImage = ""
_ = app.WriteConfig()
}()
for dbType, versions := range dbVersions {
for v := range versions {
t.Logf("testing db server functionality of %v:%v", dbType, v)
_ = app.Stop(true, false)
if dbType == "mariadb" {
app.MySQLVersion = ""
app.MariaDBVersion = v
} else if dbType == "mysql" {
app.MariaDBVersion = ""
app.MySQLVersion = v
}
app.DBImage = ""
_ = app.WriteConfig()
startErr := app.Start()
if startErr != nil {
appLogs, err := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(err)
t.Fatalf("app.Start() failure %v; logs:\n=====\n%s\n=====\n", startErr, appLogs)
}
// Make sure the version of db running matches expected
containerDBVersion, _, _ := app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "cat /var/lib/mysql/db_mariadb_version.txt",
})
assert.Equal(v, strings.Trim(containerDBVersion, "\n\r "))
importPath := filepath.Join(testDir, "testdata", t.Name(), "users.sql")
err = app.ImportDB(importPath, "", false, false, "db")
assert.NoError(err, "failed to import %v", importPath)
_ = os.Mkdir("tmp", 0777)
err = fileutil.PurgeDirectory("tmp")
assert.NoError(err)
// Test that we can export-db to a gzipped file
err = app.ExportDB("tmp/users1.sql.gz", true, "db")
assert.NoError(err)
// Validate contents
err = archive.Ungzip("tmp/users1.sql.gz", "tmp")
assert.NoError(err)
stringFound, err := fileutil.FgrepStringInFile("tmp/users1.sql", "Table structure for table `users`")
assert.NoError(err)
assert.True(stringFound)
err = fileutil.PurgeDirectory("tmp")
assert.NoError(err)
// Export to an ungzipped file and validate
err = app.ExportDB("tmp/users2.sql", false, "db")
assert.NoError(err)
// Validate contents
stringFound, err = fileutil.FgrepStringInFile("tmp/users2.sql", "Table structure for table `users`")
assert.NoError(err)
assert.True(stringFound)
err = fileutil.PurgeDirectory("tmp")
assert.NoError(err)
// Capture to stdout without gzip compression
stdout := util.CaptureStdOut()
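// stdout() below ends the capture, restoring os.Stdout and returning everything written in the meantime.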
err = app.ExportDB("", false, "db")
assert.NoError(err)
out := stdout()
assert.Contains(out, "Table structure for table `users`")
snapshotName := v + "_" + fileutil.RandomFilenameBase()
output, err := app.Snapshot(snapshotName)
assert.NoError(err, "could not create snapshot %s for version %s: %v output=%v", snapshotName, v, err, output)
err = app.RestoreSnapshot(snapshotName)
assert.NoError(err, "could not restore snapshot %s for version %s: %v", snapshotName, v, err)
// Make sure the version of db running matches expected
containerDBVersion, _, _ = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "cat /var/lib/mysql/db_mariadb_version.txt",
})
assert.Equal(v, strings.Trim(containerDBVersion, "\n\r "))
// TODO: Restore a snapshot from a different version and note the warning.
_ = app.Stop(true, false)
}
}
runTime()
}
// TestDdevExportDB tests the functionality that is called when "ddev export-db" is executed
func TestDdevExportDB(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
testDir, _ := os.Getwd()
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevExportDB", site.Name))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
importPath := filepath.Join(testDir, "testdata", t.Name(), "users.sql")
err = app.ImportDB(importPath, "", false, false, "db")
require.NoError(t, err)
_ = os.Mkdir("tmp", 0777)
// The most likely reason for Mkdir to fail is that the directory already exists, so ignore that.
err = fileutil.PurgeDirectory("tmp")
assert.NoError(err)
// Test that we can export-db to a gzipped file
err = app.ExportDB("tmp/users1.sql.gz", true, "db")
assert.NoError(err)
// Validate contents
err = archive.Ungzip("tmp/users1.sql.gz", "tmp")
assert.NoError(err)
stringFound, err := fileutil.FgrepStringInFile("tmp/users1.sql", "Table structure for table `users`")
assert.NoError(err)
assert.True(stringFound)
err = fileutil.PurgeDirectory("tmp")
assert.NoError(err)
// Export to an ungzipped file and validate
err = app.ExportDB("tmp/users2.sql", false, "db")
assert.NoError(err)
// Validate contents
stringFound, err = fileutil.FgrepStringInFile("tmp/users2.sql", "Table structure for table `users`")
assert.NoError(err)
assert.True(stringFound)
err = fileutil.PurgeDirectory("tmp")
assert.NoError(err)
// Capture to stdout without gzip compression
stdout := util.CaptureStdOut()
err = app.ExportDB("", false, "db")
assert.NoError(err)
output := stdout()
assert.Contains(output, "Table structure for table `users`")
// Export an alternate database
importPath = filepath.Join(testDir, "testdata", t.Name(), "users.sql")
err = app.ImportDB(importPath, "", false, false, "anotherdb")
require.NoError(t, err)
err = app.ExportDB("tmp/anotherdb.sql.gz", true, "anotherdb")
assert.NoError(err)
importPath = "tmp/anotherdb.sql.gz"
err = app.ImportDB(importPath, "", false, false, "thirddb")
assert.NoError(err)
out, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: `echo "SELECT COUNT(*) FROM users;" | mysql -N thirddb`,
})
assert.NoError(err)
assert.Equal("2\n", out)
runTime()
}
// TestDdevFullSiteSetup tests a full import-db and import-files and then looks to see if
// we have a spot-test success hit on a URL
func TestDdevFullSiteSetup(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
for _, site := range TestSites {
switchDir := site.Chdir()
defer switchDir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevFullSiteSetup", site.Name))
t.Logf("=== BEGIN TestDdevFullSiteSetup for %s\n", site.Name)
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// Get files before start, as syncing can start immediately.
if site.FilesTarballURL != "" {
_, tarballPath, err := testcommon.GetCachedArchive(site.Name, "local-tarballs-files", "", site.FilesTarballURL)
assert.NoError(err)
err = app.ImportFiles(tarballPath, "")
assert.NoError(err)
}
// Running WriteConfig ensures that settings.ddev.php gets written
// so Drupal 8 won't try to set things unwriteable
err = app.WriteConfig()
assert.NoError(err)
err = app.Start()
assert.NoError(err)
// Validate PHPMyAdmin is working and database named db is present
_, _ = testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL()+":8036/tbl_create.php?server=1&db=db", "Table name:")
// Validate MailHog is working and "connected"
_, _ = testcommon.EnsureLocalHTTPContent(t, app.GetHTTPURL()+":8025/#", "Connected")
settingsLocation, err := app.DetermineSettingsPathLocation()
assert.NoError(err)
if app.Type != nodeps.AppTypeShopware6 {
assert.Equal(filepath.Dir(settingsLocation), filepath.Dir(app.SiteSettingsPath))
}
if nodeps.ArrayContainsString([]string{"drupal6", "drupal7"}, app.Type) {
assert.FileExists(filepath.Join(filepath.Dir(app.SiteSettingsPath), "drushrc.php"))
}
if site.DBTarURL != "" {
_, cachedArchive, err := testcommon.GetCachedArchive(site.Name, site.Name+"_siteTarArchive", "", site.DBTarURL)
assert.NoError(err)
err = app.ImportDB(cachedArchive, "", false, false, "db")
assert.NoError(err, "failed to import-db with dbtarball %s, app.Type=%s, mariadb_version=%s, mysql_version=%s", site.DBTarURL, app.Type, app.MariaDBVersion, app.MySQLVersion)
}
startErr := app.StartAndWait(2)
if startErr != nil {
appLogs, getLogsErr := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(getLogsErr)
t.Fatalf("app.StartAndWait() failure err=%v; logs:\n=====\n%s\n=====\n", startErr, appLogs)
}
// Test static content.
_, _ = testcommon.EnsureLocalHTTPContent(t, app.GetHTTPSURL()+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect)
// Test dynamic php + database content.
rawurl := app.GetHTTPSURL() + site.DynamicURI.URI
body, resp, err := testcommon.GetLocalHTTPResponse(t, rawurl, 120)
assert.NoError(err, "GetLocalHTTPResponse returned err on project=%s rawurl %s, resp=%v: %v", site.Name, rawurl, resp, err)
if err != nil && strings.Contains(err.Error(), "container ") {
logs, err := ddevapp.GetErrLogsFromApp(app, err)
assert.NoError(err)
t.Fatalf("Logs after GetLocalHTTPResponse: %s", logs)
}
assert.Contains(body, site.DynamicURI.Expect, "expected %s on project %s", site.DynamicURI.Expect, site.Name)
// Load an image from the files section
if site.FilesImageURI != "" {
_, resp, err := testcommon.GetLocalHTTPResponse(t, app.GetHTTPSURL()+site.FilesImageURI)
assert.NoError(err, "failed ImageURI response on project %s", site.Name)
if err != nil && resp != nil {
assert.Equal("image/jpeg", resp.Header["Content-Type"][0])
}
}
// Make sure we can do a simple hit against the host-mount of web container.
_, _ = testcommon.EnsureLocalHTTPContent(t, app.GetWebContainerDirectHTTPURL()+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect)
// We don't want all the projects running at once.
err = app.Stop(true, false)
assert.NoError(err)
runTime()
switchDir()
}
}
// TestDdevRestoreSnapshot tests creating a snapshot and reverting to it. This runs with MariaDB 10.2
func TestDdevRestoreSnapshot(t *testing.T) {
assert := asrt.New(t)
testDir, _ := os.Getwd()
app := &ddevapp.DdevApp{}
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("TestDdevRestoreSnapshot"))
d7testerTest1Dump, err := filepath.Abs(filepath.Join("testdata", t.Name(), "restore_snapshot", "d7tester_test_1.sql.gz"))
assert.NoError(err)
d7testerTest2Dump, err := filepath.Abs(filepath.Join("testdata", t.Name(), "restore_snapshot", "d7tester_test_2.sql.gz"))
assert.NoError(err)
// Use d7 only for this test, the key thing is the database interaction
site := FullTestSites[2]
// If running this with GOTEST_SHORT we have to create the directory, tarball etc.
if site.Dir == "" || !fileutil.FileExists(site.Dir) {
err = site.Prepare()
require.NoError(t, err)
}
switchDir := site.Chdir()
defer switchDir()
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
require.NoError(t, err)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-snapshot": {{"exec-host": "touch hello-post-snapshot-" + app.Name}}, "pre-snapshot": {{"exec-host": "touch hello-pre-snapshot-" + app.Name}}}
// Try using php72 to avoid SIGBUS failures after restore.
app.PHPVersion = nodeps.PHP72
// First do regular start, which is good enough to get us to an ImportDB()
err = app.Start()
require.NoError(t, err)
//nolint: errcheck
defer app.Stop(true, false)
err = app.ImportDB(d7testerTest1Dump, "", false, false, "db")
require.NoError(t, err, "Failed to app.ImportDB path: %s err: %v", d7testerTest1Dump, err)
err = app.StartAndWait(2)
require.NoError(t, err, "app.Start() failed on site %s, err=%v", site.Name, err)
resp, ensureErr := testcommon.EnsureLocalHTTPContent(t, app.GetHTTPSURL(), "d7 tester test 1 has 1 node", 45)
assert.NoError(ensureErr)
if ensureErr != nil && strings.Contains(ensureErr.Error(), "container failed") {
logs, err := ddevapp.GetErrLogsFromApp(app, ensureErr)
assert.NoError(err)
t.Fatalf("container failed: logs:\n=======\n%s\n========\n", logs)
}
require.NotNil(t, resp)
if ensureErr != nil && resp.StatusCode != 200 {
logs, err := app.CaptureLogs("web", false, "")
assert.NoError(err)
t.Fatalf("EnsureLocalHTTPContent received %d. Resp=%v, web logs=\n========\n%s\n=========\n", resp.StatusCode, resp, logs)
}
// Make a snapshot of d7 tester test 1
backupsDir := filepath.Join(app.GetConfigPath(""), "db_snapshots")
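// db_snapshots holds one directory per snapshot; xtrabackup writes xtrabackup_info at the
// end of a successful backup, so its presence is a reasonable completeness check.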
snapshotName, err := app.Snapshot("d7testerTest1")
assert.NoError(err)
assert.EqualValues(snapshotName, "d7testerTest1")
assert.True(fileutil.FileExists(filepath.Join(backupsDir, snapshotName, "xtrabackup_info")))
assert.FileExists("hello-pre-snapshot-" + app.Name)
assert.FileExists("hello-post-snapshot-" + app.Name)
err = os.Remove("hello-pre-snapshot-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-snapshot-" + app.Name)
assert.NoError(err)
err = app.ImportDB(d7testerTest2Dump, "", false, false, "db")
assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", d7testerTest2Dump, err)
_, _ = testcommon.EnsureLocalHTTPContent(t, app.GetHTTPSURL(), "d7 tester test 2 has 2 nodes", 45)
snapshotName, err = app.Snapshot("d7testerTest2")
assert.NoError(err)
assert.EqualValues(snapshotName, "d7testerTest2")
assert.True(fileutil.FileExists(filepath.Join(backupsDir, snapshotName, "xtrabackup_info")))
app.Hooks = map[string][]ddevapp.YAMLTask{"post-restore-snapshot": {{"exec-host": "touch hello-post-restore-snapshot-" + app.Name}}, "pre-restore-snapshot": {{"exec-host": "touch hello-pre-restore-snapshot-" + app.Name}}}
err = app.RestoreSnapshot("d7testerTest1")
assert.NoError(err)
assert.FileExists("hello-pre-restore-snapshot-" + app.Name)
assert.FileExists("hello-post-restore-snapshot-" + app.Name)
err = os.Remove("hello-pre-restore-snapshot-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-restore-snapshot-" + app.Name)
assert.NoError(err)
_, _ = testcommon.EnsureLocalHTTPContent(t, app.GetHTTPSURL(), "d7 tester test 1 has 1 node", 45)
err = app.RestoreSnapshot("d7testerTest2")
assert.NoError(err)
body, resp, err := testcommon.GetLocalHTTPResponse(t, app.GetHTTPSURL(), 45)
assert.NoError(err, "GetLocalHTTPResponse returned err on rawurl %s: %v", app.GetHTTPSURL(), err)
assert.Contains(body, "d7 tester test 2 has 2 nodes")
if err != nil {
t.Logf("resp after timeout: %v", resp)
out, err := app.CaptureLogs("web", false, "")
assert.NoError(err)
t.Logf("web container logs after timeout: %s", out)
}
// Attempt a restore with a pre-mariadb_10.2 snapshot. It should fail and give a link.
oldSnapshotTarball, err := filepath.Abs(filepath.Join(testDir, "testdata", t.Name(), "restore_snapshot", "d7tester_test_1.snapshot_mariadb_10_1.tgz"))
assert.NoError(err)
err = archive.Untar(oldSnapshotTarball, filepath.Join(site.Dir, ".ddev", "db_snapshots"), "")
assert.NoError(err)
err = app.RestoreSnapshot("d7tester_test_1.snapshot_mariadb_10.1")
assert.Error(err)
assert.Contains(err.Error(), "is not compatible")
app.Hooks = nil
_ = app.WriteConfig()
err = app.Stop(true, false)
assert.NoError(err)
// TODO: Check behavior of ddev rm with snapshot, see if it has right stuff in it.
runTime()
}
// TestWriteableFilesDirectory tests to make sure that files created on the host are writable in the container
// and that files created in the container have the correct ownership on the host.
func TestWriteableFilesDirectory(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
site := TestSites[0]
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
err = app.StartAndWait(0)
assert.NoError(err)
uploadDir := app.GetUploadDir()
assert.NotEmpty(uploadDir)
// Use exec to touch a file in the container and see what the result is. Make sure it comes out with ownership
// making it writeable on the host.
filename := fileutil.RandomFilenameBase()
dirname := fileutil.RandomFilenameBase()
// Use path.Join for items in the container (linux) and filepath.Join for items on the host.
inContainerDir := path.Join(uploadDir, dirname)
onHostDir := filepath.Join(app.Docroot, inContainerDir)
// The container execution directory is dependent on the app type
switch app.Type {
case nodeps.AppTypeWordPress, nodeps.AppTypeTYPO3, nodeps.AppTypePHP:
inContainerDir = path.Join(app.Docroot, inContainerDir)
}
inContainerRelativePath := path.Join(inContainerDir, filename)
onHostRelativePath := filepath.Join(onHostDir, filename)
err = os.MkdirAll(onHostDir, 0775)
assert.NoError(err)
// Create a file in the directory to make sure it syncs
f, err := os.OpenFile(filepath.Join(onHostDir, "junk.txt"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0755)
assert.NoError(err)
_ = f.Close()
_, _, createFileErr := app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "echo 'content created inside container\n' >" + inContainerRelativePath,
})
assert.NoError(createFileErr)
// Now try to append to the file on the host.
// os.OpenFile() for append here fails if the file does not already exist.
f, err = os.OpenFile(onHostRelativePath, os.O_APPEND|os.O_WRONLY, 0660)
assert.NoError(err)
_, err = f.WriteString("this addition to the file was added on the host side")
assert.NoError(err)
_ = f.Close()
// Create a file on the host and see what the result is. Make sure we can not append/write to it in the container.
filename = fileutil.RandomFilenameBase()
dirname = fileutil.RandomFilenameBase()
inContainerDir = path.Join(uploadDir, dirname)
onHostDir = filepath.Join(app.Docroot, inContainerDir)
// The container execution directory is dependent on the app type
switch app.Type {
case nodeps.AppTypeWordPress, nodeps.AppTypeTYPO3, nodeps.AppTypePHP:
inContainerDir = path.Join(app.Docroot, inContainerDir)
}
inContainerRelativePath = path.Join(inContainerDir, filename)
onHostRelativePath = filepath.Join(onHostDir, filename)
err = os.MkdirAll(onHostDir, 0775)
assert.NoError(err)
f, err = os.OpenFile(onHostRelativePath, os.O_CREATE|os.O_RDWR, 0660)
assert.NoError(err)
_, err = f.WriteString("this base content was inserted on the host side\n")
assert.NoError(err)
_ = f.Close()
// if the file exists, add to it. We don't want to add if it's not already there.
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "if [ -f " + inContainerRelativePath + " ]; then echo 'content added inside container\n' >>" + inContainerRelativePath + "; fi",
})
assert.NoError(err)
// grep the file for both the content added on host and that added in container.
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "grep 'base content was inserted on the host' " + inContainerRelativePath + "&& grep 'content added inside container' " + inContainerRelativePath,
})
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
runTime()
switchDir()
}
// TestDdevImportFilesDir tests that "ddev import-files" can successfully import non-archive directories
func TestDdevImportFilesDir(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
// Create a dummy directory to test non-archive imports
importDir, err := ioutil.TempDir("", t.Name())
assert.NoError(err)
fileNames := make([]string, 0)
for i := 0; i < 5; i++ {
fileName := uuid.New().String()
fileNames = append(fileNames, fileName)
fullPath := filepath.Join(importDir, fileName)
err = ioutil.WriteFile(fullPath, []byte(fileName), 0644)
assert.NoError(err)
}
for _, site := range TestSites {
if site.FilesTarballURL == "" && site.FilesZipballURL == "" {
t.Logf("=== SKIP TestDdevImportFilesDir for %s (FilesTarballURL and FilesZipballURL are not provided)\n", site.Name)
continue
}
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
t.Logf("=== BEGIN TestDdevImportFilesDir for %s\n", site.Name)
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
// Function under test
err = app.ImportFiles(importDir, "")
assert.NoError(err, "Importing a directory returned an error:", err)
// Confirm contents of destination dir after import
absUploadDir := filepath.Join(app.AppRoot, app.Docroot, app.GetUploadDir())
uploadedFiles, err := ioutil.ReadDir(absUploadDir)
assert.NoError(err)
uploadedFilesMap := map[string]bool{}
for _, uploadedFile := range uploadedFiles {
uploadedFilesMap[filepath.Base(uploadedFile.Name())] = true
}
for _, expectedFile := range fileNames {
assert.True(uploadedFilesMap[expectedFile], "Expected file %s not found for site: %s", expectedFile, site.Name)
}
runTime()
switchDir()
}
}
// TestDdevImportFiles tests the functionality that is called when "ddev import-files" is executed
func TestDdevImportFiles(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
for _, site := range TestSites {
if site.FilesTarballURL == "" && site.FilesZipballURL == "" && site.FullSiteTarballURL == "" {
t.Logf("=== SKIP TestDdevImportFiles for %s (FilesTarballURL and FilesZipballURL are not provided)\n", site.Name)
continue
}
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-import-files": {{"exec-host": "touch hello-post-import-files-" + app.Name}}, "pre-import-files": {{"exec-host": "touch hello-pre-import-files-" + app.Name}}}
if site.FilesTarballURL != "" {
_, tarballPath, err := testcommon.GetCachedArchive(site.Name, "local-tarballs-files", "", site.FilesTarballURL)
assert.NoError(err)
err = app.ImportFiles(tarballPath, "")
assert.NoError(err)
}
if site.FilesZipballURL != "" {
_, zipballPath, err := testcommon.GetCachedArchive(site.Name, "local-zipballs-files", "", site.FilesZipballURL)
assert.NoError(err)
err = app.ImportFiles(zipballPath, "")
assert.NoError(err)
}
if site.FullSiteTarballURL != "" && site.FullSiteArchiveExtPath != "" {
_, siteTarPath, err := testcommon.GetCachedArchive(site.Name, "local-site-tar", "", site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportFiles(siteTarPath, site.FullSiteArchiveExtPath)
assert.NoError(err)
}
assert.FileExists("hello-pre-import-files-" + app.Name)
assert.FileExists("hello-post-import-files-" + app.Name)
err = os.Remove("hello-pre-import-files-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-import-files-" + app.Name)
assert.NoError(err)
runTime()
switchDir()
}
}
// TestDdevImportFilesCustomUploadDir ensures that files are imported to a custom upload directory when requested
func TestDdevImportFilesCustomUploadDir(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
for _, site := range TestSites {
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
t.Logf("=== BEGIN TestDdevImportFilesCustomUploadDir for %s\n", site.Name)
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// Set custom upload dir
app.UploadDir = "my/upload/dir"
absUploadDir := filepath.Join(app.AppRoot, app.Docroot, app.UploadDir)
err = os.MkdirAll(absUploadDir, 0755)
assert.NoError(err)
if site.FilesTarballURL != "" {
_, tarballPath, err := testcommon.GetCachedArchive(site.Name, "local-tarballs-files", "", site.FilesTarballURL)
assert.NoError(err)
err = app.ImportFiles(tarballPath, "")
assert.NoError(err)
// Ensure upload dir isn't empty
fileInfoSlice, err := ioutil.ReadDir(absUploadDir)
assert.NoError(err)
assert.NotEmpty(fileInfoSlice)
}
if site.FilesZipballURL != "" {
_, zipballPath, err := testcommon.GetCachedArchive(site.Name, "local-zipballs-files", "", site.FilesZipballURL)
assert.NoError(err)
err = app.ImportFiles(zipballPath, "")
assert.NoError(err)
// Ensure upload dir isn't empty
fileInfoSlice, err := ioutil.ReadDir(absUploadDir)
assert.NoError(err)
assert.NotEmpty(fileInfoSlice)
}
if site.FullSiteTarballURL != "" && site.FullSiteArchiveExtPath != "" {
_, siteTarPath, err := testcommon.GetCachedArchive(site.Name, "local-site-tar", "", site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportFiles(siteTarPath, site.FullSiteArchiveExtPath)
assert.NoError(err)
// Ensure upload dir isn't empty
fileInfoSlice, err := ioutil.ReadDir(absUploadDir)
assert.NoError(err)
assert.NotEmpty(fileInfoSlice)
}
runTime()
switchDir()
}
}
// TestDdevExec tests the execution of commands inside a docker container of a site.
func TestDdevExec(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
testDir, _ := os.Getwd()
for index, site := range TestSites {
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevExec", site.Name))
if index == 0 {
err := fileutil.CopyFile(filepath.Join(testDir, "testdata", t.Name(), "docker-compose.busybox.yaml"), filepath.Join(site.Dir, ".ddev", "docker-compose.busybox.yaml"))
defer func() {
err = os.RemoveAll(filepath.Join(site.Dir, ".ddev", "docker-compose.busybox.yaml"))
assert.NoError(err)
}()
assert.NoError(err)
}
err := app.Init(site.Dir)
assert.NoError(err)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-exec": {{"exec-host": "touch hello-post-exec-" + app.Name}}, "pre-exec": {{"exec-host": "touch hello-pre-exec-" + app.Name}}}
defer func() {
app.Hooks = nil
_ = app.Stop(true, false)
_ = app.WriteConfig()
}()
startErr := app.Start()
if startErr != nil {
logs, err := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(err)
t.Fatalf("app.Start() failed err=%v, logs from broken container:\n=======\n%s\n========\n", startErr, logs)
}
out, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "pwd",
})
assert.NoError(err)
assert.Contains(out, "/var/www/html")
assert.FileExists("hello-pre-exec-" + app.Name)
assert.FileExists("hello-post-exec-" + app.Name)
err = os.Remove("hello-pre-exec-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-exec-" + app.Name)
assert.NoError(err)
out, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/usr/local",
Cmd: "pwd",
})
assert.NoError(err)
assert.Contains(out, "/usr/local")
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "mysql -e 'DROP DATABASE db;'",
})
assert.NoError(err)
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "db",
Cmd: "mysql information_schema -e 'CREATE DATABASE db;'",
})
assert.NoError(err)
switch app.GetType() {
case nodeps.AppTypeDrupal6:
fallthrough
case nodeps.AppTypeDrupal7:
fallthrough
case nodeps.AppTypeDrupal8:
out, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "drush status",
})
assert.NoError(err)
assert.Regexp("PHP configuration[ :]*/etc/php/[0-9].[0-9]/cli/php.ini", out)
case nodeps.AppTypeWordPress:
out, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "wp --info",
})
assert.NoError(err)
assert.Regexp("/etc/php.*/php.ini", out)
// Make sure error works for unset env vars, etc.
_, stderr, err := app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "echo $ENVDOESNOTEXIST",
})
assert.Error(err)
assert.Contains(stderr, "ENVDOESNOTEXIST: unbound variable")
}
// Make sure that exec works on non-ddev container like busybox as well
if index == 0 {
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "busybox",
Cmd: "ls | grep bin",
})
assert.NoError(err)
_, stderr, err := app.Exec(&ddevapp.ExecOpts{
Service: "busybox",
Cmd: "echo $ENVDOESNOTEXIST",
})
assert.Error(err)
assert.Contains(stderr, "parameter not set")
_, stderr, err = app.Exec(&ddevapp.ExecOpts{
Service: "busybox",
Cmd: "this is an error;",
})
assert.Error(err)
assert.Contains(stderr, "this: not found")
}
err = app.Stop(true, false)
assert.NoError(err)
runTime()
switchDir()
}
}
// TestDdevLogs tests the container log output functionality.
func TestDdevLogs(t *testing.T) {
assert := asrt.New(t)
// Skip this test on Windows because CaptureUserOut() hangs there, at least
// sometimes.
if runtime.GOOS == "windows" {
t.Skip("Skipping test TestDdevLogs on Windows")
}
app := &ddevapp.DdevApp{}
site := TestSites[0]
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s DdevLogs", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
startErr := app.StartAndWait(0)
if startErr != nil {
logs, err := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(err)
t.Fatalf("app.Start failed, err=%v, logs=\n========\n%s\n===========\n", startErr, logs)
}
out, err := app.CaptureLogs("web", false, "")
assert.NoError(err)
assert.Contains(out, "Server started")
out, err = app.CaptureLogs("db", false, "")
assert.NoError(err)
assert.Contains(out, "MySQL init process done. Ready for start up.")
// Test that we can get logs when project is stopped also
err = app.Pause()
assert.NoError(err)
out, err = app.CaptureLogs("web", false, "")
assert.NoError(err)
assert.Contains(out, "Server started")
out, err = app.CaptureLogs("db", false, "")
assert.NoError(err)
assert.Contains(out, "MySQL init process done. Ready for start up.")
runTime()
switchDir()
}
// TestProcessHooks tests execution of commands defined in config.yaml
func TestProcessHooks(t *testing.T) {
assert := asrt.New(t)
site := TestSites[0]
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), t.Name())
testcommon.ClearDockerEnv()
app, err := ddevapp.NewApp(site.Dir, true, nodeps.ProviderDefault)
assert.NoError(err)
defer func() {
_ = app.Stop(true, false)
app.Hooks = nil
_ = app.WriteConfig()
switchDir()
}()
err = app.Start()
assert.NoError(err)
// Note that any ExecHost commands must be able to run on Windows.
// echo and pwd are things that work pretty much the same in both places.
app.Hooks = map[string][]ddevapp.YAMLTask{
"hook-test": {
{"exec": "ls /usr/local/bin/composer"},
{"exec-host": "echo something"},
{"exec": "echo MYSQL_USER=${MYSQL_USER}", "service": "db"},
{"exec": "echo TestProcessHooks > /var/www/html/TestProcessHooks${DDEV_ROUTER_HTTPS_PORT}.txt"},
{"exec": "touch /var/tmp/TestProcessHooks && touch /var/www/html/touch_works_after_and.txt"},
},
}
captureOutputFunc, err := util.CaptureOutputToFile()
assert.NoError(err)
userOutFunc := util.CaptureUserOut()
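// CaptureOutputToFile captures the process's real stdout (so it catches output relayed
// back from the containers), while CaptureUserOut captures only ddev's user-facing status
// messages; the assertions below depend on that distinction.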
err = app.ProcessHooks("hook-test")
assert.NoError(err)
out := captureOutputFunc()
userOut := userOutFunc()
// Ignore color in output, can be different in different OS's
out = vtclean.Clean(out, false)
assert.Contains(userOut, "Executing hook-test hook")
assert.Contains(userOut, "Exec command 'ls /usr/local/bin/composer' in container/service 'web'")
assert.Contains(userOut, "Exec command 'echo something' on the host")
assert.Contains(userOut, "Exec command 'echo MYSQL_USER=${MYSQL_USER}' in container/service 'db'")
assert.Contains(out, "MYSQL_USER=db")
assert.Contains(userOut, "Exec command 'echo TestProcessHooks > /var/www/html/TestProcessHooks${DDEV_ROUTER_HTTPS_PORT}.txt' in container/service 'web'")
assert.Contains(userOut, "Exec command 'touch /var/tmp/TestProcessHooks && touch /var/www/html/touch_works_after_and.txt' in container/service 'web',")
assert.FileExists(filepath.Join(app.AppRoot, fmt.Sprintf("TestProcessHooks%s.txt", app.RouterHTTPSPort)))
assert.FileExists(filepath.Join(app.AppRoot, "touch_works_after_and.txt"))
err = app.Stop(true, false)
assert.NoError(err)
runTime()
}
// TestDdevPause tests the functionality that is called when "ddev pause" is executed
func TestDdevPause(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
site := TestSites[0]
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
err = app.StartAndWait(0)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-pause": {{"exec-host": "touch hello-post-pause-" + app.Name}}, "pre-pause": {{"exec-host": "touch hello-pre-pause-" + app.Name}}}
defer func() {
app.Hooks = nil
_ = app.WriteConfig()
_ = app.Stop(true, false)
}()
require.NoError(t, err)
err = app.Pause()
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "exited")
assert.NoError(err)
assert.True(check, "Container %s should be in exited state but is not", containerType)
}
assert.FileExists("hello-pre-pause-" + app.Name)
assert.FileExists("hello-post-pause-" + app.Name)
err = os.Remove("hello-pre-pause-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-pause-" + app.Name)
assert.NoError(err)
runTime()
switchDir()
}
// TestDdevStopMissingDirectory tests that the 'ddev stop' command works properly on sites with missing directories or ddev configs.
func TestDdevStopMissingDirectory(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping because unreliable on Windows")
}
assert := asrt.New(t)
site := TestSites[0]
testcommon.ClearDockerEnv()
app := &ddevapp.DdevApp{}
err := app.Init(site.Dir)
assert.NoError(err)
startErr := app.StartAndWait(0)
//nolint: errcheck
defer app.Stop(true, false)
if startErr != nil {
logs, err := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(err)
t.Fatalf("app.StartAndWait failed err=%v logs from broken container: \n=======\n%s\n========\n", startErr, logs)
}
tempPath := testcommon.CreateTmpDir("site-copy")
siteCopyDest := filepath.Join(tempPath, "site")
defer removeAllErrCheck(tempPath, assert)
_ = app.Stop(false, false)
// Move the site directory to a temp location to mimic a missing directory.
err = os.Rename(site.Dir, siteCopyDest)
assert.NoError(err)
//nolint: errcheck
defer os.Rename(siteCopyDest, site.Dir)
// ddev stop (in cmd) actually does the check for missing project files,
// so we imitate that here.
err = ddevapp.CheckForMissingProjectFiles(app)
assert.Error(err)
if err != nil {
assert.Contains(err.Error(), "If you would like to continue using ddev to manage this project please restore your files to that directory.")
}
}
// TestDdevDescribe tests that the describe command works properly on a running
// and also a stopped project.
func TestDdevDescribe(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
site := TestSites[0]
switchDir := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
app.Hooks = map[string][]ddevapp.YAMLTask{"post-describe": {{"exec-host": "touch hello-post-describe-" + app.Name}}, "pre-describe": {{"exec-host": "touch hello-pre-describe-" + app.Name}}}
startErr := app.StartAndWait(0)
defer func() {
_ = app.Stop(true, false)
app.Hooks = nil
_ = app.WriteConfig()
}()
// If we have a problem starting, get the container logs and output.
if startErr != nil {
out, logsErr := app.CaptureLogs("web", false, "")
assert.NoError(logsErr)
healthcheck, inspectErr := exec.RunCommandPipe("sh", []string{"-c", fmt.Sprintf("docker inspect ddev-%s-web|jq -r '.[0].State.Health.Log[-1]'", app.Name)})
assert.NoError(inspectErr)
t.Fatalf("app.StartAndWait(%s) failed: %v, \nweb container healthcheck='%s', \n=== web container logs=\n%s\n=== END web container logs ===", site.Name, err, healthcheck, out)
}
desc, err := app.Describe(false)
assert.NoError(err)
assert.EqualValues(ddevapp.SiteRunning, desc["status"], "")
assert.EqualValues(app.GetName(), desc["name"])
assert.EqualValues(ddevapp.RenderHomeRootedDir(app.GetAppRoot()), desc["shortroot"])
assert.EqualValues(app.GetAppRoot(), desc["approot"])
assert.EqualValues(app.GetPhpVersion(), desc["php_version"])
assert.FileExists("hello-pre-describe-" + app.Name)
assert.FileExists("hello-post-describe-" + app.Name)
err = os.Remove("hello-pre-describe-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-describe-" + app.Name)
assert.NoError(err)
// Now stop it and test behavior.
err = app.Pause()
assert.NoError(err)
desc, err = app.Describe(false)
assert.NoError(err)
assert.EqualValues(ddevapp.SitePaused, desc["status"])
switchDir()
}
// TestDdevDescribeMissingDirectory tests that the describe command works properly on sites with missing directories or ddev configs.
func TestDdevDescribeMissingDirectory(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping because unreliable on Windows")
}
assert := asrt.New(t)
site := TestSites[0]
tempPath := testcommon.CreateTmpDir("site-copy")
siteCopyDest := filepath.Join(tempPath, "site")
defer removeAllErrCheck(tempPath, assert)
app := &ddevapp.DdevApp{}
err := app.Init(site.Dir)
assert.NoError(err)
startErr := app.StartAndWait(0)
//nolint: errcheck
defer app.Stop(true, false)
if startErr != nil {
logs, err := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(err)
t.Fatalf("app.StartAndWait failed err=%v logs from broken container: \n=======\n%s\n========\n", startErr, logs)
}
// Move the site directory to a temp location to mimic a missing directory.
err = app.Stop(false, false)
assert.NoError(err)
err = os.Rename(site.Dir, siteCopyDest)
assert.NoError(err)
desc, err := app.Describe(false)
assert.NoError(err)
assert.Contains(desc["status"], ddevapp.SiteDirMissing, "Status did not include the phrase '%s' when describing a site with missing directories.", ddevapp.SiteDirMissing)
// Move the site directory back to its original location.
err = os.Rename(siteCopyDest, site.Dir)
assert.NoError(err)
}
// TestRouterPortsCheck makes sure that we can detect if the ports are available before starting the router.
func TestRouterPortsCheck(t *testing.T) {
assert := asrt.New(t)
// First, stop any sites that might be running
app := &ddevapp.DdevApp{}
// Stop/Remove all sites, which should get the router out of there.
for _, site := range TestSites {
switchDir := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
if app.SiteStatus() == ddevapp.SiteRunning || app.SiteStatus() == ddevapp.SitePaused {
err = app.Stop(true, false)
assert.NoError(err)
}
switchDir()
}
// Now start one site, it's hard to get router to behave without one site.
site := TestSites[0]
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
startErr := app.StartAndWait(5)
//nolint: errcheck
defer app.Stop(true, false)
if startErr != nil {
appLogs, getLogsErr := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(getLogsErr)
t.Fatalf("app.StartAndWait() failure; err=%v logs:\n=====\n%s\n=====\n", startErr, appLogs)
}
app, err = ddevapp.GetActiveApp(site.Name)
if err != nil {
t.Fatalf("Failed to GetActiveApp(%s), err:%v", site.Name, err)
}
startErr = app.StartAndWait(5)
//nolint: errcheck
defer app.Stop(true, false)
if startErr != nil {
appLogs, getLogsErr := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(getLogsErr)
t.Fatalf("app.StartAndWait() failure err=%v logs:\n=====\n%s\n=====\n", startErr, appLogs)
}
// Stop the router using code from StopRouterIfNoContainers().
// StopRouterIfNoContainers can't be used here because it checks to see if containers are running
// and doesn't do its job as a result.
dest := ddevapp.RouterComposeYAMLPath()
_, _, err = dockerutil.ComposeCmd([]string{dest}, "-p", ddevapp.RouterProjectName, "down")
assert.NoError(err, "Failed to stop router using docker-compose, err=%v", err)
// Occupy port 80 using the docker busybox trick, then see if we can start the router.
// This is done with docker so that we don't have to use explicit sudo.
containerID, err := exec.RunCommand("sh", []string{"-c", "docker run -d -p80:80 --rm busybox:latest sleep 100 2>/dev/null"})
if err != nil {
t.Fatalf("Failed to run docker command to occupy port 80, err=%v output=%v", err, containerID)
}
containerID = strings.TrimSpace(containerID)
// Now try to start the router. It should fail because the port is occupied.
err = ddevapp.StartDdevRouter()
assert.Error(err, "Failure: router started even though port 80 was occupied")
// Remove our dummy busybox docker container.
out, err := exec.RunCommand("docker", []string{"rm", "-f", containerID})
assert.NoError(err, "Failed to docker rm the port-occupier container, err=%v output=%v", err, out)
}
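// The occupy-then-release pattern above is handy beyond this test. A minimal
// sketch of it as a reusable helper, assuming the exec.RunCommand wrapper and the
// fmt/strings packages already used in this file; occupyPort is a hypothetical name.
func occupyPort(port string) (release func(), err error) {
	// Run a throwaway busybox container publishing the port; --rm cleans it up on
	// exit, and using docker avoids needing explicit sudo for low ports.
	out, err := exec.RunCommand("sh", []string{"-c", fmt.Sprintf("docker run -d -p%s:%s --rm busybox:latest sleep 100 2>/dev/null", port, port)})
	if err != nil {
		return nil, err
	}
	containerID := strings.TrimSpace(out)
	return func() {
		// Force-remove the container to free the port again.
		_, _ = exec.RunCommand("docker", []string{"rm", "-f", containerID})
	}, nil
}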
// TestCleanupWithoutCompose ensures app containers can be properly cleaned up without a docker-compose config file present.
func TestCleanupWithoutCompose(t *testing.T) {
assert := asrt.New(t)
// Skip test because we can't rename folders while they're in use if running on Windows.
if runtime.GOOS == "windows" {
t.Skip("Skipping test TestCleanupWithoutCompose on Windows")
}
site := TestSites[0]
revertDir := site.Chdir()
app := &ddevapp.DdevApp{}
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// Ensure we have a site started so we have something to clean up
startErr := app.StartAndWait(5)
//nolint: errcheck
defer app.Stop(true, false)
if startErr != nil {
appLogs, getLogsErr := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(getLogsErr)
t.Fatalf("app.StartAndWait failure; err=%v, logs:\n=====\n%s\n=====\n", startErr, appLogs)
}
// Set up by creating a temp directory and nesting a folder for our site.
tempPath := testcommon.CreateTmpDir("site-copy")
siteCopyDest := filepath.Join(tempPath, "site")
//nolint: errcheck
defer os.RemoveAll(tempPath)
//nolint: errcheck
defer revertDir()
// Move the site directory back to its original location.
//nolint: errcheck
defer os.Rename(siteCopyDest, site.Dir)
// Move the site directory to a temp directory to mimic a missing directory.
err = os.Rename(site.Dir, siteCopyDest)
assert.NoError(err)
// Call app.Stop() with the removeData parameter set to true.
// This gives us added test coverage over sites with missing directories
// by ensuring any associated database files get cleaned up as well.
err = app.Stop(true, false)
assert.NoError(err)
assert.Empty(globalconfig.DdevGlobalConfig.ProjectList[app.Name])
for _, containerType := range [3]string{"web", "db", "dba"} {
_, err := constructContainerName(containerType, app)
assert.Error(err)
}
// Ensure there are no volumes associated with this project
client := dockerutil.GetDockerClient()
volumes, err := client.ListVolumes(docker.ListVolumesOptions{})
assert.NoError(err)
for _, volume := range volumes {
assert.False(volume.Labels["com.docker.compose.project"] == "ddev"+strings.ToLower(app.GetName()))
}
}
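// The volume check above relies on docker-compose's project label. A minimal
// sketch of it as a helper, assuming the dockerutil wrapper and the go-dockerclient
// package imported as docker in this file; projectVolumes is a hypothetical name.
func projectVolumes(projectName string) ([]string, error) {
	client := dockerutil.GetDockerClient()
	volumes, err := client.ListVolumes(docker.ListVolumesOptions{})
	if err != nil {
		return nil, err
	}
	var names []string
	for _, v := range volumes {
		// docker-compose labels each volume with the lowercased project name.
		if v.Labels["com.docker.compose.project"] == "ddev"+strings.ToLower(projectName) {
			names = append(names, v.Name)
		}
	}
	return names, nil
}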
// TestGetAppsEmpty ensures that GetActiveProjects returns an empty list when no applications are running.
func TestGetAppsEmpty(t *testing.T) {
assert := asrt.New(t)
// Ensure test sites are removed
for _, site := range TestSites {
app := &ddevapp.DdevApp{}
switchDir := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
if app.SiteStatus() != ddevapp.SiteStopped {
err = app.Stop(true, false)
assert.NoError(err)
}
switchDir()
}
apps := ddevapp.GetActiveProjects()
assert.Equal(0, len(apps), "Expected to find no apps but found %d apps=%v", len(apps), apps)
}
// TestRouterNotRunning ensures the router is shut down after all sites are stopped.
// This depends on TestGetAppsEmpty() having shut everything down.
func TestRouterNotRunning(t *testing.T) {
assert := asrt.New(t)
containers, err := dockerutil.GetDockerContainers(false)
assert.NoError(err)
for _, container := range containers {
assert.NotEqual("ddev-router", dockerutil.ContainerName(container), "ddev-router was not supposed to be running but it was")
}
}
// TestListWithoutDir prevents regression where ddev list panics if one of the
// sites found is missing a directory
func TestListWithoutDir(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping because unreliable on Windows")
}
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
testcommon.ClearDockerEnv()
packageDir, _ := os.Getwd()
// startCount is the count of apps at the start of this adventure
apps := ddevapp.GetActiveProjects()
startCount := len(apps)
testDir := testcommon.CreateTmpDir("TestStartWithoutDdevConfig")
defer testcommon.CleanupDir(testDir)
err := os.MkdirAll(testDir+"/sites/default", 0777)
assert.NoError(err)
err = os.Chdir(testDir)
assert.NoError(err)
app, err := ddevapp.NewApp(testDir, true, nodeps.ProviderDefault)
assert.NoError(err)
app.Name = "junk"
app.Type = nodeps.AppTypeDrupal7
err = app.WriteConfig()
assert.NoError(err)
// Do a start on the configured site.
app, err = ddevapp.GetActiveApp("")
assert.NoError(err)
err = app.Start()
assert.NoError(err)
// Make sure we move out of the directory for Windows' sake
garbageDir := testcommon.CreateTmpDir("RestingHere")
defer testcommon.CleanupDir(garbageDir)
err = os.Chdir(garbageDir)
assert.NoError(err)
testcommon.CleanupDir(testDir)
apps = ddevapp.GetActiveProjects()
assert.EqualValues(len(apps), startCount+1)
// Build the whole table and make sure our missing app directory shows up.
// This could be done otherwise, but we'd have to go find the site in the
// array first.
table := ddevapp.CreateAppTable()
for _, site := range apps {
desc, err := site.Describe(false)
if err != nil {
t.Fatalf("Failed to describe site %s: %v", site.GetName(), err)
}
ddevapp.RenderAppRow(table, desc)
}
// testDir on Windows has backslashes in it, resulting in invalid regexp
// Remove them and use ., which is good enough.
testDirSafe := strings.Replace(testDir, "\\", ".", -1)
assert.Regexp(regexp.MustCompile("(?s)"+ddevapp.SiteDirMissing+".*"+testDirSafe), table.String())
err = app.Stop(true, false)
assert.NoError(err)
// Change back to package dir. Lots of things will have to be cleaned up
// in defers, and for windows we have to not be sitting in them.
err = os.Chdir(packageDir)
assert.NoError(err)
}
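// The backslash replacement above works, but regexp.QuoteMeta is the general
// tool for embedding literal paths in patterns. A minimal sketch, assuming the
// regexp package already imported here; safePathPattern is a hypothetical name.
func safePathPattern(path string) string {
	// QuoteMeta escapes every regexp metacharacter, backslashes included, so a
	// Windows path can be matched literally.
	return regexp.QuoteMeta(path)
}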
type URLRedirectExpectations struct {
scheme string
uri string
expectedRedirectURI string
}
// TestAppdirAlreadyInUse tests that trying to start a project in an already-used
// directory will fail
func TestAppdirAlreadyInUse(t *testing.T) {
assert := asrt.New(t)
originalProjectName := "originalproject"
secondProjectName := "secondproject"
// Create a temporary directory and switch to it.
tmpdir := testcommon.CreateTmpDir(t.Name())
app, err := ddevapp.NewApp(tmpdir, false, "")
require.NoError(t, err)
defer func() {
app.Name = originalProjectName
_ = app.Stop(true, false)
app.Name = secondProjectName
_ = app.Stop(true, false)
testcommon.Chdir(tmpdir)()
testcommon.CleanupDir(tmpdir)
}()
// Write/create the project with the name "originalproject"
app.Name = originalProjectName
require.NoError(t, err)
err = app.Start()
require.NoError(t, err)
// Now change the project name and look for the complaint
app.Name = secondProjectName
err = app.Start()
assert.Error(err)
assert.Contains(err.Error(), "already contains a project named "+originalProjectName)
err = app.Stop(true, false)
assert.NoError(err)
// Change back to original name
app.Name = originalProjectName
assert.NoError(err)
err = app.Start()
assert.NoError(err)
// Now stop and make sure the behavior is the same with everything stopped
err = app.Stop(false, false)
assert.NoError(err)
// Now change the project name again and look for the error
app.Name = secondProjectName
err = app.Start()
assert.Error(err)
assert.Contains(err.Error(), "already contains a project named "+originalProjectName)
}
// TestHttpsRedirection tests to make sure that webserver and php redirect to correct
// scheme (http or https).
func TestHttpsRedirection(t *testing.T) {
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
testcommon.ClearDockerEnv()
packageDir, _ := os.Getwd()
testDir := testcommon.CreateTmpDir(t.Name())
defer testcommon.CleanupDir(testDir)
appDir := filepath.Join(testDir, t.Name())
err := fileutil.CopyDir(filepath.Join(packageDir, "testdata", t.Name()), appDir)
assert.NoError(err)
err = os.Chdir(appDir)
assert.NoError(err)
app, err := ddevapp.NewApp(appDir, true, nodeps.ProviderDefault)
assert.NoError(err)
_ = app.Stop(true, false)
//nolint: errcheck
defer app.Stop(true, false)
expectations := []URLRedirectExpectations{
{"https", "/subdir", "/subdir/"},
{"https", "/redir_abs.php", "/landed.php"},
{"https", "/redir_relative.php", "/landed.php"},
{"http", "/subdir", "/subdir/"},
{"http", "/redir_abs.php", "/landed.php"},
{"http", "/redir_relative.php", "/landed.php"},
}
types := ddevapp.GetValidAppTypes()
webserverTypes := []string{nodeps.WebserverNginxFPM, nodeps.WebserverApacheFPM}
if os.Getenv("GOTEST_SHORT") != "" {
types = []string{nodeps.AppTypePHP, nodeps.AppTypeDrupal8}
webserverTypes = []string{nodeps.WebserverNginxFPM, nodeps.WebserverApacheFPM}
}
for _, projectType := range types {
// TODO: Fix the laravel config so it can do the redir_abs.php successfully on nginx-fpm
if projectType == nodeps.AppTypeLaravel {
t.Log("Skipping laravel because it can't pass absolute redirect test, fix config")
continue
}
for _, webserverType := range webserverTypes {
app.WebserverType = webserverType
app.Type = projectType
err = app.WriteConfig()
assert.NoError(err)
// Do a start on the configured site.
app, err = ddevapp.GetActiveApp("")
assert.NoError(err)
startErr := app.Start()
assert.NoError(startErr, "app.Start() failed with projectType=%s, webserverType=%s", projectType, webserverType)
if startErr != nil {
appLogs, getLogsErr := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(getLogsErr)
t.Fatalf("app.StartAndWait failure; err=%v \n===== container logs ===\n%s\n", startErr, appLogs)
}
// Test for directory redirects under https and http
for _, parts := range expectations {
reqURL := parts.scheme + "://" + strings.ToLower(app.GetHostname()) + parts.uri
//t.Logf("TestHttpsRedirection trying URL %s with webserver_type=%s", reqURL, webserverType)
out, resp, err := testcommon.GetLocalHTTPResponse(t, reqURL)
assert.NotNil(resp, "resp was nil for projectType=%s webserver_type=%s url=%s, err=%v, out='%s'", projectType, webserverType, reqURL, err, out)
if resp != nil {
locHeader := resp.Header.Get("Location")
expectedRedirect := parts.expectedRedirectURI
// However, if we're hitting redir_abs.php (or Apache serving a directory), the redirect will be the whole URL.
if strings.Contains(parts.uri, "redir_abs.php") || webserverType != nodeps.WebserverNginxFPM {
expectedRedirect = parts.scheme + "://" + strings.ToLower(app.GetHostname()) + parts.expectedRedirectURI
}
// Except that the PHP relative redirect is always relative.
if strings.Contains(parts.uri, "redir_relative.php") {
expectedRedirect = parts.expectedRedirectURI
}
assert.EqualValues(locHeader, expectedRedirect, "For project type=%s webserver_type=%s url=%s expected redirect %s != actual %s", projectType, webserverType, reqURL, expectedRedirect, locHeader)
}
}
}
}
// Change back to package dir. Lots of things will have to be cleaned up
// in defers, and for windows we have to not be sitting in them.
err = os.Chdir(packageDir)
assert.NoError(err)
}
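// The expected Location header in the loop above depends on the webserver and
// the redirect type. A minimal sketch of that decision extracted into a helper,
// mirroring the branches in the test; expectedLocation is a hypothetical name and
// it assumes the nodeps and strings packages from this file's imports.
func expectedLocation(e URLRedirectExpectations, webserverType, hostname string) string {
	// The PHP relative redirect always stays relative.
	if strings.Contains(e.uri, "redir_relative.php") {
		return e.expectedRedirectURI
	}
	// redir_abs.php, and Apache directory redirects, emit absolute URLs.
	if strings.Contains(e.uri, "redir_abs.php") || webserverType != nodeps.WebserverNginxFPM {
		return e.scheme + "://" + hostname + e.expectedRedirectURI
	}
	// nginx directory redirects are relative.
	return e.expectedRedirectURI
}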
// TestMultipleComposeFiles checks to see if a set of docker-compose files gets
// properly loaded in the right order, with .ddev/.ddev-docker-compose*yaml first and
// with docker-compose.override.yaml last.
func TestMultipleComposeFiles(t *testing.T) {
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
pwd, _ := os.Getwd()
testDir := testcommon.CreateTmpDir(t.Name())
//_ = os.Chdir(testDir)
defer testcommon.CleanupDir(testDir)
defer testcommon.Chdir(testDir)()
err := fileutil.CopyDir(filepath.Join(pwd, "testdata", t.Name(), ".ddev"), filepath.Join(testDir, ".ddev"))
assert.NoError(err)
// Make sure that valid yaml files get properly loaded in the proper order
app, err := ddevapp.NewApp(testDir, true, "")
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
err = app.WriteConfig()
assert.NoError(err)
_, err = app.ReadConfig(true)
require.NoError(t, err)
err = app.WriteDockerComposeYAML()
require.NoError(t, err)
app, err = ddevapp.NewApp(testDir, true, "")
assert.NoError(err)
//nolint: errcheck
defer app.Stop(true, false)
desc, err := app.Describe(false)
assert.NoError(err)
_ = desc
files, err := app.ComposeFiles()
assert.NoError(err)
require.NotEmpty(t, files)
assert.Equal(4, len(files))
require.Equal(t, app.GetConfigPath(".ddev-docker-compose-base.yaml"), files[0])
require.Equal(t, app.GetConfigPath("docker-compose.override.yaml"), files[len(files)-1])
require.NotEmpty(t, app.ComposeYaml)
require.True(t, len(app.ComposeYaml) > 0)
// Verify that the env var DUMMY_BASE got set by docker-compose.override.yaml
if services, ok := app.ComposeYaml["services"].(map[interface{}]interface{}); ok {
if w, ok := services["web"].(map[interface{}]interface{}); ok {
if env, ok := w["environment"].(map[interface{}]interface{}); ok {
// The docker-compose.override should have won with the value of DUMMY_BASE
assert.Equal("override", env["DUMMY_BASE"])
// But each of DUMMY_COMPOSE_ONE/TWO/OVERRIDE, which are unique,
// should come through fine.
assert.Equal("1", env["DUMMY_COMPOSE_ONE"])
assert.Equal("2", env["DUMMY_COMPOSE_TWO"])
assert.Equal("override", env["DUMMY_COMPOSE_OVERRIDE"])
} else {
t.Error("Failed to parse environment")
}
} else {
t.Error("failed to parse web service")
}
} else {
t.Error("Unable to access ComposeYaml[services]")
}
_, err = app.ComposeFiles()
assert.NoError(err)
}
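// Walking app.ComposeYaml with chained type assertions, as above, gets verbose.
// A minimal sketch of a lookup helper for that nested structure; composeEnvValue
// is a hypothetical name, and the map shape is assumed from the assertions in
// this test.
func composeEnvValue(composeYaml map[string]interface{}, service, key string) (interface{}, bool) {
	services, ok := composeYaml["services"].(map[interface{}]interface{})
	if !ok {
		return nil, false
	}
	svc, ok := services[service].(map[interface{}]interface{})
	if !ok {
		return nil, false
	}
	env, ok := svc["environment"].(map[interface{}]interface{})
	if !ok {
		return nil, false
	}
	v, ok := env[key]
	return v, ok
}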
// TestGetAllURLs ensures the GetAllURLs function returns the expected number of URLs,
// and includes the direct web container URLs.
func TestGetAllURLs(t *testing.T) {
assert := asrt.New(t)
site := TestSites[0]
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s GetAllURLs", site.Name))
testcommon.ClearDockerEnv()
app := new(ddevapp.DdevApp)
err := app.Init(site.Dir)
assert.NoError(err)
// Add some additional hostnames
app.AdditionalHostnames = []string{"sub1", "sub2", "sub3"}
err = app.WriteConfig()
assert.NoError(err)
err = app.StartAndWait(0)
require.NoError(t, err)
_, _, urls := app.GetAllURLs()
// Convert URLs to map[string]bool
urlMap := make(map[string]bool)
for _, u := range urls {
urlMap[u] = true
}
// We expect two URLs for each hostname (http/https) and two direct web container addresses.
expectedNumUrls := len(app.GetHostnames())*2 + 2
assert.Equal(len(urlMap), expectedNumUrls, "Unexpected number of URLs returned: %d", len(urlMap))
// Ensure urlMap contains direct address of the web container
webContainer, err := app.FindContainerByType("web")
assert.NoError(err)
require.NotEmpty(t, webContainer)
expectedDirectAddress := app.GetWebContainerDirectHTTPSURL()
if globalconfig.GetCAROOT() == "" {
expectedDirectAddress = app.GetWebContainerDirectHTTPURL()
}
exists := urlMap[expectedDirectAddress]
assert.True(exists, "URL list for app: %s does not contain direct web container address: %s", app.Name, expectedDirectAddress)
// Multiple projects can't run at the same time with these FQDNs configured, so we
// need to clean them up for tests that run later.
app.AdditionalFQDNs = []string{}
app.AdditionalHostnames = []string{}
err = app.WriteConfig()
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
runTime()
}
// TestWebserverType checks that webserver_type:apache-fpm does the right thing
func TestWebserverType(t *testing.T) {
assert := asrt.New(t)
for _, site := range TestSites {
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s TestWebserverType", site.Name))
app := new(ddevapp.DdevApp)
err := app.Init(site.Dir)
assert.NoError(err)
// Copy our phpinfo into the docroot of testsite.
pwd, err := os.Getwd()
assert.NoError(err)
err = fileutil.CopyFile(filepath.Join(pwd, "testdata", "servertype.php"), filepath.Join(app.AppRoot, app.Docroot, "servertype.php"))
assert.NoError(err)
for _, app.WebserverType = range []string{nodeps.WebserverApacheFPM, nodeps.WebserverNginxFPM} {
err = app.WriteConfig()
assert.NoError(err)
testcommon.ClearDockerEnv()
startErr := app.StartAndWait(30)
//nolint: errcheck
defer app.Stop(true, false)
if startErr != nil {
appLogs, getLogsErr := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(getLogsErr)
t.Fatalf("app.StartAndWait failure for WebserverType=%s; site.Name=%s; err=%v, logs:\n=====\n%s\n=====\n", app.WebserverType, site.Name, startErr, appLogs)
}
out, resp, err := testcommon.GetLocalHTTPResponse(t, app.GetWebContainerDirectHTTPURL()+"/servertype.php")
require.NoError(t, err)
expectedServerType := "Apache/2"
if app.WebserverType == nodeps.WebserverNginxFPM {
expectedServerType = "nginx"
}
require.NotEmpty(t, resp.Header["Server"])
require.NotEmpty(t, resp.Header["Server"][0])
assert.Contains(resp.Header["Server"][0], expectedServerType, "Server header for project=%s, app.WebserverType=%s should be %s", app.Name, app.WebserverType, expectedServerType)
assert.Contains(out, expectedServerType, "For app.WebserverType=%s phpinfo expected servertype.php to show %s", app.WebserverType, expectedServerType)
err = app.Stop(true, false)
assert.NoError(err)
}
// Set the webserver type back to whatever the default was so we don't break any following tests.
testVar := os.Getenv("DDEV_TEST_WEBSERVER_TYPE")
if testVar != "" {
app.WebserverType = testVar
err = app.WriteConfig()
assert.NoError(err)
}
runTime()
}
}
// TestInternalAndExternalAccessToURL checks that we can access content
// from the host and from inside the container by URL (with port)
func TestInternalAndExternalAccessToURL(t *testing.T) {
assert := asrt.New(t)
runTime := util.TimeTrack(time.Now(), t.Name())
site := TestSites[0]
app := new(ddevapp.DdevApp)
err := app.Init(site.Dir)
assert.NoError(err)
// Add some additional hostnames
app.AdditionalHostnames = []string{"sub1", "sub2", "sub3"}
app.AdditionalFQDNs = []string{"junker99.example.com"}
for _, pair := range []testcommon.PortPair{{HTTPPort: "80", HTTPSPort: "443"}, {HTTPPort: "8080", HTTPSPort: "8443"}} {
testcommon.ClearDockerEnv()
app.RouterHTTPPort = pair.HTTPPort
app.RouterHTTPSPort = pair.HTTPSPort
err = app.WriteConfig()
assert.NoError(err)
// Make sure that project is absolutely not running
err = app.Stop(true, false)
assert.NoError(err)
err = app.StartAndWait(5)
assert.NoError(err)
_, _, urls := app.GetAllURLs()
// Convert URLs to map[string]bool
urlMap := make(map[string]bool)
for _, u := range urls {
urlMap[u] = true
}
// We expect two URLs for each hostname (http/https) and two direct web container addresses.
expectedNumUrls := len(app.GetHostnames())*2 + 2
assert.Equal(len(urlMap), expectedNumUrls, "Unexpected number of URLs returned: %d", len(urlMap))
_, _, URLList := app.GetAllURLs()
URLList = append(URLList, "http://localhost", "http://localhost")
for _, item := range URLList {
// Make sure internal (web container) access is successful
parts, err := url.Parse(item)
assert.NoError(err)
// Only try it if not an IP address URL; those won't be right
hostParts := strings.Split(parts.Host, ".")
// Make sure access from host is successful
// But "localhost" is only for inside container.
if parts.Host != "localhost" {
_, _ = testcommon.EnsureLocalHTTPContent(t, item+site.Safe200URIWithExpectation.URI, site.Safe200URIWithExpectation.Expect)
}
if _, err := strconv.ParseInt(hostParts[0], 10, 64); err != nil {
out, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "web",
Cmd: "curl -sS --fail " + item + site.Safe200URIWithExpectation.URI,
})
assert.NoError(err, "failed curl to %s: %v", item+site.Safe200URIWithExpectation.URI, err)
assert.Contains(out, site.Safe200URIWithExpectation.Expect)
}
}
}
out, err := exec.RunCommand(DdevBin, []string{"list"})
assert.NoError(err)
t.Logf("\n=========== output of ddev list ==========\n%s\n============\n", out)
out, err = exec.RunCommand("docker", []string{"logs", "ddev-router"})
assert.NoError(err)
t.Logf("\n=========== output of docker logs ddev-router ==========\n%s\n============\n", out)
// Set the ports back to the defaults so we don't break any following tests.
app.RouterHTTPSPort = "443"
app.RouterHTTPPort = "80"
app.AdditionalFQDNs = []string{}
app.AdditionalHostnames = []string{}
err = app.WriteConfig()
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
runTime()
}
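// The ParseInt check above only inspects the first host label to guess at IP
// address URLs. A minimal alternative sketch using net.ParseIP, which handles
// full IPv4 and IPv6 hosts directly; it assumes the net package (not otherwise
// shown in this excerpt), and hostIsIP is a hypothetical name.
func hostIsIP(rawURL string) (bool, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return false, err
	}
	// ParseIP returns nil for anything that is not a literal IP address.
	return net.ParseIP(u.Hostname()) != nil, nil
}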
// TestCaptureLogs checks that app.CaptureLogs() works
func TestCaptureLogs(t *testing.T) {
assert := asrt.New(t)
site := TestSites[0]
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s CaptureLogs", site.Name))
app := ddevapp.DdevApp{}
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
logs, err := app.CaptureLogs("web", false, "100")
assert.NoError(err)
assert.Contains(logs, "INFO spawned")
err = app.Stop(true, false)
assert.NoError(err)
runTime()
}
// TestNFSMount tests ddev start functionality with nfs_mount_enabled: true
// This requires that the test machine have NFS shares working
// Tests using both app-specific nfs_mount_enabled and global nfs_mount_enabled
func TestNFSMount(t *testing.T) {
assert := asrt.New(t)
app := &ddevapp.DdevApp{}
// Make sure this leaves us in the original test directory
testDir, _ := os.Getwd()
//nolint: errcheck
defer os.Chdir(testDir)
site := TestSites[0]
switchDir := site.Chdir()
runTime := util.TimeTrack(time.Now(), fmt.Sprintf("%s %s", site.Name, t.Name()))
err := app.Init(site.Dir)
assert.NoError(err)
defer func() {
globalconfig.DdevGlobalConfig.NFSMountEnabledGlobal = false
_ = globalconfig.WriteGlobalConfig(globalconfig.DdevGlobalConfig)
app.NFSMountEnabled = false
_ = app.WriteConfig()
_ = app.Stop(true, false)
}()
t.Log("testing with global NFSMountEnabled")
globalconfig.DdevGlobalConfig.NFSMountEnabledGlobal = true
err = globalconfig.WriteGlobalConfig(globalconfig.DdevGlobalConfig)
assert.NoError(err)
// Run NewApp so that it picks up the global config, as it would in real life
app, err = ddevapp.NewApp(site.Dir, false, "")
assert.NoError(err)
verifyNFSMount(t, app)
t.Log("testing with app NFSMountEnabled")
globalconfig.DdevGlobalConfig.NFSMountEnabledGlobal = false
err = globalconfig.WriteGlobalConfig(globalconfig.DdevGlobalConfig)
assert.NoError(err)
// Run NewApp so that it picks up the global config, as it would in real life
app, err = ddevapp.NewApp(site.Dir, false, "")
assert.NoError(err)
app.NFSMountEnabled = true
verifyNFSMount(t, app)
runTime()
switchDir()
}
func verifyNFSMount(t *testing.T, app *ddevapp.DdevApp) {
assert := asrt.New(t)
err := app.Stop(true, false)
assert.NoError(err)
err = app.Start()
//nolint: errcheck
defer app.Stop(true, false)
require.NoError(t, err)
stdout, _, err := app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/var/www/html",
Cmd: "findmnt -T .",
})
assert.NoError(err)
source := app.AppRoot
if runtime.GOOS == "darwin" && fileutil.IsDirectory(filepath.Join("/System/Volumes/Data", app.AppRoot)) {
source = filepath.Join("/System/Volumes/Data", app.AppRoot)
}
assert.Contains(stdout, ":"+dockerutil.MassageWindowsNFSMount(source))
// Create a host-side dir symlink; give it a second to sync, and make sure it can be used in the container.
err = os.Symlink(".ddev", "nfslinked_.ddev")
assert.NoError(err)
// nolint: errcheck
defer os.Remove("nfslinked_.ddev")
time.Sleep(2 * time.Second)
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/var/www/html",
Cmd: "ls nfslinked_.ddev/config.yaml",
})
assert.NoError(err)
// Create a host-side file symlink; give it a second to sync, and make sure it can be used in the container.
err = os.Symlink(".ddev/config.yaml", "nfslinked_config.yaml")
assert.NoError(err)
// nolint: errcheck
defer os.Remove("nfslinked_config.yaml")
time.Sleep(2 * time.Second)
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/var/www/html",
Cmd: "ls nfslinked_config.yaml",
})
assert.NoError(err)
// Create a container-side dir symlink; give it a second to sync, and make sure it can be used on the host.
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/var/www/html",
Cmd: "ln -s .ddev nfscontainerlinked_ddev",
})
assert.NoError(err)
// nolint: errcheck
defer os.Remove("nfscontainerlinked_ddev")
time.Sleep(2 * time.Second)
assert.FileExists("nfscontainerlinked_ddev/config.yaml")
// Create a container-side file symlink; give it a second to sync, and make sure it can be used on the host.
_, _, err = app.Exec(&ddevapp.ExecOpts{
Service: "web",
Dir: "/var/www/html",
Cmd: "ln -s .ddev/config.yaml nfscontainerlinked_config.yaml",
})
assert.NoError(err)
// nolint: errcheck
defer os.Remove("nfscontainerlinked_config.yaml")
time.Sleep(2 * time.Second)
assert.FileExists("nfscontainerlinked_config.yaml")
}
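// macOS Catalina moved user data under /System/Volumes/Data, which changes the
// path NFS actually exports. A minimal sketch of the source-path adjustment used
// in verifyNFSMount above, assuming runtime, filepath, and fileutil from this
// file's imports; nfsExportSource is a hypothetical name.
func nfsExportSource(appRoot string) string {
	if runtime.GOOS == "darwin" {
		catalinaPath := filepath.Join("/System/Volumes/Data", appRoot)
		// Prefer the firmlinked location when it exists.
		if fileutil.IsDirectory(catalinaPath) {
			return catalinaPath
		}
	}
	return appRoot
}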
// TestHostDBPort tests to make sure that the host_db_port specification has the intended effect
func TestHostDBPort(t *testing.T) {
assert := asrt.New(t)
runTime := util.TimeTrack(time.Now(), t.Name())
defer runTime()
testDir, _ := os.Getwd()
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
app, err := ddevapp.NewApp(site.Dir, false, "")
assert.NoError(err)
showportPath := app.GetConfigPath("commands/host/showport")
err = os.MkdirAll(filepath.Dir(showportPath), 0755)
assert.NoError(err)
err = fileutil.CopyFile(filepath.Join(testDir, "testdata", t.Name(), "showport"), showportPath)
assert.NoError(err)
defer func() {
_ = os.RemoveAll(showportPath)
_ = app.Stop(true, false)
}()
// Make sure that everything works with and without
// an explicitly specified hostDBPort
for _, hostDBPort := range []string{"", "9998"} {
app.HostDBPort = hostDBPort
err = app.Start()
require.NoError(t, err)
desc, err := app.Describe(false)
assert.NoError(err)
dockerIP, err := dockerutil.GetDockerIP()
assert.NoError(err)
dbinfo := desc["dbinfo"].(map[string]interface{})
dbPort := dbinfo["published_port"].(int)
dbPortStr := strconv.Itoa(dbPort)
if app.HostDBPort != "" {
assert.EqualValues(app.HostDBPort, dbPortStr)
}
if !util.IsCommandAvailable("mysql") {
t.Log("Skipping mysql check because mysql tool not available")
} else {
// Running mysql against the container ensures that we can get there via the values
// in ddev describe
out, err := exec.RunCommand("mysql", []string{"--user=db", "--password=db", "--host=" + dockerIP, fmt.Sprintf("--port=%d", dbPort), "--database=db", `--execute=SELECT 1;`})
assert.NoError(err, "Failed to run mysql: %v", out)
out = strings.Replace(out, "\r", "", -1)
assert.Contains(out, "1\n1\n")
}
// Running the test host custom command "showport" ensures that the DDEV_HOST_DB_PORT
// is made available to host custom commands.
_, _ = exec.RunCommand(DdevBin, []string{})
out, err := exec.RunCommand(DdevBin, []string{"showport"})
assert.NoError(err)
assert.EqualValues("DDEV_HOST_DB_PORT="+dbPortStr, strings.Trim(out, "\n"))
}
}
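// Pulling the published db port out of app.Describe() output takes two type
// assertions, as shown above. A minimal sketch of that extraction as a helper;
// describedDBPort is a hypothetical name, and the dbinfo map layout is assumed
// from this test.
func describedDBPort(app *ddevapp.DdevApp) (int, error) {
	desc, err := app.Describe(false)
	if err != nil {
		return 0, err
	}
	dbinfo, ok := desc["dbinfo"].(map[string]interface{})
	if !ok {
		return 0, fmt.Errorf("describe output has no dbinfo map")
	}
	port, ok := dbinfo["published_port"].(int)
	if !ok {
		return 0, fmt.Errorf("dbinfo has no integer published_port")
	}
	return port, nil
}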
// TestPortSpecifications tests to make sure that one project can't step on the
// ports used by another
func TestPortSpecifications(t *testing.T) {
assert := asrt.New(t)
runTime := util.TimeTrack(time.Now(), fmt.Sprint("TestPortSpecifications"))
defer runTime()
testDir, _ := os.Getwd()
site0 := TestSites[0]
switchDir := site0.Chdir()
defer switchDir()
nospecApp := ddevapp.DdevApp{}
err := nospecApp.Init(site0.Dir)
assert.NoError(err)
err = nospecApp.WriteConfig()
require.NoError(t, err)
// Since host ports were not explicitly set in nospecApp, they shouldn't be in globalconfig.
require.Empty(t, globalconfig.DdevGlobalConfig.ProjectList[nospecApp.Name].UsedHostPorts)
err = nospecApp.Start()
assert.NoError(err)
//nolint: errcheck
defer nospecApp.Stop(true, false)
// Now that we have a working nospecApp with unspecified ephemeral ports, test that we
// can't use those ports while nospecApp is running
_ = os.Chdir(testDir)
ddevDir, _ := filepath.Abs("./testdata/TestPortSpecifications/.ddev")
specAppPath := testcommon.CreateTmpDir(t.Name() + "_specapp")
err = fileutil.CopyDir(ddevDir, filepath.Join(specAppPath, ".ddev"))
require.NoError(t, err, "could not copy to specAppPath %v", specAppPath)
specAPP, err := ddevapp.NewApp(specAppPath, false, "")
assert.NoError(err)
t.Cleanup(func() {
_ = specAPP.Stop(true, false)
err = os.RemoveAll(specAppPath)
assert.NoError(err)
})
// It should be able to WriteConfig and Start with the configured host ports it came up with
err = specAPP.WriteConfig()
assert.NoError(err)
err = specAPP.Start()
assert.NoError(err)
//nolint: errcheck
err = specAPP.Stop(false, false)
require.NoError(t, err)
// Verify that DdevGlobalConfig got updated properly
require.NotEmpty(t, globalconfig.DdevGlobalConfig.ProjectList[specAPP.Name])
require.NotEmpty(t, globalconfig.DdevGlobalConfig.ProjectList[specAPP.Name].UsedHostPorts)
// However, if we change the name to make it appear to be a
// different project, we should not be able to config or start
conflictApp, err := ddevapp.NewApp(specAppPath, false, "")
assert.NoError(err)
conflictApp.Name = "conflictapp"
t.Cleanup(func() {
_ = conflictApp.Stop(true, false)
})
err = conflictApp.WriteConfig()
assert.Error(err)
err = conflictApp.Start()
assert.Error(err, "Expected error starting conflictApp=%v", conflictApp)
// Now delete the specAPP and we should be able to use the conflictApp
err = specAPP.Stop(true, false)
assert.NoError(err)
assert.Empty(globalconfig.DdevGlobalConfig.ProjectList[specAPP.Name])
err = conflictApp.WriteConfig()
assert.NoError(err)
err = conflictApp.Start()
assert.NoError(err)
require.NotEmpty(t, globalconfig.DdevGlobalConfig.ProjectList[conflictApp.Name])
require.NotEmpty(t, globalconfig.DdevGlobalConfig.ProjectList[conflictApp.Name].UsedHostPorts)
}
// TestDdevGetProjects exercises GetProjects()
// It's only here for profiling at this point
func TestDdevGetProjects(t *testing.T) {
assert := asrt.New(t)
runTime := util.TimeTrack(time.Now(), fmt.Sprint(t.Name()))
defer runTime()
apps, err := ddevapp.GetProjects(false)
assert.NoError(err)
_ = apps
}
// TestCustomCerts makes sure that added custom certificates are respected and used
func TestCustomCerts(t *testing.T) {
assert := asrt.New(t)
// Force router stop - shouldn't be necessary
//dest := ddevapp.RouterComposeYAMLPath()
//_, _, err := dockerutil.ComposeCmd([]string{dest}, "-p", ddevapp.RouterProjectName, "down")
//assert.NoError(err)
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
app, err := ddevapp.NewApp(site.Dir, false, "")
assert.NoError(err)
certDir := app.GetConfigPath("custom_certs")
err = os.MkdirAll(certDir, 0755)
assert.NoError(err)
t.Cleanup(func() {
_ = os.RemoveAll(certDir)
_, _, err = app.Exec(&ddevapp.ExecOpts{
Cmd: "rm /mnt/ddev-global-cache/custom_certs/" + app.GetHostname() + "*",
})
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
})
// Start without cert and make sure normal DNS names are there
err = app.Start()
assert.NoError(err)
stdout, _, err := app.Exec(&ddevapp.ExecOpts{
Cmd: fmt.Sprintf("openssl s_client -connect %s:443 -servername %s </dev/null 2>/dev/null | openssl x509 -noout -text | perl -l -0777 -ne '@names=/\\bDNS:([^\\s,]+)/g; print join(\"\\n\", sort @names);'", app.GetHostname(), app.GetHostname()),
})
stdout = strings.Trim(stdout, "\n")
// This should be our regular wildcard cert
assert.Contains(stdout, "*.ddev.site")
// Now stop it so we can install a new custom cert.
err = app.Stop(true, false)
assert.NoError(err)
// Create a certfile/key in .ddev/custom_certs with just one DNS name in it
// mkcert --cert-file d9composer.ddev.site.crt --key-file d9composer.ddev.site.key d9composer.ddev.site
out, err := exec.RunCommand("mkcert", []string{"--cert-file", filepath.Join(certDir, app.GetHostname()+".crt"), "--key-file", filepath.Join(certDir, app.GetHostname()+".key"), app.GetHostname()})
assert.NoError(err, "mkcert command failed, out=%s", out)
err = app.Start()
assert.NoError(err)
stdout, _, err = app.Exec(&ddevapp.ExecOpts{
Cmd: fmt.Sprintf("openssl s_client -connect %s:443 -servername %s </dev/null 2>/dev/null | openssl x509 -noout -text | perl -l -0777 -ne '@names=/\\bDNS:([^\\s,]+)/g; print join(\"\\n\", sort @names);'", app.GetHostname(), app.GetHostname()),
})
stdout = strings.Trim(stdout, "\n")
// If we had the regular cert, there would be several things here including *.ddev.site
// But we should only see the hostname listed.
assert.Equal(app.GetHostname(), stdout)
}
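// The openssl/perl pipeline above extracts the served certificate's DNS SANs.
// The same check can be done natively from the host side; a minimal sketch using
// crypto/tls (assumed import, not shown in this excerpt), with certDNSNames as a
// hypothetical name. InsecureSkipVerify is safe here because we only inspect the
// certificate and make no trust decision.
func certDNSNames(addr, serverName string) ([]string, error) {
	conn, err := tls.Dial("tcp", addr, &tls.Config{
		ServerName:         serverName,
		InsecureSkipVerify: true,
	})
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	certs := conn.ConnectionState().PeerCertificates
	if len(certs) == 0 {
		return nil, fmt.Errorf("no peer certificates presented by %s", addr)
	}
	return certs[0].DNSNames, nil
}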
// TestDdevList tests the ddevapp.List() functionality
// It's only here for profiling at this point.
func TestDdevList(t *testing.T) {
ddevapp.List(true, false, 1)
}
// TestEnvironmentVariables tests to make sure that documented environment variables appear
// in the web container and on the host.
func TestEnvironmentVariables(t *testing.T) {
assert := asrt.New(t)
pwd, _ := os.Getwd()
customCmd := filepath.Join(pwd, "testdata", t.Name(), "showhostenvvar")
site := TestSites[0]
switchDir := site.Chdir()
defer switchDir()
app, err := ddevapp.NewApp(site.Dir, false, "")
assert.NoError(err)
customCmdDest := app.GetConfigPath("commands/host/" + "showhostenvvar")
err = os.MkdirAll(filepath.Dir(customCmdDest), 0755)
require.NoError(t, err)
err = fileutil.CopyFile(customCmd, customCmdDest)
require.NoError(t, err)
// This set of webContainerExpectations should be maintained to match the list in the docs
webContainerExpectations := map[string]string{
"DDEV_DOCROOT": app.GetDocroot(),
"DDEV_HOSTNAME": app.GetHostname(),
"DDEV_PHP_VERSION": app.PHPVersion,
"DDEV_PRIMARY_URL": app.GetPrimaryURL(),
"DDEV_PROJECT": app.Name,
"DDEV_PROJECT_TYPE": app.Type,
"DDEV_ROUTER_HTTP_PORT": app.RouterHTTPPort,
"DDEV_ROUTER_HTTPS_PORT": app.RouterHTTPSPort,
"DDEV_SITENAME": app.Name,
"DDEV_TLD": app.ProjectTLD,
"DDEV_WEBSERVER_TYPE": app.WebserverType,
}
err = app.Start()
require.NoError(t, err)
t.Cleanup(func() {
err = os.RemoveAll(customCmdDest)
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
})
for k, v := range webContainerExpectations {
envVal, _, err := app.Exec(&ddevapp.ExecOpts{
Cmd: fmt.Sprintf("echo ${%s}", k),
})
assert.NoError(err)
envVal = strings.Trim(envVal, "\n")
assert.Equal(v, envVal)
}
dbPort, err := app.GetPublishedPort("db")
dbPortStr := strconv.Itoa(dbPort)
if dbPortStr == "-1" || err != nil {
dbPortStr = ""
}
if app.HostDBPort != "" {
dbPortStr = app.HostDBPort
}
// This set of hostExpectations should be maintained in parallel with the documentation
hostExpectations := map[string]string{
"DDEV_APPROOT": app.AppRoot,
"DDEV_DOCROOT": app.GetDocroot(),
"DDEV_HOST_DB_PORT": dbPortStr,
"DDEV_HOST_HTTPS_PORT": app.HostHTTPSPort,
"DDEV_HOST_WEBSERVER_PORT": app.HostWebserverPort,
"DDEV_HOSTNAME": app.GetHostname(),
"DDEV_PHP_VERSION": app.PHPVersion,
"DDEV_PRIMARY_URL": app.GetPrimaryURL(),
"DDEV_PROJECT": app.Name,
"DDEV_PROJECT_TYPE": app.Type,
"DDEV_ROUTER_HTTP_PORT": app.RouterHTTPPort,
"DDEV_ROUTER_HTTPS_PORT": app.RouterHTTPSPort,
"DDEV_SITENAME": app.Name,
"DDEV_TLD": app.ProjectTLD,
"DDEV_WEBSERVER_TYPE": app.WebserverType,
}
for k, v := range hostExpectations {
envVal, err := exec.RunCommand(DdevBin, []string{"showhostenvvar", k})
assert.NoError(err, "could not run %s %s %s, result=%s", DdevBin, "showhostenvvar", k, envVal)
envVal = strings.Trim(envVal, "\n")
assert.Equal(v, envVal, "expected envvar $%s to equal '%s', but it was '%s'", k, v, envVal)
}
}
// constructContainerName builds a container name given the type (web/db/dba) and the app
func constructContainerName(containerType string, app *ddevapp.DdevApp) (string, error) {
container, err := app.FindContainerByType(containerType)
if err != nil {
return "", err
}
if container == nil {
return "", fmt.Errorf("No container exists for containerType=%s app=%v", containerType, app)
}
name := dockerutil.ContainerName(*container)
return name, nil
}
func removeAllErrCheck(path string, assert *asrt.Assertions) {
err := os.RemoveAll(path)
assert.NoError(err)
}
| 1 | 14,742 | Silly me. This shouldn't be done in the tests, but instead in the actual definition of ValidPHPVersions on arm64. Same with Mysql and MariaDB. | drud-ddev | go |
@@ -42,7 +42,8 @@ final class LatLonShapeBoundingBoxQuery extends ShapeQuery {
@Override
protected Relation relateRangeBBoxToQuery(int minXOffset, int minYOffset, byte[] minTriangle,
int maxXOffset, int maxYOffset, byte[] maxTriangle) {
- return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle);
+ return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle,
+ queryRelation == QueryRelation.INTERSECTS);
}
/** returns true if the query matches the encoded triangle */ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.document;
import org.apache.lucene.document.ShapeField.QueryRelation;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.geo.Rectangle2D;
import org.apache.lucene.index.PointValues.Relation;
/**
* Finds all previously indexed geo shapes that intersect the specified bounding box.
*
* <p>The field must be indexed using
* {@link org.apache.lucene.document.LatLonShape#createIndexableFields} added per document.
*
* @lucene.experimental
**/
final class LatLonShapeBoundingBoxQuery extends ShapeQuery {
final Rectangle rectangle;
final Rectangle2D rectangle2D;
public LatLonShapeBoundingBoxQuery(String field, QueryRelation queryRelation, double minLat, double maxLat, double minLon, double maxLon) {
super(field, queryRelation);
this.rectangle = new Rectangle(minLat, maxLat, minLon, maxLon);
this.rectangle2D = Rectangle2D.create(this.rectangle);
}
@Override
protected Relation relateRangeBBoxToQuery(int minXOffset, int minYOffset, byte[] minTriangle,
int maxXOffset, int maxYOffset, byte[] maxTriangle) {
return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle);
}
/** returns true if the query matches the encoded triangle */
@Override
protected boolean queryMatches(byte[] t, int[] scratchTriangle, QueryRelation queryRelation) {
// decode indexed triangle
ShapeField.decodeTriangle(t, scratchTriangle);
int aY = scratchTriangle[0];
int aX = scratchTriangle[1];
int bY = scratchTriangle[2];
int bX = scratchTriangle[3];
int cY = scratchTriangle[4];
int cX = scratchTriangle[5];
if (queryRelation == QueryRelation.WITHIN) {
return rectangle2D.containsTriangle(aX, aY, bX, bY, cX, cY);
}
return rectangle2D.intersectsTriangle(aX, aY, bX, bY, cX, cY);
}
@Override
public boolean equals(Object o) {
return sameClassAs(o) && equalsTo(getClass().cast(o));
}
@Override
protected boolean equalsTo(Object o) {
return super.equalsTo(o) && rectangle.equals(((LatLonShapeBoundingBoxQuery)o).rectangle);
}
@Override
public int hashCode() {
int hash = super.hashCode();
hash = 31 * hash + rectangle.hashCode();
return hash;
}
@Override
public String toString(String field) {
final StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName());
sb.append(':');
if (this.field.equals(field) == false) {
sb.append(" field=");
sb.append(this.field);
sb.append(':');
}
sb.append(rectangle.toString());
return sb.toString();
}
}
| 1 | 30,188 | Shouldn't this work as well for Disjoint? | apache-lucene-solr | java |
@@ -514,11 +514,11 @@ func (c *controller) finalizeOrder(ctx context.Context, cl acmecl.Interface, o *
// if it is already in the 'valid' state, as upon retry we will
// then retrieve the Certificate resource.
_, errUpdate := c.updateOrderStatus(ctx, cl, o)
- if acmeErr, ok := err.(*acmeapi.Error); ok {
+ if acmeErr, ok := errUpdate.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
- o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
+ o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", errUpdate)
return nil
}
} | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acmeorders
import (
"bytes"
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"time"
acmeapi "golang.org/x/crypto/acme"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"github.com/jetstack/cert-manager/pkg/acme"
acmecl "github.com/jetstack/cert-manager/pkg/acme/client"
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
logf "github.com/jetstack/cert-manager/pkg/logs"
)
const (
reasonSolver = "Solver"
reasonCreated = "Created"
)
var (
// RequeuePeriod is the default period after which an Order should be re-queued.
// It can be overridden in tests.
RequeuePeriod time.Duration = time.Second * 5
)
func (c *controller) Sync(ctx context.Context, o *cmacme.Order) (err error) {
log := logf.FromContext(ctx)
dbg := log.V(logf.DebugLevel)
oldOrder := o
o = o.DeepCopy()
defer func() {
if apiequality.Semantic.DeepEqual(oldOrder.Status, o.Status) {
dbg.Info("skipping updating resource as new status == existing status")
return
}
log.V(logf.DebugLevel).Info("updating Order resource status")
_, updateErr := c.cmClient.AcmeV1().Orders(o.Namespace).UpdateStatus(ctx, o, metav1.UpdateOptions{})
if updateErr != nil {
log.Error(err, "failed to update status")
err = utilerrors.NewAggregate([]error{err, updateErr})
return
}
dbg.Info("updated Order resource status successfully")
}()
genericIssuer, err := c.helper.GetGenericIssuer(o.Spec.IssuerRef, o.Namespace)
if err != nil {
return fmt.Errorf("error reading (cluster)issuer %q: %v", o.Spec.IssuerRef.Name, err)
}
cl, err := c.accountRegistry.GetClient(string(genericIssuer.GetUID()))
if err != nil {
return err
}
switch {
case acme.IsFailureState(o.Status.State):
log.V(logf.DebugLevel).Info("Doing nothing as Order is in a failed state")
// if the Order is failed there's nothing left for us to do, return nil
return nil
case o.Status.URL == "":
log.V(logf.DebugLevel).Info("Creating new ACME order as status.url is not set")
return c.createOrder(ctx, cl, o)
case o.Status.FinalizeURL == "":
log.V(logf.DebugLevel).Info("Updating Order status as status.finalizeURL is not set")
_, err := c.updateOrderStatus(ctx, cl, o)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
return nil
}
}
return err
case anyAuthorizationsMissingMetadata(o):
log.V(logf.DebugLevel).Info("Fetching Authorizations from ACME server as status.authorizations contains unpopulated authorizations")
return c.fetchMetadataForAuthorizations(ctx, o, cl)
case o.Status.State == cmacme.Valid && o.Status.Certificate == nil:
log.V(logf.DebugLevel).Info("Order is in a Valid state but the Certificate data is empty, fetching existing Certificate")
return c.fetchCertificateData(ctx, cl, o)
case o.Status.State == cmacme.Valid && len(o.Status.Certificate) > 0:
log.V(logf.DebugLevel).Info("Order has already been completed, cleaning up any owned Challenge resources")
// if the Order is valid and the certificate data has been set, clean
// up any owned Challenge resources and do nothing
return c.deleteAllChallenges(ctx, o)
}
dbg.Info("Computing list of Challenge resources that need to exist to complete this Order")
requiredChallenges, err := buildRequiredChallenges(ctx, cl, genericIssuer, o)
if err != nil {
log.Error(err, "Failed to determine the list of Challenge resources needed for the Order")
c.recorder.Eventf(o, corev1.EventTypeWarning, reasonSolver, "Failed to determine a valid solver configuration for the set of domains on the Order: %v", err)
return nil
}
dbg.Info("Determining if any challenge resources need to be created")
needToCreateChallenges, err := c.anyRequiredChallengesDoNotExist(requiredChallenges)
if err != nil {
return err
}
dbg.Info("Determining if any challenge resources need to be cleaned up")
needToDeleteChallenges, err := c.anyLeftoverChallengesExist(o, requiredChallenges)
if err != nil {
return err
}
switch {
case needToCreateChallenges:
log.V(logf.DebugLevel).Info("Creating additional Challenge resources to complete Order")
return c.createRequiredChallenges(ctx, o, requiredChallenges)
case needToDeleteChallenges:
log.V(logf.DebugLevel).Info("Deleting leftover Challenge resources no longer required by Order")
return c.deleteLeftoverChallenges(ctx, o, requiredChallenges)
}
// we know that this list only contains the 'required' challenges as we use
// the same lister above to determine whether we need to create or delete
// any Challenge resources
challenges, err := c.listOwnedChallenges(o)
if err != nil {
return err
}
acmeOrder, err := getACMEOrder(ctx, cl, o)
// The Order has probably been deleted; we cannot recover here.
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to retrieve the ACME order (4xx error) marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
return nil
}
}
if err != nil {
return err
}
switch {
case o.Status.State == cmacme.Ready:
log.V(logf.DebugLevel).Info("Finalizing Order as order state is 'Ready'")
return c.finalizeOrder(ctx, cl, o, genericIssuer)
case anyChallengesFailed(challenges):
// TODO (@munnerz): instead of waiting for the ACME server to mark this
// Order as failed, we could just mark the Order as failed as there is
// no way that we will attempt and continue the order anyway.
log.V(logf.DebugLevel).Info("Update Order status as at least one Challenge has failed")
_, err := c.updateOrderStatus(ctx, cl, o)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
return nil
}
}
return err
// anyChallengesFailed(challenges) == false is already implied by the above
// case, but explicitly check it in the following cases in case anything changes in future.
// This is to avoid stuck Orders in edge cases where all the Challenges have
// been finalized, but the ACME server has not yet updated the ACME Order's
// status to valid. This is not an expected behaviour from an ACME server
// https://tools.ietf.org/html/rfc8555#section-7.1.6
// https://github.com/jetstack/cert-manager/issues/2868
case !anyChallengesFailed(challenges) && allChallengesFinal(challenges) && acmeOrder.Status == acmeapi.StatusPending:
log.V(logf.InfoLevel).Info("All challenges in a final state, waiting for ACME server to update the status of the order...")
// This is probably not needed as at this point the Order's status
// should already be Pending, but set it anyway to be explicit.
c.setOrderState(&o.Status, string(cmacme.Pending))
key, err := cache.MetaNamespaceKeyFunc(o)
if err != nil {
log.Error(err, "failed to construct key for pending Order")
// We should never end up here as this error would have been
// encountered in informers callback already. This probably cannot
// be fixed by re-queueing. If we do start encountering this
// scenario, we should consider whether the Order should be marked
// as failed here.
return nil
}
// Re-queue the Order to be processed again after 5 seconds.
c.scheduledWorkQueue.Add(key, RequeuePeriod)
return nil
case !anyChallengesFailed(challenges) && allChallengesFinal(challenges):
log.V(logf.DebugLevel).Info("All challenges are in a final state, updating order state")
_, err := c.updateOrderStatus(ctx, cl, o)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
return nil
}
}
return err
}
log.V(logf.DebugLevel).Info("No action taken")
return nil
}
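// The pending branch above re-queues the Order by its cache key. A minimal
// sketch of that pattern as a helper, assuming the cache and time packages from
// this file's imports and the controller's scheduledWorkQueue; requeueAfter is a
// hypothetical name.
func (c *controller) requeueAfter(obj interface{}, d time.Duration) error {
	// MetaNamespaceKeyFunc produces the namespace/name key the workqueue expects.
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		return err
	}
	c.scheduledWorkQueue.Add(key, d)
	return nil
}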
func (c *controller) createOrder(ctx context.Context, cl acmecl.Interface, o *cmacme.Order) error {
log := logf.FromContext(ctx)
if o.Status.URL != "" {
return fmt.Errorf("refusing to recreate a new order for Order %q. Please create a new Order resource to initiate a new order", o.Name)
}
log.V(logf.DebugLevel).Info("order URL not set, submitting Order to ACME server")
dnsIdentifierSet := sets.NewString(o.Spec.DNSNames...)
if o.Spec.CommonName != "" {
dnsIdentifierSet.Insert(o.Spec.CommonName)
}
log.V(logf.DebugLevel).Info("build set of domains for Order", "domains", dnsIdentifierSet.List())
ipIdentifierSet := sets.NewString(o.Spec.IPAddresses...)
log.V(logf.DebugLevel).Info("build set of IPs for Order", "domains", dnsIdentifierSet.List())
authzIDs := acmeapi.DomainIDs(dnsIdentifierSet.List()...)
authzIDs = append(authzIDs, acmeapi.IPIDs(ipIdentifierSet.List()...)...)
// create a new order with the acme server
var options []acmeapi.OrderOption
if o.Spec.Duration != nil {
options = append(options, acmeapi.WithOrderNotAfter(c.clock.Now().Add(o.Spec.Duration.Duration)))
}
acmeOrder, err := cl.AuthorizeOrder(ctx, authzIDs, options...)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to create Order resource due to bad request, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to create Order: %v", err)
return nil
}
}
if err != nil {
return fmt.Errorf("error creating new order: %v", err)
}
log.V(logf.DebugLevel).Info("submitted Order to ACME server")
o.Status.URL = acmeOrder.URI
o.Status.FinalizeURL = acmeOrder.FinalizeURL
o.Status.Authorizations = constructAuthorizations(acmeOrder)
c.setOrderState(&o.Status, acmeOrder.Status)
return nil
}
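// The "is this a 4xx ACME error" check above is repeated throughout this
// controller. A minimal sketch of extracting it, assuming the acmeapi import
// from this file; isACME4xx is a hypothetical name.
func isACME4xx(err error) bool {
	// Only *acmeapi.Error carries a StatusCode; anything else is not an ACME
	// protocol error and should be retried normally.
	acmeErr, ok := err.(*acmeapi.Error)
	return ok && acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500
}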
func (c *controller) updateOrderStatus(ctx context.Context, cl acmecl.Interface, o *cmacme.Order) (*acmeapi.Order, error) {
acmeOrder, err := getACMEOrder(ctx, cl, o)
if err != nil {
return nil, err
}
// Work around a bug in the golang.org/x/crypto/acme implementation whereby the
// order's URI field will be empty when calling GetOrder due to the
// 'Location' header not being set on the response from the ACME server.
if acmeOrder.URI != "" {
o.Status.URL = acmeOrder.URI
}
o.Status.FinalizeURL = acmeOrder.FinalizeURL
c.setOrderState(&o.Status, acmeOrder.Status)
// once the 'authorizations' slice contains at least one item, it cannot be
// updated. If it does not contain any items, populate it with the list
// of authorizations returned on the Order.
if len(o.Status.Authorizations) == 0 {
o.Status.Authorizations = constructAuthorizations(acmeOrder)
}
return acmeOrder, nil
}
// setOrderState will set the 'State' field of the given Order to 's'.
// It will set the Order's failureTime field if the state provided is classed as
// a failure state.
func (c *controller) setOrderState(o *cmacme.OrderStatus, s string) {
o.State = cmacme.State(s)
// if the order is in a failure state, we should set the `failureTime` field
if acme.IsFailureState(o.State) {
t := metav1.NewTime(c.clock.Now())
o.FailureTime = &t
}
}
// constructAuthorizations will construct a slice of ACMEAuthorizations that must be
// completed for the given ACME order.
// It does *not* perform a query against the ACME server for each authorization
// named on the Order to fetch additional metadata, instead, use
// populateAuthorization on each authorization in turn.
func constructAuthorizations(o *acmeapi.Order) []cmacme.ACMEAuthorization {
authzs := make([]cmacme.ACMEAuthorization, len(o.AuthzURLs))
for i, url := range o.AuthzURLs {
authzs[i].URL = url
}
return authzs
}
func anyAuthorizationsMissingMetadata(o *cmacme.Order) bool {
for _, a := range o.Status.Authorizations {
if a.Identifier == "" {
return true
}
}
return false
}
func (c *controller) fetchMetadataForAuthorizations(ctx context.Context, o *cmacme.Order, cl acmecl.Interface) error {
log := logf.FromContext(ctx)
for i, authz := range o.Status.Authorizations {
// only fetch metadata for each authorization once
if authz.Identifier != "" {
continue
}
acmeAuthz, err := cl.GetAuthorization(ctx, authz.URL)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to fetch authorization metadata from acme server")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to fetch authorization: %v", err)
return nil
}
}
if err != nil {
return err
}
authz.InitialState = cmacme.State(acmeAuthz.Status)
authz.Identifier = acmeAuthz.Identifier.Value
authz.Wildcard = &acmeAuthz.Wildcard
authz.Challenges = make([]cmacme.ACMEChallenge, len(acmeAuthz.Challenges))
for i, acmech := range acmeAuthz.Challenges {
authz.Challenges[i].URL = acmech.URI
authz.Challenges[i].Token = acmech.Token
authz.Challenges[i].Type = acmech.Type
}
o.Status.Authorizations[i] = authz
}
return nil
}
func (c *controller) anyRequiredChallengesDoNotExist(requiredChallenges []cmacme.Challenge) (bool, error) {
for _, ch := range requiredChallenges {
_, err := c.challengeLister.Challenges(ch.Namespace).Get(ch.Name)
if apierrors.IsNotFound(err) {
return true, nil
}
if err != nil {
return false, err
}
}
return false, nil
}
func (c *controller) createRequiredChallenges(ctx context.Context, o *cmacme.Order, requiredChallenges []cmacme.Challenge) error {
for _, ch := range requiredChallenges {
_, err := c.cmClient.AcmeV1().Challenges(ch.Namespace).Create(ctx, &ch, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
continue
}
if err != nil {
return err
}
c.recorder.Eventf(o, corev1.EventTypeNormal, reasonCreated, "Created Challenge resource %q for domain %q", ch.Name, ch.Spec.DNSName)
}
return nil
}
func (c *controller) anyLeftoverChallengesExist(o *cmacme.Order, requiredChallenges []cmacme.Challenge) (bool, error) {
leftoverChallenges, err := c.determineLeftoverChallenges(o, requiredChallenges)
if err != nil {
return false, err
}
return len(leftoverChallenges) > 0, nil
}
func (c *controller) deleteLeftoverChallenges(ctx context.Context, o *cmacme.Order, requiredChallenges []cmacme.Challenge) error {
leftover, err := c.determineLeftoverChallenges(o, requiredChallenges)
if err != nil {
return err
}
for _, ch := range leftover {
if err := c.cmClient.AcmeV1().Challenges(ch.Namespace).Delete(ctx, ch.Name, metav1.DeleteOptions{}); err != nil {
return err
}
}
return nil
}
func (c *controller) deleteAllChallenges(ctx context.Context, o *cmacme.Order) error {
challenges, err := c.listOwnedChallenges(o)
if err != nil {
return err
}
for _, ch := range challenges {
if err := c.cmClient.AcmeV1().Challenges(ch.Namespace).Delete(ctx, ch.Name, metav1.DeleteOptions{}); err != nil {
return err
}
}
return nil
}
func (c *controller) determineLeftoverChallenges(o *cmacme.Order, requiredChallenges []cmacme.Challenge) ([]*cmacme.Challenge, error) {
requiredNames := map[string]struct{}{}
for _, ch := range requiredChallenges {
requiredNames[ch.Name] = struct{}{}
}
ownedChallenges, err := c.listOwnedChallenges(o)
if err != nil {
return nil, err
}
var leftover []*cmacme.Challenge
for _, ch := range ownedChallenges {
if _, ok := requiredNames[ch.Name]; ok {
continue
}
leftover = append(leftover, ch)
}
return leftover, nil
}
func (c *controller) listOwnedChallenges(o *cmacme.Order) ([]*cmacme.Challenge, error) {
chs, err := c.challengeLister.Challenges(o.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
var ownedChs []*cmacme.Challenge
for _, ch := range chs {
if !metav1.IsControlledBy(ch, o) {
continue
}
ownedChs = append(ownedChs, ch)
}
return ownedChs, nil
}
func (c *controller) finalizeOrder(ctx context.Context, cl acmecl.Interface, o *cmacme.Order, issuer cmapi.GenericIssuer) error {
log := logf.FromContext(ctx)
// Due to a bug in the initial release of this controller, we previously
// only supported DER encoded CSRs and not PEM encoded ones, as our API
// intends them to be.
// To work around this, we first attempt to decode the Request into DER bytes
// by running pem.Decode. If the PEM block is empty, we assume that the Request
// is DER encoded and continue to call FinalizeOrder.
var derBytes []byte
block, _ := pem.Decode(o.Spec.Request)
if block == nil {
log.V(logf.WarnLevel).Info("failed to parse Request as PEM data, attempting to treat Request as DER encoded for compatibility reasons")
derBytes = o.Spec.Request
} else {
derBytes = block.Bytes
}
certSlice, certURL, err := cl.CreateOrderCert(ctx, o.Status.FinalizeURL, derBytes, true)
// if an ACME error is returned and it's a 4xx error, mark this Order as
// failed and do not retry it until after applying the global backoff.
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to finalize Order resource due to bad request, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to finalize Order: %v", err)
return nil
}
}
// even if any other kind of error occurred, we always update the order
// status after calling Finalize - this allows us to record the current
// order's status on this order resource despite it not being returned
// directly by the acme client.
// This will catch cases where the Order cannot be finalized because it
// is already in the 'valid' state, as upon retry we will then retrieve
// the Certificate resource.
_, errUpdate := c.updateOrderStatus(ctx, cl, o)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
return nil
}
}
if errUpdate != nil {
return fmt.Errorf("error syncing order status: %v", errUpdate)
}
// check for errors from FinalizeOrder
if err != nil {
return fmt.Errorf("error finalizing order: %v", err)
}
if issuer.GetSpec().ACME != nil && issuer.GetSpec().ACME.PreferredChain != "" {
altURLs, err := cl.ListCertAlternates(ctx, certURL)
if err != nil {
return fmt.Errorf("error listing alternate certificate URLs: %w", err)
}
// Loop over all alternative chains
for _, altURL := range altURLs {
altChain, err := cl.FetchCert(ctx, altURL, true)
if err != nil {
return fmt.Errorf("error fetching alternate certificate chain from %s: %w", altURL, err)
}
// Loop over each cert in this alternative chain
for _, altCert := range altChain {
cert, err := x509.ParseCertificate(altCert)
if err != nil {
return fmt.Errorf("error parsing alternate certificate chain: %w", err)
}
log.V(logf.DebugLevel).WithValues("Issuer CN", cert.Issuer.CommonName).Info("Found alternative ACME bundle")
if cert.Issuer.CommonName == issuer.GetSpec().ACME.PreferredChain {
// if the issuer's CN matched the preferred chain it means this bundle is
// signed by the requested chain
log.V(logf.DebugLevel).WithValues("Issuer CN", cert.Issuer.CommonName).Info("Selecting alternative ACME bundle with a matching Common Name", "url", altURL)
return c.storeCertificateOnStatus(ctx, o, altChain)
}
}
}
// if no match is found we fall back to the default certificate chain;
// it is a *preferred* chain, after all
}
return c.storeCertificateOnStatus(ctx, o, certSlice)
}
func (c *controller) storeCertificateOnStatus(ctx context.Context, o *cmacme.Order, certs [][]byte) error {
log := logf.FromContext(ctx)
// encode the retrieved certificates (including the chain)
certBuffer := bytes.NewBuffer([]byte{})
for _, cert := range certs {
err := pem.Encode(certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: cert})
if err != nil {
log.Error(err, "invalid certificate data returned by ACME server")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Invalid certificate retrieved from ACME server: %v", err)
return nil
}
}
o.Status.Certificate = certBuffer.Bytes()
c.recorder.Event(o, corev1.EventTypeNormal, "Complete", "Order completed successfully")
return nil
}
func (c *controller) fetchCertificateData(ctx context.Context, cl acmecl.Interface, o *cmacme.Order) error {
log := logf.FromContext(ctx)
acmeOrder, err := c.updateOrderStatus(ctx, cl, o)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)
return nil
}
}
if err != nil {
return err
}
if acmeOrder == nil {
log.V(logf.WarnLevel).Info("Failed to fetch Order from ACME server as it no longer exists. Not retrying.")
return nil
}
// If the Order state has actually changed and we've not observed it,
// update the order status and let the change in the resource trigger
// a resync
if acmeOrder.Status != acmeapi.StatusValid {
return nil
}
certs, err := cl.FetchCert(ctx, acmeOrder.CertURL, true)
if acmeErr, ok := err.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
log.Error(err, "failed to retrieve issued certificate from ACME server")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve signed certificate: %v", err)
return nil
}
}
if err != nil {
return err
}
err = c.storeCertificateOnStatus(ctx, o, certs)
if err != nil {
return err
}
return nil
}
// getACMEOrder returns the ACME Order for an Order Custom Resource.
func getACMEOrder(ctx context.Context, cl acmecl.Interface, o *cmacme.Order) (*acmeapi.Order, error) {
log := logf.FromContext(ctx)
if o.Status.URL == "" {
return nil, fmt.Errorf("internal error: order URL not set")
}
log.V(logf.DebugLevel).Info("Fetching Order metadata from ACME server")
acmeOrder, err := cl.GetOrder(ctx, o.Status.URL)
if err != nil {
return nil, err
}
log.V(logf.DebugLevel).Info("Retrieved ACME order from server", "raw_data", acmeOrder)
return acmeOrder, nil
}
| 1 | 30,246 | You've not updated the `o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", err)` line below here when you changed this, so here we are checking `errUpdate` but will print the contents of `err` instead. | jetstack-cert-manager | go |
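A minimal sketch of the fix this comment asks for, reusing the names from the finalizeOrder function above (illustrative only, not the merged change): once the 4xx check is performed on errUpdate, the log call and the recorded Reason should reference errUpdate as well, otherwise the branch reports the finalize error instead of the status-update error.

if acmeErr, ok := errUpdate.(*acmeapi.Error); ok {
if acmeErr.StatusCode >= 400 && acmeErr.StatusCode < 500 {
// log and record the error that was actually checked (errUpdate, not err)
log.Error(errUpdate, "failed to update Order status due to a 4xx error, marking Order as failed")
c.setOrderState(&o.Status, string(cmacme.Errored))
o.Status.Reason = fmt.Sprintf("Failed to retrieve Order resource: %v", errUpdate)
return nil
}
}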
@@ -23,6 +23,9 @@ require 'socket'
module Selenium
module WebDriver
class SocketPoller
+ NOT_CONNECTED_ERRORS = [Errno::ECONNREFUSED, Errno::ENOTCONN, SocketError]
+ NOT_CONNECTED_ERRORS << Errno::EPERM if Platform.cygwin?
+
def initialize(host, port, timeout = 0, interval = 0.25)
@host = host
@port = Integer(port) | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require 'selenium/webdriver/common/platform'
require 'socket'
module Selenium
module WebDriver
class SocketPoller
def initialize(host, port, timeout = 0, interval = 0.25)
@host = host
@port = Integer(port)
@timeout = Float(timeout)
@interval = interval
end
#
# Returns true if the server is listening within the given timeout,
# false otherwise.
#
# @return [Boolean]
#
def connected?
with_timeout { listening? }
end
#
# Returns true if the server has stopped listening within the given timeout,
# false otherwise.
#
# @return [Boolean]
#
def closed?
with_timeout { !listening? }
end
private
CONNECT_TIMEOUT = 5
NOT_CONNECTED_ERRORS = [Errno::ECONNREFUSED, Errno::ENOTCONN, SocketError]
NOT_CONNECTED_ERRORS << Errno::EPERM if Platform.cygwin?
CONNECTED_ERRORS = [Errno::EISCONN]
CONNECTED_ERRORS << Errno::EINVAL if Platform.windows?
if Platform.jruby?
# we use a plain TCPSocket here since JRuby has issues select()ing on a connecting socket
# see http://jira.codehaus.org/browse/JRUBY-5165
def listening?
TCPSocket.new(@host, @port).close
true
rescue *NOT_CONNECTED_ERRORS
false
end
else
def listening?
addr = Socket.getaddrinfo(@host, @port, Socket::AF_INET, Socket::SOCK_STREAM)
sock = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
sockaddr = Socket.pack_sockaddr_in(@port, addr[0][3])
begin
sock.connect_nonblock sockaddr
rescue Errno::EINPROGRESS
retry if IO.select(nil, [sock], nil, CONNECT_TIMEOUT)
raise Errno::ECONNREFUSED
rescue *CONNECTED_ERRORS
# yay!
end
sock.close
true
rescue *NOT_CONNECTED_ERRORS
sock.close if sock
$stderr.puts [@host, @port].inspect if $DEBUG
false
end
end
def with_timeout
max_time = time_now + @timeout
(
return true if yield
wait
) until time_now > max_time
false
end
def wait
sleep @interval
end
# for testability
def time_now
Time.now
end
end # SocketPoller
end # WebDriver
end # Selenium
| 1 | 13,776 | Doesn't this need to include `Errno::ECONNRESET` to fix the issue? | SeleniumHQ-selenium | java |
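A hedged cross-language sketch of the reviewer's point (the actual fix would go in the Ruby NOT_CONNECTED_ERRORS list; this Go function is hypothetical and only illustrates the classification): a connection reset while polling should count as "server not listening yet", just like a refused connection.

package poller

import (
"errors"
"net"
"syscall"
"time"
)

// listening reports whether addr accepts TCP connections, treating
// ECONNREFUSED and ECONNRESET alike as "not connected yet" - the Go
// analogue of adding Errno::ECONNRESET to NOT_CONNECTED_ERRORS.
func listening(addr string, timeout time.Duration) (bool, error) {
conn, err := net.DialTimeout("tcp", addr, timeout)
if err != nil {
if errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, syscall.ECONNRESET) {
return false, nil
}
return false, err
}
conn.Close()
return true, nil
}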
@@ -301,6 +301,9 @@ public class Constants {
public static final String PROJECT_CACHE_SIZE_PERCENTAGE = "azkaban"
+ ".project_cache_size_percentage_of_disk";
+ public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE = "azkaban"
+ + ".project_cache_throttle_percentage";
+
// how many older versions of project files are kept in DB before deleting them
public static final String PROJECT_VERSION_RETENTION = "project.version.retention";
| 1 | /*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban;
import java.time.Duration;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*
* <p>Use '.' to separate namespaces and '_' to separate words in the same namespace, e.g.
* azkaban.job.some_key</p>
*/
public class Constants {
// Azkaban Flow Versions
public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0;
public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Flow 2.0 node type
public static final String NODE_TYPE = "type";
public static final String FLOW_NODE_TYPE = "flow";
// Flow 2.0 flow and job path delimiter
public static final String PATH_DELIMITER = ":";
// Job properties override suffix
public static final String JOB_OVERRIDE_SUFFIX = ".jor";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String DEFAULT_EXECUTOR_PORT_FILE = "executor.port";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
// Default flow trigger max wait time
public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10);
public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1);
// The flow exec id for a flow trigger instance which hasn't started a flow yet
public static final int UNASSIGNED_EXEC_ID = -1;
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
// Default locked flow error message
public static final String DEFAULT_LOCKED_FLOW_ERROR_MESSAGE =
"Flow %s in project %s is locked. This is either a repeatedly failing flow, or an ineffcient"
+ " flow. Please refer to the Dr. Elephant report for this flow for more information.";
// Default maximum number of concurrent runs for a single flow
public static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30;
// How often executors will poll new executions in Poll Dispatch model
public static final int DEFAULT_AZKABAN_POLLING_INTERVAL_MS = 1000;
// Executors can use cpu load calculated from this period to take/skip polling turns
public static final int DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = 60;
// Default value for the feature enable setting. To be backward compatible, this value is FALSE
public static final boolean DEFAULT_AZKABAN_RAMP_ENABLED = false;
// Due to the multi-instance AzkabanExec Server scenario, the ramp result has to be persisted into the DB.
// However, frequent data persistence sacrifices performance, while infrequent persistence limits data accuracy.
// This setting pushes the result into the DB every N finished ramped workflows.
public static final int DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20;
// Due to the multi-instance AzkabanExec Server scenario, the ramp result has to be persisted into the DB.
// However, frequent data persistence sacrifices performance, while infrequent persistence limits data accuracy.
// This setting pulls the result from the DB every N new ramped workflows.
public static final int DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50;
// Use the Polling Service to sync the ramp status across EXEC Servers.
public static final boolean DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED = false;
// How often executors will poll ramp status in Poll Dispatch model
public static final int DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL = 10;
public static class ConfigurationKeys {
public static final String AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties";
// Configures Azkaban to use new polling model for dispatching
public static final String AZKABAN_POLL_MODEL = "azkaban.poll.model";
public static final String AZKABAN_POLLING_INTERVAL_MS = "azkaban.polling.interval.ms";
public static final String AZKABAN_POLLING_LOCK_ENABLED = "azkaban.polling.lock.enabled";
public static final String AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE =
"azkaban.polling_criteria.flow_threads_available";
public static final String AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB =
"azkaban.polling_criteria.min_free_memory_gb";
public static final String AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT =
"azkaban.polling_criteria.max_cpu_utilization_pct";
public static final String AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC =
"azkaban.polling_criteria.cpu_load_period_sec";
// Configures properties for Azkaban executor health check
public static final String AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN = "azkaban.executor.healthcheck.interval.min";
public static final String AZKABAN_EXECUTOR_MAX_FAILURE_COUNT = "azkaban.executor.max.failurecount";
public static final String AZKABAN_ADMIN_ALERT_EMAIL = "azkaban.admin.alert.email";
// Configures Azkaban Flow Version in project YAML file
public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version";
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
/*
* Hadoop/Spark user job link.
* Example:
* a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id}
* b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id}
* c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs
* */
public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url";
public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url";
public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
// When these parameters are set, they are used to generate email links.
// If they are not set, jetty.hostname and jetty.port (or jetty.ssl.port if SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
public static final String EXECUTOR_PORT_FILE = "executor.portfile";
// To set a fixed port for executor-server. Otherwise some available port is used.
public static final String EXECUTOR_PORT = "executor.port";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
// Maximum number of tries to download a dependency (no more retry attempts will be made after this many download failures)
public static final String AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES = "azkaban.dependency.max.download.tries";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
// This really should be azkaban.storage.hdfs.project_root.uri
public static final String AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED = "azkaban.storage.cache.dependency.enabled";
public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI = "azkaban.storage.cache.dependency_root.uri";
public static final String AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI = "azkaban.storage.origin.dependency_root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
// Comma separated list of properties to propagate from flow to Event reporter metadata
public static final String AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE = "azkaban.event.reporting.propagateProperties";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable quartz scheduler and flow trigger if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential";
public static final String OAUTH_CREDENTIAL_NAME = "azkaban.oauth.credential";
public static final String SECURITY_USER_GROUP = "azkaban.security.user.group";
// dir to keep dependency plugins
public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir";
public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors";
public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow";
// list of whitelisted flows, with specific max number of concurrent runs. Format:
// <project 1>,<flow 1>,<number>;<project 2>,<flow 2>,<number>
public static final String CONCURRENT_RUNS_ONEFLOW_WHITELIST =
"azkaban.concurrent.runs.oneflow.whitelist";
public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
public static final String EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors";
public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters";
public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled";
public static final String SESSION_TIME_TO_LIVE = "session.time.to.live";
// allowed max number of sessions per user per IP
public static final String MAX_SESSION_NUMBER_PER_IP_PER_USER = "azkaban.session"
+ ".max_number_per_ip_per_user";
// allowed max size of shared project dir (percentage of partition size), e.g 0.8
public static final String PROJECT_CACHE_SIZE_PERCENTAGE = "azkaban"
+ ".project_cache_size_percentage_of_disk";
// how many older versions of project files are kept in DB before deleting them
public static final String PROJECT_VERSION_RETENTION = "project.version.retention";
// number of rows to be displayed on the executions page.
public static final String DISPLAY_EXECUTION_PAGE_SIZE = "azkaban.display.execution_page_size";
// locked flow error message. Parameters passed in are the flow name and project name.
public static final String AZKABAN_LOCKED_FLOW_ERROR_MESSAGE =
"azkaban.locked.flow.error.message";
// flow ramp related setting keys
// Default value to feature enable setting. To be backward compatible, this value === FALSE
public static final String AZKABAN_RAMP_ENABLED = "azkaban.ramp.enabled";
// Due to the multi-instance AzkabanExec Server scenario, the ramp result has to be persisted into the DB.
// However, frequent data persistence sacrifices performance, while infrequent persistence limits data accuracy.
// This setting pushes the result into the DB every N finished ramped workflows.
public static final String AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = "azkaban.ramp.status.push.interval.max";
// Due to the multi-instance AzkabanExec Server scenario, the ramp result has to be persisted into the DB.
// However, frequent data persistence sacrifices performance, while infrequent persistence limits data accuracy.
// This setting pulls the result from the DB every N new ramped workflows.
public static final String AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = "azkaban.ramp.status.pull.interval.max";
// A Polling Service can be applied to determine the ramp status synchronization interval.
public static final String AZKABAN_RAMP_STATUS_POLLING_ENABLED = "azkaban.ramp.status.polling.enabled";
public static final String AZKABAN_RAMP_STATUS_POLLING_INTERVAL = "azkaban.ramp.status.polling.interval";
public static final String AZKABAN_RAMP_STATUS_POLLING_CPU_MAX = "azkaban.ramp.status.polling.cpu.max";
public static final String AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN = "azkaban.ramp.status.polling.memory.min";
public static final String EXECUTION_LOGS_RETENTION_MS = "execution.logs.retention.ms";
public static final String EXECUTION_LOGS_CLEANUP_INTERVAL_SECONDS =
"execution.logs.cleanup.interval.seconds";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
* this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris(hcat servers) in a "cluster" ensures HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
* settings defined by the user, indicating whether there are hcat locations other than the
* default one that the system should pre-fetch hcat tokens from. Note: multiple thrift uris are
* supported; use commas to separate the values. Values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
// If true, AZ will fetch the jobs' certificate from the remote Certificate Authority.
public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl";
// If true, AZ will fetch OAuth token from credential provider
public static final String ENABLE_OAUTH = "azkaban.enable.oauth";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
// The hadoop user the job should run under. If not specified, it will default to submit user.
public static final String USER_TO_PROXY = "user.to.proxy";
/**
* Format string for Log4j's EnhancedPatternLayout
*/
public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
public static class FlowTriggerProps {
// Flow trigger props
public static final String SCHEDULE_TYPE = "type";
public static final String CRON_SCHEDULE_TYPE = "cron";
public static final String SCHEDULE_VALUE = "value";
public static final String SCHEDULE_TIMEZONE = "timezone";
public static final String DEP_NAME = "name";
// Flow trigger dependency run time props
public static final String START_TIME = "startTime";
public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId";
}
public static class PluginManager {
public static final String JOBTYPE_DEFAULTDIR = "plugins/jobtypes";
public static final String RAMPPOLICY_DEFAULTDIR = "plugins/ramppolicies";
// need jars.to.include property, will be loaded with user property
public static final String CONFFILE = "plugin.properties";
// not exposed to users
public static final String SYSCONFFILE = "private.properties";
// common properties for multiple plugins
public static final String COMMONCONFFILE = "common.properties";
// common private properties for multiple plugins
public static final String COMMONSYSCONFFILE = "commonprivate.properties";
}
}
| 1 | 19,469 | Please consider keeping the config key in one line as it will help in case someone is looking at how this config is used. public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE = "azkaban.project_cache_throttle_percentage"; | azkaban-azkaban | java |
@@ -46,6 +46,7 @@ type DataStore interface {
ListParentIDEntries(request *ListParentIDEntriesRequest) (*ListParentIDEntriesResponse, error)
ListSelectorEntries(request *ListSelectorEntriesRequest) (*ListSelectorEntriesResponse, error)
+ ListPowerSelectorEntries(request *ListSelectorEntriesRequest) (*ListSelectorEntriesResponse, error)
ListSpiffeEntries(request *ListSpiffeEntriesRequest) (*ListSpiffeEntriesResponse, error)
RegisterToken(request *JoinToken) (*common.Empty, error) | 1 | package datastore
import (
"net/rpc"
"time"
"github.com/golang/protobuf/ptypes/empty"
"github.com/hashicorp/go-plugin"
"github.com/spiffe/spire/proto/common"
"google.golang.org/grpc"
spi "github.com/spiffe/spire/proto/common/plugin"
)
const TimeFormat = time.RFC1123Z
// Handshake is a common handshake that is shared between the datastore plugin and host.
var Handshake = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "DataStore",
MagicCookieValue: "DataStore",
}
type DataStore interface {
CreateFederatedEntry(request *CreateFederatedEntryRequest) (*CreateFederatedEntryResponse, error)
ListFederatedEntry(request *ListFederatedEntryRequest) (*ListFederatedEntryResponse, error)
UpdateFederatedEntry(request *UpdateFederatedEntryRequest) (*UpdateFederatedEntryResponse, error)
DeleteFederatedEntry(request *DeleteFederatedEntryRequest) (*DeleteFederatedEntryResponse, error)
CreateAttestedNodeEntry(request *CreateAttestedNodeEntryRequest) (*CreateAttestedNodeEntryResponse, error)
FetchAttestedNodeEntry(request *FetchAttestedNodeEntryRequest) (*FetchAttestedNodeEntryResponse, error)
FetchStaleNodeEntries(request *FetchStaleNodeEntriesRequest) (*FetchStaleNodeEntriesResponse, error)
UpdateAttestedNodeEntry(request *UpdateAttestedNodeEntryRequest) (*UpdateAttestedNodeEntryResponse, error)
DeleteAttestedNodeEntry(request *DeleteAttestedNodeEntryRequest) (*DeleteAttestedNodeEntryResponse, error)
CreateNodeResolverMapEntry(request *CreateNodeResolverMapEntryRequest) (*CreateNodeResolverMapEntryResponse, error)
FetchNodeResolverMapEntry(request *FetchNodeResolverMapEntryRequest) (*FetchNodeResolverMapEntryResponse, error)
DeleteNodeResolverMapEntry(request *DeleteNodeResolverMapEntryRequest) (*DeleteNodeResolverMapEntryResponse, error)
RectifyNodeResolverMapEntries(request *RectifyNodeResolverMapEntriesRequest) (*RectifyNodeResolverMapEntriesResponse, error)
CreateRegistrationEntry(request *CreateRegistrationEntryRequest) (*CreateRegistrationEntryResponse, error)
FetchRegistrationEntry(request *FetchRegistrationEntryRequest) (*FetchRegistrationEntryResponse, error)
UpdateRegistrationEntry(request *UpdateRegistrationEntryRequest) (*UpdateRegistrationEntryResponse, error)
DeleteRegistrationEntry(request *DeleteRegistrationEntryRequest) (*DeleteRegistrationEntryResponse, error)
ListParentIDEntries(request *ListParentIDEntriesRequest) (*ListParentIDEntriesResponse, error)
ListSelectorEntries(request *ListSelectorEntriesRequest) (*ListSelectorEntriesResponse, error)
ListSpiffeEntries(request *ListSpiffeEntriesRequest) (*ListSpiffeEntriesResponse, error)
RegisterToken(request *JoinToken) (*common.Empty, error)
FetchToken(request *JoinToken) (*JoinToken, error)
DeleteToken(request *JoinToken) (*common.Empty, error)
PruneTokens(request *JoinToken) (*common.Empty, error)
Configure(request *spi.ConfigureRequest) (*spi.ConfigureResponse, error)
GetPluginInfo(request *spi.GetPluginInfoRequest) (*spi.GetPluginInfoResponse, error)
}
type DataStorePlugin struct {
DataStoreImpl DataStore
}
func (p DataStorePlugin) Server(*plugin.MuxBroker) (interface{}, error) {
return empty.Empty{}, nil
}
func (p DataStorePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
return empty.Empty{}, nil
}
func (p DataStorePlugin) GRPCServer(s *grpc.Server) error {
RegisterDataStoreServer(s, &GRPCServer{DataStoreImpl: p.DataStoreImpl})
return nil
}
func (p DataStorePlugin) GRPCClient(c *grpc.ClientConn) (interface{}, error) {
return &GRPCClient{client: NewDataStoreClient(c)}, nil
}
| 1 | 8,926 | This is a hard one, naming-wise... `ListPowerSelectorEntries` is logical given `ListSelectorEntries`. Another option could be `ListMatchingEntries` or even `FindMatchingEntries`. :shrug: | spiffe-spire | go |
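A sketch of one alternative the reviewer floats, written against the existing request/response types (the method name is the suggestion under discussion, not a settled API):

// Same signature as ListSelectorEntries; the name instead says what the
// power-set expansion is for: finding every entry whose selectors match.
type entryMatcher interface {
ListMatchingEntries(request *ListSelectorEntriesRequest) (*ListSelectorEntriesResponse, error)
}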
@@ -0,0 +1,4 @@
+package types
+
+// UVarint represent a 32-bit number encoded as varint.
+type UVarint uint | 1 | 1 | 22,033 | This should be called `Uint` or `Uint32` or simply using our `Uint64` type instead. All of our numbers have varint encoding. It's not inherently a varint. | filecoin-project-venus | go |
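A sketch of the direction this comment suggests (naming hypothetical): since every number in the codebase gets varint encoding on the wire, the encoding is a codec detail and the type can stay a plain integer.

// Uint64 is a plain unsigned integer; its varint encoding is handled by
// the serialization layer, so the name does not mention varint.
type Uint64 uint64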
|
@@ -1513,7 +1513,8 @@ class CommandDispatcher:
)
@cmdutils.register(instance='command-dispatcher', scope='window')
- def view_source(self, edit=False):
+ @cmdutils.argument('pygment')
+ def view_source(self, edit=False, pygment=False):
"""Show the source of the current page in a new tab.
Args: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
import typing
from PyQt5.QtWidgets import QApplication, QTabBar, QDialog
from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configdata
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, standarddir)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
related=False, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
private: If opening a new window, open it in private browsing mode.
If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, related=related)
elif background:
tabbed_browser.tabopen(url, background=True, related=related)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.val.tabs.select_on_remove
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs.select_on_remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select_on_remove value "
"{!r}!".format(conf_selection))
return None
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
tab: Tab object to select be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
"""
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to the size of its title text.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
self._tabbed_browser.set_tab_pinned(tab, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=urlmodel.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, related=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.val.url.default_page]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, related=related,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Yield:
URLs that can be opened.
"""
if isinstance(url, QUrl):
yield url
return
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
def _print(self, tab):
"""Print with a QPrintDialog."""
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print():
"""Called when the dialog was closed."""
tab.printing.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
if preview:
tab.printing.check_preview_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
self._print(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new tab widget.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs.tabs_are_windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.val.tabs.favicons.show:
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.val.tabs.tabs_are_windows:
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.set_tab_pinned(newtab, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.other_buffer)
def tab_take(self, index):
"""Take a tab from another window.
Args:
index: The [win_id/]index of the tab to take. Or a substring
in which case the closest match will be taken.
"""
tabbed_browser, tab = self._resolve_buffer_index(index)
if tabbed_browser is self._tabbed_browser:
raise cmdexc.CommandError("Can't take a tab from the same window")
self._open(tab.url(), tab=True)
tabbed_browser.close_tab(tab, add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('win_id', completion=miscmodels.window)
@cmdutils.argument('count', count=True)
def tab_give(self, win_id: int = None, count=None):
"""Give the current tab to a new or existing window if win_id given.
If no win_id is given, the tab will get detached into a new window.
Args:
win_id: The window ID of the window to give the current tab to.
count: Overrides win_id (index starts at 1 for win_id=0).
"""
if count is not None:
win_id = count - 1
if win_id == self._win_id:
raise cmdexc.CommandError("Can't give a tab to the same window")
if win_id is None:
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach from a window with "
"only one tab")
tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(self._current_url())
self._tabbed_browser.close_tab(self._current_widget(), add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated='Use :tab-give instead!')
def tab_detach(self):
"""Deprecated way to detach a tab."""
self.tab_give()
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
try:
if forward:
widget.history.forward(count)
else:
widget.history.back(count)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
Uses the
link:settings.html#url.incdec_segments[url.incdec_segments]
config option.
- `decrement`: Decrement the last number in the URL.
Uses the
link:settings.html#url.incdec_segments[url.incdec_segments]
config option.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, related=True)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: Multiplier for dx/dy.
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: Multiplier for the scroll step.
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
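# Only one axis is scrolled; the other is passed as None so the
# scroller keeps its current position on that axis.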
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: Multiplier for x/y.
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
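# At the page edge, run the configured :navigate action instead of
# scrolling further.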
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = urlutils.query_string(url)
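# Some sites use ';' instead of '&' as the query pair separator; only
# switch delimiters when no '&' is present so mixed strings stay intact.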
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.val.url.yank_ignored_parameters:
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
def _selection_callback(s):
if not s:
message.info("Nothing to yank")
return
self._yank_to_target(s, sel, what, keep)
caret = self._current_widget().caret
caret.selection(callback=_selection_callback)
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
self._yank_to_target(s, sel, what, keep)
def _yank_to_target(self, s, sel, what, keep):
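# Fall back to the clipboard if the platform has no primary selection.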
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
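# Precedence: count overrides the zoom argument, which overrides the
# configured default.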
level = count if count is not None else zoom
if level is None:
level = config.val.zoom.default
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(int(level)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# close as many tabs as we can
first_tab = True
pinned_tabs_cleanup = False
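# Group all tabs closed here into a single undo entry: only the first
# close starts a new undo group.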
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
if force or not tab.data.pinned:
self._tabbed_browser.close_tab(tab, new_undo=first_tab)
first_tab = False
else:
pinned_tabs_cleanup = tab
# Check to see if we would like to close any pinned tabs
if pinned_tabs_cleanup:
self._tabbed_browser.tab_close_prompt_if_pinned(
pinned_tabs_cleanup,
force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True),
text="Are you sure you want to close pinned tabs?")
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open the last closed tab or tabs."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
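# Wrap around only when tabs.wrap is enabled; otherwise going past the
# first tab is an error.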
if newidx >= 0:
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
def _resolve_buffer_index(self, index):
"""Resolve a buffer index to the tabbedbrowser and tab.
Args:
index: The [win_id/]index of the tab to be selected. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
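# If any part is not an integer, treat the whole argument as a pattern
# and fuzzy-match it against the open tabs.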
try:
for part in index_parts:
int(part)
except ValueError:
model = miscmodels.buffer()
model.set_pattern(index)
if model.count() > 0:
index = model.data(model.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
return (tabbed_browser, tabbed_browser.widget(idx-1))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('index', completion=miscmodels.buffer)
@cmdutils.argument('count', count=True)
def buffer(self, index=None, count=None):
"""Select tab by index or url/title best match.
Focuses window if necessary when index is given. If both index and
count are given, use count.
With neither index nor count given, open the qute://tabs page.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
count: The tab index to focus, starting with 1.
"""
if count is None and index is None:
self.openurl('qute://tabs/', tab=True)
return
if count is not None:
index = str(count)
tabbed_browser, tab = self._resolve_buffer_index(index)
window = tabbed_browser.window()
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentWidget(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int] = None,
count=None, no_last=False):
"""Select the tab given as argument/[count].
If neither count nor index are given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
no_last: Whether to avoid focusing last tab if already focused.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
elif not no_last and index == self._current_index() + 1:
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int] = None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
index: `+` or `-` to move relative to the current tab by count
(defaulting to 1), or an absolute tab index to move to.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.val.tabs.wrap:
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
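# A 1-based positive index, or a negative index counting from the end.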
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False,
output=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
output: Whether the output should be shown in a new tab.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
cmdutils.check_exclusive((userscript, detach), 'ud')
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
@pyqtSlot()
def _on_proc_finished():
if output:
tb = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tb.openurl(QUrl('qute://spawn-output'), newtab=True)
if userscript:
def _selection_callback(s):
try:
runner = self._run_userscript(s, cmd, args, verbose)
runner.finished.connect(_on_proc_finished)
except cmdexc.CommandError as e:
message.error(str(e))
# ~ expansion is handled by the userscript module.
# dirty hack for async call because of:
# https://bugreports.qt.io/browse/QTBUG-53134
# until it is fixed or a blocking async call is implemented:
# https://github.com/qutebrowser/qutebrowser/issues/3327
caret = self._current_widget().caret
caret.selection(callback=_selection_callback)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
proc.finished.connect(_on_proc_finished)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.val.url.start_pages[0])
def _run_userscript(self, selection, cmd, args, verbose):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
'QUTE_SELECTED_TEXT': selection,
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
# FIXME:qtwebengine: If tab is None, run_async will fail!
tab = self._tabbed_browser.currentWidget()
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
runner = userscripts.run_async(
tab, cmd, *args, win_id=self._win_id, env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
return runner
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there is more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If neither url nor title is provided, save the current page as a
bookmark.
If url and title are provided, save the given url as a bookmark
with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If not given, use url of current
page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if not url:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def download(self, url=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
Args:
url: The URL to download. If not given, download the current page.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager')
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
suggested_fn = downloads.suggested_fn_from_title(
self._current_url().path(), tab.title()
)
download_manager.get(
self._current_url(),
user_agent=user_agent,
qnam=qnam,
target=target,
suggested_fn=suggested_fn
)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self, edit=False):
"""Show the source of the current page in a new tab.
Args:
edit: Edit the source in the editor instead of opening a tab.
"""
tab = self._current_widget()
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
if current_url.scheme() == 'view-source':
raise cmdexc.CommandError("Already viewing source!")
if edit:
ed = editor.ExternalEditor(self._tabbed_browser)
tab.dump_async(ed.edit)
else:
tab.action.show_source()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
"""Write the data to disk."""
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=miscmodels.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__.__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif topic in configdata.DATA:
path = 'settings.html#{}'.format(topic)
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
caret_position = elem.caret_position()
ed = editor.ExternalEditor(watch=True, parent=self._tabbed_browser)
ed.file_updated.connect(functools.partial(
self.on_file_updated, elem))
ed.editing_finished.connect(lambda: mainwindow.raise_window(
objreg.last_focused_window(), alert=False))
ed.edit(text, caret_position)
@cmdutils.register(instance='command-dispatcher', scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`editor.command` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_file_updated(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the edited text was updated.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
except webelem.OrphanedError:
message.error('Edited element vanished')
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget =
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
The given filter needs to result in exactly one element; otherwise an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text),
replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
if not text:
if tab.search.search_displayed:
tab.search.clear()
return
options = {
'ignore_case': config.val.search.ignore_case,
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
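# Starting a fresh search already jumps to the first match, which
# consumes one step of the count.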
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int] = None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
If the path is relative, the file is searched in a js/ subdir
in qutebrowser's data dir, e.g.
`~/.local/share/qutebrowser/js`.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
"""Show the data returned from JS."""
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
path = os.path.expanduser(js_code)
if not os.path.isabs(path):
path = os.path.join(standarddir.data(), 'js', path)
try:
with open(path, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
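# Post a full press/release pair per key so the receiver sees a
# complete keystroke.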
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False,
private=False, related=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`editor.command` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.file_updated.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window, private=private, related=related))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window')
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window')
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False, private=False, related=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
if bg or tab or window or private or related or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window,
private=private, related=related)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.window()
window.setWindowState(window.windowState() ^ Qt.WindowFullScreen)
| 1 | 20,848 | No need for this when you don't need to customize anything about the argument. | qutebrowser-qutebrowser | py |
@@ -0,0 +1,14 @@
+<!-- TODO: Move this away from using ID attr -->
+<div class="panel-group section-group"
+ data-modifiable="<%= modifiable %>"
+ id="sections_accordion"
+ role="tablist">
+ <% sections.each do |section| %>
+ <%= render partial: "org_admin/sections/section",
+ object: section,
+ locals: { phase: phase,
+ template: template,
+ current_section: current_section,
+ draggable: section == sections.first } %>
+  <% end %>
+</div> | 1 | 1 | 17,828 | We're now going to have 3 `#sections_accordion` will need to see how JS behaves. We should probably switch to a class selector instead | DMPRoadmap-roadmap | rb |
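A minimal, hypothetical sketch of the class-based variant suggested in the review comment for this partial (`sections-accordion` is an assumed class name, not from the project) — a shared class avoids duplicate-ID collisions when several accordions render on the same page, and the JS hook becomes `$('.sections-accordion')` instead of `$('#sections_accordion')`:

<!-- Hypothetical: 'sections-accordion' is an assumed class name -->
<div class="panel-group section-group sections-accordion"
     data-modifiable="<%= modifiable %>"
     role="tablist">
  <!-- sections render here exactly as in the original partial -->
</div>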